Bug 1913377 - Comment and assert that `allowedScope` has a very limited set of values...
[gecko.git] / js / src / jit / CodeGenerator.cpp
blobab17d949941e125545625ae69ce29845a649ece0
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmPI.h"
81 #include "wasm/WasmValType.h"
82 #ifdef MOZ_VTUNE
83 # include "vtune/VTuneWrapper.h"
84 #endif
85 #include "wasm/WasmBinary.h"
86 #include "wasm/WasmGC.h"
87 #include "wasm/WasmGcObject.h"
88 #include "wasm/WasmStubs.h"
90 #include "builtin/Boolean-inl.h"
91 #include "jit/MacroAssembler-inl.h"
92 #include "jit/shared/CodeGenerator-shared-inl.h"
93 #include "jit/TemplateObject-inl.h"
94 #include "jit/VMFunctionList-inl.h"
95 #include "vm/JSScript-inl.h"
96 #include "wasm/WasmInstance-inl.h"
98 using namespace js;
99 using namespace js::jit;
101 using mozilla::CheckedUint32;
102 using mozilla::DebugOnly;
103 using mozilla::FloatingPoint;
104 using mozilla::NegativeInfinity;
105 using mozilla::PositiveInfinity;
107 using JS::ExpandoAndGeneration;
109 namespace js {
110 namespace jit {
112 #ifdef CHECK_OSIPOINT_REGISTERS
113 template <class Op>
114 static void HandleRegisterDump(Op op, MacroAssembler& masm,
115 LiveRegisterSet liveRegs, Register activation,
116 Register scratch) {
117 const size_t baseOffset = JitActivation::offsetOfRegs();
119 // Handle live GPRs.
120 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
121 Register reg = *iter;
122 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
124 if (reg == activation) {
125 // To use the original value of the activation register (that's
126 // now on top of the stack), we need the scratch register.
127 masm.push(scratch);
128 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
129 op(scratch, dump);
130 masm.pop(scratch);
131 } else {
132 op(reg, dump);
136 // Handle live FPRs.
137 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
138 FloatRegister reg = *iter;
139 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
140 op(reg, dump);
144 class StoreOp {
145 MacroAssembler& masm;
147 public:
148 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
150 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
151 void operator()(FloatRegister reg, Address dump) {
152 if (reg.isDouble()) {
153 masm.storeDouble(reg, dump);
154 } else if (reg.isSingle()) {
155 masm.storeFloat32(reg, dump);
156 } else if (reg.isSimd128()) {
157 MOZ_CRASH("Unexpected case for SIMD");
158 } else {
159 MOZ_CRASH("Unexpected register type.");
164 class VerifyOp {
165 MacroAssembler& masm;
166 Label* failure_;
168 public:
169 VerifyOp(MacroAssembler& masm, Label* failure)
170 : masm(masm), failure_(failure) {}
172 void operator()(Register reg, Address dump) {
173 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
175 void operator()(FloatRegister reg, Address dump) {
176 if (reg.isDouble()) {
177 ScratchDoubleScope scratch(masm);
178 masm.loadDouble(dump, scratch);
179 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
180 } else if (reg.isSingle()) {
181 ScratchFloat32Scope scratch(masm);
182 masm.loadFloat32(dump, scratch);
183 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
184 } else if (reg.isSimd128()) {
185 MOZ_CRASH("Unexpected case for SIMD");
186 } else {
187 MOZ_CRASH("Unexpected register type.");
// Emit code that, at runtime, checks the registers recorded by
// StoreAllLiveRegs at the preceding VM call still hold the same values at
// this OsiPoint. Only compiled when CHECK_OSIPOINT_REGISTERS is defined.
void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
  // Ensure the live registers stored by callVM did not change between
  // the call and this OsiPoint. Try-catch relies on this invariant.

  // Load pointer to the JitActivation in a scratch register.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  Register scratch = allRegs.takeAny();
  masm.push(scratch);
  masm.loadJitActivation(scratch);

  // If we should not check registers (because the instruction did not call
  // into the VM, or a GC happened), we're done.
  Label failure, done;
  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
  masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);

  // Having more than one VM function call made in one visit function at
  // runtime is a sec-critical error, because if we conservatively assume that
  // one of the function call can re-enter Ion, then the invalidation process
  // will potentially add a call at a random location, by patching the code
  // before the return address.
  masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);

  // Set checkRegs to 0, so that we don't try to verify registers after we
  // return from this script to the caller.
  masm.store32(Imm32(0), checkRegs);

  // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
  // temps after calling into the VM. This is fine because no other
  // instructions (including this OsiPoint) will depend on them. Also
  // backtracking can also use the same register for an input and an output.
  // These are marked as clobbered and shouldn't get checked.
  LiveRegisterSet liveRegs;
  liveRegs.set() = RegisterSet::Intersect(
      safepoint->liveRegs().set(),
      RegisterSet::Not(safepoint->clobberedRegs().set()));

  VerifyOp op(masm, &failure);
  HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());

  masm.jump(&done);

  // Do not profile the callWithABI that occurs below.  This is to avoid a
  // rare corner case that occurs when profiling interacts with itself:
  //
  // When slow profiling assertions are turned on, FunctionBoundary ops
  // (which update the profiler pseudo-stack) may emit a callVM, which
  // forces them to have an osi point associated with them.  The
  // FunctionBoundary for inline function entry is added to the caller's
  // graph with a PC from the caller's code, but during codegen it modifies
  // Gecko Profiler instrumentation to add the callee as the current top-most
  // script. When codegen gets to the OSIPoint, and the callWithABI below is
  // emitted, the codegen thinks that the current frame is the callee, but
  // the PC it's using from the OSIPoint refers to the caller. This causes
  // the profiler instrumentation of the callWithABI below to ASSERT, since
  // the script and pc are mismatched. To avoid this, we simply omit
  // instrumentation for these callWithABIs.

  // Any live register captured by a safepoint (other than temp registers)
  // must remain unchanged between the call and the OsiPoint instruction.
  masm.bind(&failure);
  masm.assumeUnreachable("Modified registers between VM call and OsiPoint");

  masm.bind(&done);
  masm.pop(scratch);
}
259 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
260 if (!checkOsiPointRegisters) {
261 return false;
264 if (safepoint->liveRegs().emptyGeneral() &&
265 safepoint->liveRegs().emptyFloat()) {
266 return false; // No registers to check.
269 return true;
272 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
273 if (!shouldVerifyOsiPointRegs(safepoint)) {
274 return;
277 // Set checkRegs to 0. If we perform a VM call, the instruction
278 // will set it to 1.
279 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
280 Register scratch = allRegs.takeAny();
281 masm.push(scratch);
282 masm.loadJitActivation(scratch);
283 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
284 masm.store32(Imm32(0), checkRegs);
285 masm.pop(scratch);
288 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
289 // Store a copy of all live registers before performing the call.
290 // When we reach the OsiPoint, we can use this to check nothing
291 // modified them in the meantime.
293 // Load pointer to the JitActivation in a scratch register.
294 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
295 Register scratch = allRegs.takeAny();
296 masm.push(scratch);
297 masm.loadJitActivation(scratch);
299 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
300 masm.add32(Imm32(1), checkRegs);
302 StoreOp op(masm);
303 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
305 masm.pop(scratch);
307 #endif // CHECK_OSIPOINT_REGISTERS
// Before doing any call to Cpp, you should ensure that volatile
// registers are evicted by the register allocator.
//
// Emits a call through the VM wrapper trampoline for |id|: pushes an exit
// frame descriptor, calls the wrapper, records the safepoint, and pops the
// explicit arguments that were pushed with pushArg().
void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
  TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
  const VMFunctionData& fun = GetVMFunction(id);

  // Stack is:
  //    ... frame ...
  //    [args]
#ifdef DEBUG
  // All explicit arguments must already have been pushed by the caller.
  MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
  pushedArgs_ = 0;
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  // Record live registers so the following OsiPoint can verify them.
  if (shouldVerifyOsiPointRegs(ins->safepoint())) {
    StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
  }
#endif

#ifdef DEBUG
  if (ins->mirRaw()) {
    MOZ_ASSERT(ins->mirRaw()->isInstruction());
    MInstruction* mir = ins->mirRaw()->toInstruction();
    MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());

    // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
    // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
    // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
    // interrupt callbacks can call JS (chrome JS or shell testing functions).
    bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
    if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
      const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
      masm.move32(Imm32(1), ReturnReg);
      masm.store32(ReturnReg, AbsoluteAddress(addr));
    }
  }
#endif

  // Push an exit frame descriptor.
  masm.PushFrameDescriptor(FrameType::IonJS);

  // Call the wrapper function.  The wrapper is in charge to unwind the stack
  // when returning from the call.  Failures are handled with exceptions based
  // on the return value of the C functions.  To guard the outcome of the
  // returned value, use another LIR instruction.
  ensureOsiSpace();
  uint32_t callOffset = masm.callJit(code);
  markSafepointAt(callOffset, ins);

#ifdef DEBUG
  // Reset the disallowArbitraryCode flag after the call.
  {
    const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
    masm.push(ReturnReg);
    masm.move32(Imm32(0), ReturnReg);
    masm.store32(ReturnReg, AbsoluteAddress(addr));
    masm.pop(ReturnReg);
  }
#endif

  // Pop rest of the exit frame and the arguments left on the stack.
  int framePop =
      sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
  masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);

  // Stack is:
  //    ... frame ...
}
379 template <typename Fn, Fn fn>
380 void CodeGenerator::callVM(LInstruction* ins) {
381 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
382 callVMInternal(id, ins);
385 // ArgSeq store arguments for OutOfLineCallVM.
387 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
388 // this function is an instance of a class which provides a "generate" in charge
389 // of pushing the argument, with "pushArg", for a VMFunction.
391 // Such list of arguments can be created by using the "ArgList" function which
392 // creates one instance of "ArgSeq", where the type of the arguments are
393 // inferred from the type of the arguments.
395 // The list of arguments must be written in the same order as if you were
396 // calling the function in C++.
398 // Example:
399 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
401 template <typename... ArgTypes>
402 class ArgSeq {
403 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
405 template <std::size_t... ISeq>
406 inline void generate(CodeGenerator* codegen,
407 std::index_sequence<ISeq...>) const {
408 // Arguments are pushed in reverse order, from last argument to first
409 // argument.
410 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
413 public:
414 explicit ArgSeq(ArgTypes&&... args)
415 : args_(std::forward<ArgTypes>(args)...) {}
417 inline void generate(CodeGenerator* codegen) const {
418 generate(codegen, std::index_sequence_for<ArgTypes...>{});
421 #ifdef DEBUG
422 static constexpr size_t numArgs = sizeof...(ArgTypes);
423 #endif
// Convenience factory: deduces ArgTypes and perfect-forwards them into an
// ArgSeq, so call sites can write ArgList(a, b) instead of spelling the
// template arguments.
template <typename... ArgTypes>
inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
  return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
}
431 // Store wrappers, to generate the right move of data after the VM call.
433 struct StoreNothing {
434 inline void generate(CodeGenerator* codegen) const {}
435 inline LiveRegisterSet clobbered() const {
436 return LiveRegisterSet(); // No register gets clobbered
440 class StoreRegisterTo {
441 private:
442 Register out_;
444 public:
445 explicit StoreRegisterTo(Register out) : out_(out) {}
447 inline void generate(CodeGenerator* codegen) const {
448 // It's okay to use storePointerResultTo here - the VMFunction wrapper
449 // ensures the upper bytes are zero for bool/int32 return values.
450 codegen->storePointerResultTo(out_);
452 inline LiveRegisterSet clobbered() const {
453 LiveRegisterSet set;
454 set.add(out_);
455 return set;
459 class StoreFloatRegisterTo {
460 private:
461 FloatRegister out_;
463 public:
464 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
466 inline void generate(CodeGenerator* codegen) const {
467 codegen->storeFloatResultTo(out_);
469 inline LiveRegisterSet clobbered() const {
470 LiveRegisterSet set;
471 set.add(out_);
472 return set;
476 template <typename Output>
477 class StoreValueTo_ {
478 private:
479 Output out_;
481 public:
482 explicit StoreValueTo_(const Output& out) : out_(out) {}
484 inline void generate(CodeGenerator* codegen) const {
485 codegen->storeResultValueTo(out_);
487 inline LiveRegisterSet clobbered() const {
488 LiveRegisterSet set;
489 set.add(out_);
490 return set;
// Factory that deduces Output, so call sites can write
// StoreValueTo(reg) instead of StoreValueTo_<decltype(reg)>(reg).
template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output& out) {
  return StoreValueTo_<Output>(out);
}
499 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
500 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
501 private:
502 LInstruction* lir_;
503 ArgSeq args_;
504 StoreOutputTo out_;
506 public:
507 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
508 const StoreOutputTo& out)
509 : lir_(lir), args_(args), out_(out) {}
511 void accept(CodeGenerator* codegen) override {
512 codegen->visitOutOfLineCallVM(this);
515 LInstruction* lir() const { return lir_; }
516 const ArgSeq& args() const { return args_; }
517 const StoreOutputTo& out() const { return out_; }
// Allocates an OutOfLineCallVM for |lir| that will call |fn| with |args| and
// route the result through |out|. In DEBUG builds, cross-checks the argument
// count and the presence/absence of a return value against the VMFunction
// metadata.
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
                                        const StoreOutputTo& out) {
  MOZ_ASSERT(lir->mirRaw());
  MOZ_ASSERT(lir->mirRaw()->isInstruction());

#ifdef DEBUG
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  const VMFunctionData& fun = GetVMFunction(id);
  // The ArgSeq must supply exactly the VMFunction's explicit arguments, and
  // StoreNothing is used iff the function returns no data.
  MOZ_ASSERT(fun.explicitArgs == args.numArgs);
  MOZ_ASSERT(fun.returnsData() !=
             (std::is_same_v<StoreOutputTo, StoreNothing>));
#endif

  OutOfLineCode* ool = new (alloc())
      OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
  addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
  return ool;
}
// Emits the out-of-line VM-call path: save live registers, push arguments,
// perform the call, store the result, restore registers (except those the
// output policy clobbers), and jump back to the inline path.
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
void CodeGenerator::visitOutOfLineCallVM(
    OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
  LInstruction* lir = ool->lir();

#ifdef JS_JITSPEW
  JitSpewStart(JitSpew_Codegen, " # LIR=%s",
               lir->opName());
  if (const char* extra = lir->getExtraName()) {
    JitSpewCont(JitSpew_Codegen, ":%s", extra);
  }
  JitSpewFin(JitSpew_Codegen);
#endif
  perfSpewer_.recordInstruction(masm, lir);
  saveLive(lir);
  ool->args().generate(this);
  callVM<Fn, fn>(lir);
  ool->out().generate(this);
  // Registers written by the output policy hold the result; don't restore
  // their saved (stale) values.
  restoreLiveIgnore(lir, ool->out().clobbered());
  masm.jump(ool->rejoin());
}
562 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
563 private:
564 LInstruction* lir_;
565 size_t cacheIndex_;
566 size_t cacheInfoIndex_;
568 public:
569 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
570 size_t cacheInfoIndex)
571 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
573 void bind(MacroAssembler* masm) override {
574 // The binding of the initial jump is done in
575 // CodeGenerator::visitOutOfLineICFallback.
578 size_t cacheIndex() const { return cacheIndex_; }
579 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
580 LInstruction* lir() const { return lir_; }
582 void accept(CodeGenerator* codegen) override {
583 codegen->visitOutOfLineICFallback(this);
// Wires up an IonIC at the current code position: records its scripted
// location, emits the patchable entry jump, and allocates the out-of-line
// fallback path. |cacheIndex == SIZE_MAX| signals an earlier allocation
// failure and is turned into a masm OOM.
void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
  if (cacheIndex == SIZE_MAX) {
    masm.setOOM();
    return;
  }

  DataPtr<IonIC> cache(this, cacheIndex);
  MInstruction* mir = lir->mirRaw()->toInstruction();
  cache->setScriptedLocation(mir->block()->info().script(),
                             mir->resumePoint()->pc());

  // Emit a movWithPatch/jump pair; the -1 placeholder is patched later with
  // the address of the first stub (or the fallback path).
  Register temp = cache->scratchRegisterForEntryJump();
  icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
  masm.jump(Address(temp, 0));

  MOZ_ASSERT(!icInfo_.empty());

  OutOfLineICFallback* ool =
      new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
  addOutOfLineCode(ool, mir);

  masm.bind(ool->rejoin());
  cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
}
// Emits the fallback path for every supported IonIC kind. Each case follows
// the same shape: save live registers, push the IC's operands (last argument
// first), push a patchable placeholder for the IC pointer, push the outer
// script, call the IC's update function, store the output (if any), restore
// live registers, and jump back to the inline rejoin point.
void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
  LInstruction* lir = ool->lir();
  size_t cacheIndex = ool->cacheIndex();
  size_t cacheInfoIndex = ool->cacheInfoIndex();

  DataPtr<IonIC> ic(this, cacheIndex);

  // Register the location of the OOL path in the IC.
  ic->setFallbackOffset(CodeOffset(masm.currentOffset()));

  switch (ic->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();

      saveLive(lir);

      pushArg(getPropIC->id());
      pushArg(getPropIC->value());
      // Placeholder for the IC pointer, patched once the IC is allocated.
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
                          HandleValue, HandleValue, MutableHandleValue);
      callVM<Fn, IonGetPropertyIC::update>(lir);

      StoreValueTo(getPropIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();

      saveLive(lir);

      pushArg(getPropSuperIC->id());
      pushArg(getPropSuperIC->receiver());
      pushArg(getPropSuperIC->object());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn =
          bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
                   HandleValue, HandleValue, MutableHandleValue);
      callVM<Fn, IonGetPropSuperIC::update>(lir);

      StoreValueTo(getPropSuperIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreValueTo(getPropSuperIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();

      saveLive(lir);

      pushArg(setPropIC->rhs());
      pushArg(setPropIC->id());
      pushArg(setPropIC->object());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
                          HandleObject, HandleValue, HandleValue);
      callVM<Fn, IonSetPropertyIC::update>(lir);

      // No output to store for a property set.
      restoreLive(lir);

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetName: {
      IonGetNameIC* getNameIC = ic->asGetNameIC();

      saveLive(lir);

      pushArg(getNameIC->environment());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
                          MutableHandleValue);
      callVM<Fn, IonGetNameIC::update>(lir);

      StoreValueTo(getNameIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::BindName: {
      IonBindNameIC* bindNameIC = ic->asBindNameIC();

      saveLive(lir);

      pushArg(bindNameIC->environment());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn =
          JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
      callVM<Fn, IonBindNameIC::update>(lir);

      StoreRegisterTo(bindNameIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();

      saveLive(lir);

      pushArg(getIteratorIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
                               HandleValue);
      callVM<Fn, IonGetIteratorIC::update>(lir);

      StoreRegisterTo(getIteratorIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreRegisterTo(getIteratorIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::OptimizeSpreadCall: {
      auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();

      saveLive(lir);

      pushArg(optimizeSpreadCallIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
                          HandleValue, MutableHandleValue);
      callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);

      StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
      restoreLiveIgnore(
          lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::In: {
      IonInIC* inIC = ic->asInIC();

      saveLive(lir);

      pushArg(inIC->object());
      pushArg(inIC->key());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
                          HandleObject, bool*);
      callVM<Fn, IonInIC::update>(lir);

      StoreRegisterTo(inIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();

      saveLive(lir);

      pushArg(hasOwnIC->id());
      pushArg(hasOwnIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
                          HandleValue, int32_t*);
      callVM<Fn, IonHasOwnIC::update>(lir);

      StoreRegisterTo(hasOwnIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::CheckPrivateField: {
      IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();

      saveLive(lir);

      pushArg(checkPrivateFieldIC->id());
      pushArg(checkPrivateFieldIC->value());

      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
                          HandleValue, HandleValue, bool*);
      callVM<Fn, IonCheckPrivateFieldIC::update>(lir);

      StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
      restoreLiveIgnore(
          lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();

      saveLive(lir);

      pushArg(hasInstanceOfIC->rhs());
      pushArg(hasInstanceOfIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
                          HandleValue lhs, HandleObject rhs, bool* res);
      callVM<Fn, IonInstanceOfIC::update>(lir);

      StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreRegisterTo(hasInstanceOfIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();

      saveLive(lir);

      pushArg(unaryArithIC->input());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
                          IonUnaryArithIC* stub, HandleValue val,
                          MutableHandleValue res);
      callVM<Fn, IonUnaryArithIC::update>(lir);

      StoreValueTo(unaryArithIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::ToPropertyKey: {
      IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();

      saveLive(lir);

      pushArg(toPropertyKeyIC->input());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
                          IonToPropertyKeyIC* ic, HandleValue val,
                          MutableHandleValue res);
      callVM<Fn, IonToPropertyKeyIC::update>(lir);

      StoreValueTo(toPropertyKeyIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreValueTo(toPropertyKeyIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();

      saveLive(lir);

      pushArg(binaryArithIC->rhs());
      pushArg(binaryArithIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
                          IonBinaryArithIC* stub, HandleValue lhs,
                          HandleValue rhs, MutableHandleValue res);
      callVM<Fn, IonBinaryArithIC::update>(lir);

      StoreValueTo(binaryArithIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::Compare: {
      IonCompareIC* compareIC = ic->asCompareIC();

      saveLive(lir);

      pushArg(compareIC->rhs());
      pushArg(compareIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn =
          bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
                   HandleValue lhs, HandleValue rhs, bool* res);
      callVM<Fn, IonCompareIC::update>(lir);

      StoreRegisterTo(compareIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::CloseIter: {
      IonCloseIterIC* closeIterIC = ic->asCloseIterIC();

      saveLive(lir);

      pushArg(closeIterIC->iter());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn =
          bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
      callVM<Fn, IonCloseIterIC::update>(lir);

      // No output to store when closing an iterator.
      restoreLive(lir);

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::OptimizeGetIterator: {
      auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();

      saveLive(lir);

      pushArg(optimizeGetIteratorIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->outerInfo().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
                          HandleValue, bool* res);
      callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);

      StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
      restoreLiveIgnore(
          lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::TypeOfEq:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      // These kinds are handled elsewhere and never reach an Ion IC fallback.
      MOZ_CRASH("Unsupported IC");
  }
  MOZ_CRASH();
}
// Returns the template object used when allocating the result of a
// NewStringObject, downcast to StringObject.
StringObject* MNewStringObject::templateObj() const {
  return &templateObj_->as<StringObject>();
}
// All allocations hang off the MIRGenerator's lifo alloc; scriptCounts_ is
// the one heap allocation and is freed in the destructor.
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm)
    : CodeGeneratorSpecific(gen, graph, masm),
      ionScriptLabels_(gen->alloc()),
      ionNurseryObjectLabels_(gen->alloc()),
      scriptCounts_(nullptr),
      zoneStubsToReadBarrier_(0) {}
995 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Lower LValueToInt32: convert a boxed Value to an int32, bailing out of
// Ion code when the conversion fails. TRUNCATE mode implements bitwise-op
// style truncation (doubles and strings handled out-of-line); NORMAL mode
// emits a plain checked conversion.
void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
  ValueOperand operand = ToValue(lir, LValueToInt32::Input);
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->tempFloat());

  Label fails;
  if (lir->mode() == LValueToInt32::TRUNCATE) {
    // Doubles that don't fit an int32 are truncated on an out-of-line path.
    OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

    // We can only handle strings in truncation contexts, like bitwise
    // operations.
    Register stringReg = ToRegister(lir->temp());
    using Fn = bool (*)(JSContext*, JSString*, double*);
    auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
                                                    StoreFloatRegisterTo(temp));
    Label* stringEntry = oolString->entry();
    Label* stringRejoin = oolString->rejoin();

    masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
                              oolDouble->entry(), stringReg, temp, output,
                              &fails);
    masm.bind(oolDouble->rejoin());
  } else {
    MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
    masm.convertValueToInt32(operand, temp, output, &fails,
                             lir->mirNormal()->needsNegativeZeroCheck(),
                             lir->mirNormal()->conversion());
  }

  bailoutFrom(&fails, lir->snapshot());
}
1029 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1030 ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
1031 FloatRegister output = ToFloatRegister(lir->output());
1033 Label fail;
1034 masm.convertValueToDouble(operand, output, &fail);
1035 bailoutFrom(&fail, lir->snapshot());
1038 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1039 ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
1040 FloatRegister output = ToFloatRegister(lir->output());
1042 Label fail;
1043 masm.convertValueToFloat32(operand, output, &fail);
1044 bailoutFrom(&fail, lir->snapshot());
1047 void CodeGenerator::visitValueToFloat16(LValueToFloat16* lir) {
1048 ValueOperand operand = ToValue(lir, LValueToFloat16::InputIndex);
1049 Register temp = ToTempRegisterOrInvalid(lir->temp0());
1050 FloatRegister output = ToFloatRegister(lir->output());
1052 LiveRegisterSet volatileRegs;
1053 if (!MacroAssembler::SupportsFloat64To16()) {
1054 volatileRegs = liveVolatileRegs(lir);
1057 Label fail;
1058 masm.convertValueToFloat16(operand, output, temp, volatileRegs, &fail);
1059 bailoutFrom(&fail, lir->snapshot());
// Lower LValueToBigInt. BigInt values are unboxed inline; booleans and
// strings are converted through the ToBigInt VM call; every other tag
// (including objects) bails out — see the comment below.
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
  ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, HandleValue);
  auto* ool =
      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));

  Register tag = masm.extractTag(operand, output);

  Label notBigInt, done;
  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
  masm.unboxBigInt(operand, output);
  masm.jump(&done);
  masm.bind(&notBigInt);

  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
  masm.branchTestString(Assembler::Equal, tag, ool->entry());

  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
  bailout(lir->snapshot());

  masm.bind(ool->rejoin());
  masm.bind(&done);
}
1088 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1089 masm.convertInt32ToDouble(ToRegister(lir->input()),
1090 ToFloatRegister(lir->output()));
1093 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1094 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1095 ToFloatRegister(lir->output()));
1098 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1099 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1100 ToFloatRegister(lir->output()));
1103 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1104 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1105 ToFloatRegister(lir->output()));
1108 void CodeGenerator::visitDoubleToFloat16(LDoubleToFloat16* lir) {
1109 LiveRegisterSet volatileRegs;
1110 if (!MacroAssembler::SupportsFloat64To16()) {
1111 volatileRegs = liveVolatileRegs(lir);
1113 masm.convertDoubleToFloat16(
1114 ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
1115 ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
1118 void CodeGenerator::visitDoubleToFloat32ToFloat16(
1119 LDoubleToFloat32ToFloat16* lir) {
1120 masm.convertDoubleToFloat16(
1121 ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
1122 ToRegister(lir->temp0()), ToRegister(lir->temp1()));
1125 void CodeGenerator::visitFloat32ToFloat16(LFloat32ToFloat16* lir) {
1126 LiveRegisterSet volatileRegs;
1127 if (!MacroAssembler::SupportsFloat32To16()) {
1128 volatileRegs = liveVolatileRegs(lir);
1130 masm.convertFloat32ToFloat16(
1131 ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
1132 ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
1135 void CodeGenerator::visitInt32ToFloat16(LInt32ToFloat16* lir) {
1136 LiveRegisterSet volatileRegs;
1137 if (!MacroAssembler::SupportsFloat32To16()) {
1138 volatileRegs = liveVolatileRegs(lir);
1140 masm.convertInt32ToFloat16(
1141 ToRegister(lir->input()), ToFloatRegister(lir->output()),
1142 ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
1145 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1146 Label fail;
1147 FloatRegister input = ToFloatRegister(lir->input());
1148 Register output = ToRegister(lir->output());
1149 masm.convertDoubleToInt32(input, output, &fail,
1150 lir->mir()->needsNegativeZeroCheck());
1151 bailoutFrom(&fail, lir->snapshot());
1154 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1155 Label fail;
1156 FloatRegister input = ToFloatRegister(lir->input());
1157 Register output = ToRegister(lir->output());
1158 masm.convertFloat32ToInt32(input, output, &fail,
1159 lir->mir()->needsNegativeZeroCheck());
1160 bailoutFrom(&fail, lir->snapshot());
// Sign-extend an int32 (register or memory operand) to a pointer-sized
// integer. Only emitted on 64-bit targets.
void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
#ifdef JS_64BIT
  // This LIR instruction is only used if the input can be negative.
  MOZ_ASSERT(lir->mir()->canBeNegative());

  Register output = ToRegister(lir->output());
  const LAllocation* input = lir->input();
  if (input->isRegister()) {
    masm.move32SignExtendToPtr(ToRegister(input), output);
  } else {
    masm.load32SignExtendToPtr(ToAddress(input), output);
  }
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
// Narrow a non-negative pointer-sized integer to int32 in place (input and
// output share a register), bailing out if it does not fit. 64-bit only.
void CodeGenerator::visitNonNegativeIntPtrToInt32(
    LNonNegativeIntPtrToInt32* lir) {
#ifdef JS_64BIT
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  Label bail;
  masm.guardNonNegativeIntPtrToInt32(output, &bail);
  bailoutFrom(&bail, lir->snapshot());
#else
  MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
1194 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1195 Register input = ToRegister(lir->input());
1196 FloatRegister output = ToFloatRegister(lir->output());
1197 masm.convertIntPtrToDouble(input, output);
// Adjust a DataView length in place: subtract |byteSize - 1| from the
// (non-negative) value and bail out if the subtraction produces a negative
// result, i.e. the view is too short for this element size.
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  uint32_t byteSize = lir->mir()->byteSize();

#ifdef DEBUG
  // The incoming value must never be negative.
  Label ok;
  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
  masm.bind(&ok);
#endif

  // Subtract and branch on the sign flag of the result.
  Label bail;
  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line slow path for object truthiness: with volatile registers
// saved, call js::EmulatesUndefined (or, in DEBUG/FUZZING builds, a
// variant that also receives the current fuse value for cross-checking)
// and branch on its boolean result.
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
#if defined(DEBUG) || defined(FUZZING)
  // Pass the fuse state along so the callee can validate it.
  masm.loadPtr(AbsoluteAddress(
                   gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
               scratch);
  using Fn = bool (*)(JSObject* obj, size_t fuseValue);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.passABIArg(scratch);
  masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
#else
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
#endif
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy, in which case out-of-line code will call
// EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  Register objreg_;   // Object under test.
  Register scratch_;  // Scratch register for the out-of-line path.

  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  // setInputAndTargets() has been called iff this is non-null.
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() = default;

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
// Shared implementation for the testObjectEmulatesUndefined* helpers: wires
// up the out-of-line stub and emits the inline class-flag check. Callers
// decide how the ifDoesntEmulateUndefined edge is handled (fallthrough
// binding vs. explicit jump).
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy. Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
// Variant of testObjectEmulatesUndefined where the "doesn't emulate
// undefined" case falls through: |ifDoesntEmulateUndefined| is bound here,
// immediately after the test.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
// Test whether |objreg| emulates |undefined| and branch to the matching
// label; unlike branchTestObjectEmulatesUndefined, neither outcome falls
// through (the "doesn't emulate" case ends with an explicit jump).
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
// Emit the truthiness test of |value| for a single JSValueType. Branches to
// |ifTruthy|/|ifFalsy| as appropriate; when the value has a different tag,
// control continues after the emitted code. If |skipTypeTest| is set the
// value is known (asserted in DEBUG) to have tag |type|, no tag check is
// emitted, and the truthy case falls through instead of jumping.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      // Release the tag scratch while the payload is being inspected.
      ScratchTagScopeRelease _(&tag);
      Register objreg = masm.extractObject(value, tempToUnbox);
      testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy.  We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
// Emit a full truthiness test of a boxed Value over all possible types,
// testing the types previously observed at this site first (most frequent
// first) and then the rest in a fixed default order. Control falls through
// when the value is truthy and the final test was the fallthrough case.
void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    Register tempToUnbox, Register temp,
                                    FloatRegister floatTemp,
                                    const TypeDataList& observedTypes,
                                    Label* ifTruthy, Label* ifFalsy,
                                    OutOfLineTestObject* ool) {
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
      JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
      JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate tests for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : observedTypes) {
    JSValueType type = observed.type();
    remaining -= type;

    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
  }

  // Generate tests for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    // We don't need a type test for the last possible type.
    bool skipTypeTest = remaining.isEmpty();
    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, skipTypeTest);
  }
  MOZ_ASSERT(remaining.isEmpty());

  // We fall through if the final test is truthy.
}
// Branch on the truthiness of a BigInt: a BigInt is falsy exactly when it
// is zero. The comparison polarity is chosen so that whichever successor
// block is laid out next is reached by fallthrough rather than a jump.
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
  Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
  Register input = ToRegister(lir->input());

  if (isNextBlock(lir->ifFalse()->lir())) {
    masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
  } else {
    // Neither successor is next in layout: branch for one, jump for the
    // other.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
    jumpToBlock(lir->ifTrue());
  }
}
// DEBUG/FUZZING-only sanity check: verify at runtime that |input| does not
// carry the "emulates undefined" class flag, crashing via assumeUnreachable
// if it does. Compiles to nothing in regular release builds.
void CodeGenerator::assertObjectDoesNotEmulateUndefined(
    Register input, Register temp, const MInstruction* mir) {
#if defined(DEBUG) || defined(FUZZING)
  // Validate that the object indeed doesn't have the emulates undefined flag.
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, mir);

  Label* doesNotEmulateUndefined = ool->label1();
  Label* emulatesUndefined = ool->label2();

  testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
                              temp, ool);
  masm.bind(emulatesUndefined);
  masm.assumeUnreachable(
      "Found an object emulating undefined while the fuse is intact");
  masm.bind(doesNotEmulateUndefined);
#endif
}
// Branch on the truthiness of an object. While the "has seen an object
// emulating undefined" fuse is intact (and this script's dependency on it
// is noted), every object is truthy, so we can jump straight to the truthy
// block after a debug-only sanity check; otherwise emit the real test.
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
  Register input = ToRegister(lir->input());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    assertObjectDoesNotEmulateUndefined(input, ToRegister(lir->temp()),
                                        lir->mir());
    // Bug 1874905: It would be fantastic if this could be optimized out
    masm.jump(truthy);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());

    testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
                                ool);
  }
}
// Branch on the truthiness of a boxed Value, driven by the observed type
// frequencies recorded for this site.
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->mir());

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  Register temp = ToRegister(lir->temp2());
  FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
                  falsy, ool);
  // testValueTruthy falls through when the value is truthy.
  masm.jump(truthy);
}
// Lower LBooleanToString: load the runtime's interned "true"/"false" atom
// matching the input boolean. No VM call needed.
void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  const JSAtomState& names = gen->runtime->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, input, input, &true_);
  masm.movePtr(ImmGCPtr(names.false_), output);
  masm.jump(&done);

  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), output);

  masm.bind(&done);
}
1560 void CodeGenerator::visitIntToString(LIntToString* lir) {
1561 Register input = ToRegister(lir->input());
1562 Register output = ToRegister(lir->output());
1564 using Fn = JSLinearString* (*)(JSContext*, int);
1565 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1566 lir, ArgList(input), StoreRegisterTo(output));
1568 masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
1569 ool->entry());
1571 masm.bind(ool->rejoin());
// Lower LDoubleToString. Fast path: if the double converts exactly to an
// int32 with a static string, use that; otherwise fall back to the
// NumberToString VM call.
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
  masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Lower LValueToString: convert a boxed Value to a string, dispatching on
// the value's tag. Strings/integers/undefined/null/booleans are handled
// inline; doubles, BigInts and (when side effects are allowed) objects and
// symbols go through the ToStringSlow VM call; otherwise objects/symbols
// bail out.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
                               ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Signature of the store-buffer add/remove helpers invoked via ABI call.
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);

// Emit an ABI call to |fun| with arguments (buffer, &holder[offset]),
// preserving the registers in |liveVolatiles| across the call. A free
// volatile register is picked for the effective address; if none is left,
// |holder| is pushed and temporarily reused.
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    StoreBufferMutationFn fun) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

  // If no scratch register remains for the ABI call setup, borrow |holder|
  // (its value is saved and restored around the call).
  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}
// Emit a generational post-write barrier for a string edge at
// |holder + offset| whose value changes from |prev| to |next|: add the edge
// address to |next|'s store buffer when it has one, otherwise remove it
// from |prev|'s store buffer when only |prev| has one. (loadStoreBuffer
// yields null for cells without a store buffer — presumably tenured cells;
// confirm against MacroAssembler::loadStoreBuffer.)
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
// Lower LRegExp: materialize a fresh RegExp object. When the source object
// has a shared (compiled) regexp, clone it with an inline GC allocation
// from the template; otherwise (and on allocation failure) call
// CloneRegExpObject in the VM.
void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}
1793 static constexpr int32_t RegExpPairsVectorStartOffset(
1794 int32_t inputOutputDataStartOffset) {
1795 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1796 int32_t(sizeof(MatchPairs));
1799 static Address RegExpPairCountAddress(MacroAssembler& masm,
1800 int32_t inputOutputDataStartOffset) {
1801 return Address(FramePointer, inputOutputDataStartOffset +
1802 int32_t(InputOutputDataSize) +
1803 MatchPairs::offsetOfPairCount());
// Update the lazy RegExpStatics after a match: pre-barrier the old string
// fields, store the new pending/matches input (with post-barriers when the
// input string may be nursery-allocated), record |lastIndex|, set the
// pendingLazyEvaluation flag, and cache the regexp's source and flags from
// its RegExpShared.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());

  // Pre-barrier the string fields we are about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // The input is known to be tenured; no post-barrier required.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Cache source and flags from the RegExpShared stored in the regexp's
  // fixed SHARED_SLOT.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
1863 // Prepare an InputOutputData and optional MatchPairs which space has been
1864 // allocated for on the stack, and try to execute a RegExp on a string input.
1865 // If the RegExp was successfully executed and matched the input, fallthrough.
1866 // Otherwise, jump to notFound or failure.
1868 // inputOutputDataStartOffset is the offset relative to the frame pointer
1869 // register. This offset is negative for the RegExpExecTest stub.
1870 static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
1871 Register input, Register lastIndex,
1872 Register temp1, Register temp2,
1873 Register temp3,
1874 int32_t inputOutputDataStartOffset,
1875 gc::Heap initialStringHeap, Label* notFound,
1876 Label* failure) {
1877 JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
1879 using irregexp::InputOutputData;
1882 * [SMDOC] Stack layout for PrepareAndExecuteRegExp
1884 * Before this function is called, the caller is responsible for
1885 * allocating enough stack space for the following data:
1887 * inputOutputDataStartOffset +-----> +---------------+
1888 * |InputOutputData|
1889 * inputStartAddress +----------> inputStart|
1890 * inputEndAddress +----------> inputEnd|
1891 * startIndexAddress +----------> startIndex|
1892 * matchesAddress +----------> matches|-----+
1893 * +---------------+ |
1894 * matchPairs(Address|Offset) +-----> +---------------+ <--+
1895 * | MatchPairs |
1896 * pairCountAddress +----------> count |
1897 * pairsPointerAddress +----------> pairs |-----+
1898 * +---------------+ |
1899 * pairsArray(Address|Offset) +-----> +---------------+ <--+
1900 * | MatchPair |
1901 * firstMatchStartAddress +----------> start | <--+
1902 * | limit | |
1903 * +---------------+ |
1904 * . |
1905 * . Reserved space for
1906 * . RegExpObject::MaxPairCount
1907 * . MatchPair objects
1908 * . |
1909 * +---------------+ |
1910 * | MatchPair | |
1911 * | start | |
1912 * | limit | <--+
1913 * +---------------+
1916 int32_t ioOffset = inputOutputDataStartOffset;
1917 int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
1918 int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
// Frame-pointer-relative addresses of the fields in the diagram above.
1920 Address inputStartAddress(FramePointer,
1921 ioOffset + InputOutputData::offsetOfInputStart());
1922 Address inputEndAddress(FramePointer,
1923 ioOffset + InputOutputData::offsetOfInputEnd());
1924 Address startIndexAddress(FramePointer,
1925 ioOffset + InputOutputData::offsetOfStartIndex());
1926 Address matchesAddress(FramePointer,
1927 ioOffset + InputOutputData::offsetOfMatches());
1929 Address matchPairsAddress(FramePointer, matchPairsOffset);
1930 Address pairCountAddress(FramePointer,
1931 matchPairsOffset + MatchPairs::offsetOfPairCount());
1932 Address pairsPointerAddress(FramePointer,
1933 matchPairsOffset + MatchPairs::offsetOfPairs());
1935 Address pairsArrayAddress(FramePointer, pairsArrayOffset);
1936 Address firstMatchStartAddress(FramePointer,
1937 pairsArrayOffset + MatchPair::offsetOfStart());
1939 // First, fill in a skeletal MatchPairs instance on the stack. This will be
1940 // passed to the OOL stub in the caller if we aren't able to execute the
1941 // RegExp inline, and that stub needs to be able to determine whether the
1942 // execution finished successfully.
1944 // Initialize MatchPairs::pairCount to 1. The correct value can only
1945 // be determined after loading the RegExpShared. If the RegExpShared
1946 // has Kind::Atom, this is the correct pairCount.
1947 masm.store32(Imm32(1), pairCountAddress);
1949 // Initialize MatchPairs::pairs pointer
1950 masm.computeEffectiveAddress(pairsArrayAddress, temp1);
1951 masm.storePtr(temp1, pairsPointerAddress);
1953 // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
1954 masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
1956 // Determine the set of volatile inputs to save when calling into C++ or
1957 // regexp code.
1958 LiveGeneralRegisterSet volatileRegs;
1959 if (lastIndex.volatile_()) {
1960 volatileRegs.add(lastIndex);
1962 if (input.volatile_()) {
1963 volatileRegs.add(input);
1965 if (regexp.volatile_()) {
1966 volatileRegs.add(regexp);
1969 // Ensure the input string is not a rope.
1970 Label isLinear;
1971 masm.branchIfNotRope(input, &isLinear);
1973 masm.PushRegsInMask(volatileRegs);
1975 using Fn = JSLinearString* (*)(JSString*);
1976 masm.setupUnalignedABICall(temp1);
1977 masm.passABIArg(input);
1978 masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
1980 MOZ_ASSERT(!volatileRegs.has(temp1));
1981 masm.storeCallPointerResult(temp1);
1982 masm.PopRegsInMask(volatileRegs);
// A null result means the string could not be linearized; bail to |failure|.
1984 masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
1986 masm.bind(&isLinear);
1988 // Load the RegExpShared.
1989 Register regexpReg = temp1;
1990 Address sharedSlot = Address(
1991 regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
1992 masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
1993 masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
1995 // Handle Atom matches
1996 Label notAtom, checkSuccess;
1997 masm.branchPtr(Assembler::Equal,
1998 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
1999 ImmWord(0), &notAtom);
// Atom patterns are matched with a direct C++ call instead of compiled
// regexp code; ExecuteRegExpAtomRaw takes the stack MatchPairs address.
2001 masm.computeEffectiveAddress(matchPairsAddress, temp3);
2003 masm.PushRegsInMask(volatileRegs);
2004 using Fn =
2005 RegExpRunStatus (*)(RegExpShared* re, const JSLinearString* input,
2006 size_t start, MatchPairs* matchPairs);
2007 masm.setupUnalignedABICall(temp2);
2008 masm.passABIArg(regexpReg);
2009 masm.passABIArg(input);
2010 masm.passABIArg(lastIndex);
2011 masm.passABIArg(temp3);
2012 masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2014 MOZ_ASSERT(!volatileRegs.has(temp1));
2015 masm.storeCallInt32Result(temp1);
2016 masm.PopRegsInMask(volatileRegs);
2018 masm.jump(&checkSuccess);
2020 masm.bind(&notAtom);
2022 // Don't handle regexps with too many capture pairs.
2023 masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2024 masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2025 failure);
2027 // Fill in the pair count in the MatchPairs on the stack.
2028 masm.store32(temp2, pairCountAddress);
2030 // Load code pointer and length of input (in bytes).
2031 // Store the input start in the InputOutputData.
2032 Register codePointer = temp1; // Note: temp1 was previously regexpReg.
2033 Register byteLength = temp3;
2035 Label isLatin1, done;
2036 masm.loadStringLength(input, byteLength);
2038 masm.branchLatin1String(input, &isLatin1);
2040 // Two-byte input
2041 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2042 masm.storePtr(temp2, inputStartAddress);
2043 masm.loadPtr(
2044 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2045 codePointer);
// Two-byte chars: double the char count to get the byte count.
2046 masm.lshiftPtr(Imm32(1), byteLength);
2047 masm.jump(&done);
2049 // Latin1 input
2050 masm.bind(&isLatin1);
2051 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2052 masm.storePtr(temp2, inputStartAddress);
2053 masm.loadPtr(
2054 Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2055 codePointer);
2057 masm.bind(&done);
2059 // Store end pointer
2060 masm.addPtr(byteLength, temp2);
2061 masm.storePtr(temp2, inputEndAddress);
2064 // Guard that the RegExpShared has been compiled for this type of input.
2065 // If it has not been compiled, we fall back to the OOL case, which will
2066 // do a VM call into the interpreter.
2067 // TODO: add an interpreter trampoline?
2068 masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2069 masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2071 // Finish filling in the InputOutputData instance on the stack
2072 masm.computeEffectiveAddress(matchPairsAddress, temp2);
2073 masm.storePtr(temp2, matchesAddress);
2074 masm.storePtr(lastIndex, startIndexAddress);
2076 // Execute the RegExp.
2077 masm.computeEffectiveAddress(
2078 Address(FramePointer, inputOutputDataStartOffset), temp2);
2079 masm.PushRegsInMask(volatileRegs);
2080 masm.setupUnalignedABICall(temp3);
2081 masm.passABIArg(temp2);
2082 masm.callWithABI(codePointer);
2083 masm.storeCallInt32Result(temp1);
2084 masm.PopRegsInMask(volatileRegs);
// |temp1| now holds a RegExpRunStatus from either the atom path or the
// compiled regexp code.
2086 masm.bind(&checkSuccess);
2087 masm.branch32(Assembler::Equal, temp1,
2088 Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
2089 masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
2090 failure);
2092 // Lazily update the RegExpStatics.
2093 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2094 RegExpRealm::offsetOfRegExpStatics();
2095 masm.loadGlobalObjectData(temp1);
2096 masm.loadPtr(Address(temp1, offset), temp1);
2097 UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2098 initialStringHeap, volatileRegs);
// NOTE(review): only a |true| return is visible in this chunk; emission
// itself cannot fail on this path.
2100 return true;
// Store |base| (or, if |base| is itself dependent, |base|'s own base) as the
// base string of |dependent|, setting the DEPENDED_ON_BIT on non-atom bases
// and emitting a post barrier for the store when |needsPostBarrier| is true.
2103 static void EmitInitDependentStringBase(MacroAssembler& masm,
2104 Register dependent, Register base,
2105 Register temp1, Register temp2,
2106 bool needsPostBarrier) {
2107 // Determine the base string to use and store it in temp2.
2108 Label notDependent, markedDependedOn;
2109 masm.load32(Address(base, JSString::offsetOfFlags()), temp1);
2110 masm.branchTest32(Assembler::Zero, temp1, Imm32(JSString::DEPENDENT_BIT),
2111 &notDependent);
2113 // The base is also a dependent string. Load its base to prevent chains of
2114 // dependent strings in most cases. This must either be an atom or already
2115 // have the DEPENDED_ON_BIT set.
2116 masm.loadDependentStringBase(base, temp2);
2117 masm.jump(&markedDependedOn);
2119 masm.bind(&notDependent);
2121 // The base is not a dependent string. Set the DEPENDED_ON_BIT if it's not
2122 // an atom.
2123 masm.movePtr(base, temp2);
2124 masm.branchTest32(Assembler::NonZero, temp1, Imm32(JSString::ATOM_BIT),
2125 &markedDependedOn);
2126 masm.or32(Imm32(JSString::DEPENDED_ON_BIT), temp1);
2127 masm.store32(temp1, Address(temp2, JSString::offsetOfFlags()));
2129 masm.bind(&markedDependedOn);
2131 #ifdef DEBUG
2132 // Assert the base has the DEPENDED_ON_BIT set or is an atom.
2133 Label isAppropriatelyMarked;
2134 masm.branchTest32(Assembler::NonZero,
2135 Address(temp2, JSString::offsetOfFlags()),
2136 Imm32(JSString::ATOM_BIT | JSString::DEPENDED_ON_BIT),
2137 &isAppropriatelyMarked);
2138 masm.assumeUnreachable("Base string is missing DEPENDED_ON_BIT");
2139 masm.bind(&isAppropriatelyMarked);
2140 #endif
2141 masm.storeDependentStringBase(temp2, dependent);
2143 // Post-barrier the base store. The base is still in temp2.
2144 if (needsPostBarrier) {
2145 Label done;
// No barrier needed if |dependent| is itself in the nursery, or if the
// chosen base is not in the nursery.
2146 masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
2147 masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);
2149 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2150 regsToSave.takeUnchecked(temp1);
2151 regsToSave.takeUnchecked(temp2);
2153 masm.PushRegsInMask(regsToSave);
2155 masm.mov(ImmPtr(masm.runtime()), temp1);
2157 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
2158 masm.setupUnalignedABICall(temp2);
2159 masm.passABIArg(temp1);
2160 masm.passABIArg(dependent);
2161 masm.callWithABI<Fn, PostWriteBarrier>();
2163 masm.PopRegsInMask(regsToSave);
2165 masm.bind(&done);
2166 } else {
2167 #ifdef DEBUG
// In debug builds, verify the caller was right that no barrier is needed.
2168 Label done;
2169 masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
2170 masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);
2171 masm.assumeUnreachable("Missing post barrier for dependent string base");
2172 masm.bind(&done);
2173 #endif
// Forward declaration of the character-copying helper used by
// CreateDependentString::generate below; the definition is elsewhere in this
// file.
2177 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
2178 Register len, Register byteOpScratch,
2179 CharEncoding encoding,
2180 size_t maximumLength = SIZE_MAX);
// Emits code that materializes one match substring: depending on its length
// it becomes the empty atom, a static string, a (thin or fat) inline string,
// or a dependent string pointing into the base string.
2182 class CreateDependentString {
2183 CharEncoding encoding_;
2184 Register string_;
2185 Register temp1_;
2186 Register temp2_;
2187 Label* failure_;
// Allocation fallback kinds; |fallbacks_|/|joins_| hold one label per kind.
2189 enum class FallbackKind : uint8_t {
2190 InlineString,
2191 FatInlineString,
2192 NotInlineString,
2193 Count
2195 mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
2196 fallbacks_, joins_;
2198 public:
2199 CreateDependentString(CharEncoding encoding, Register string, Register temp1,
2200 Register temp2, Label* failure)
2201 : encoding_(encoding),
2202 string_(string),
2203 temp1_(temp1),
2204 temp2_(temp2),
2205 failure_(failure) {}
2207 Register string() const { return string_; }
2208 CharEncoding encoding() const { return encoding_; }
2210 // Generate code that creates DependentString.
2211 // Caller should call generateFallback after masm.ret(), to generate
2212 // fallback path.
2213 void generate(MacroAssembler& masm, const JSAtomState& names,
2214 CompileRuntime* runtime, Register base,
2215 BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
2216 gc::Heap initialStringHeap);
2218 // Generate fallback path for creating DependentString.
2219 void generateFallback(MacroAssembler& masm);
2222 void CreateDependentString::generate(MacroAssembler& masm,
2223 const JSAtomState& names,
2224 CompileRuntime* runtime, Register base,
2225 BaseIndex startIndexAddress,
2226 BaseIndex limitIndexAddress,
2227 gc::Heap initialStringHeap) {
2228 JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
2229 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
// Allocate a string of |kind| into |string_| and initialize its flags word;
// jumps to the matching fallback label on allocation failure and rejoins at
// |joins_[kind]|.
2231 auto newGCString = [&](FallbackKind kind) {
2232 uint32_t flags = kind == FallbackKind::InlineString
2233 ? JSString::INIT_THIN_INLINE_FLAGS
2234 : kind == FallbackKind::FatInlineString
2235 ? JSString::INIT_FAT_INLINE_FLAGS
2236 : JSString::INIT_DEPENDENT_FLAGS;
2237 if (encoding_ == CharEncoding::Latin1) {
2238 flags |= JSString::LATIN1_CHARS_BIT;
2241 if (kind != FallbackKind::FatInlineString) {
2242 masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
2243 } else {
2244 masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
2245 &fallbacks_[kind]);
2247 masm.bind(&joins_[kind]);
2248 masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
2251 // Compute the string length.
2252 masm.load32(startIndexAddress, temp2_);
2253 masm.load32(limitIndexAddress, temp1_);
2254 masm.sub32(temp2_, temp1_);
2256 Label done, nonEmpty;
2258 // Zero length matches use the empty string.
2259 masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
2260 masm.movePtr(ImmGCPtr(names.empty_), string_);
2261 masm.jump(&done);
2263 masm.bind(&nonEmpty);
2265 // Complete matches use the base string.
2266 Label nonBaseStringMatch;
2267 masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
2268 masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
2269 temp1_, &nonBaseStringMatch);
2270 masm.movePtr(base, string_);
2271 masm.jump(&done);
2273 masm.bind(&nonBaseStringMatch);
2275 Label notInline;
2277 int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
2278 ? JSFatInlineString::MAX_LENGTH_LATIN1
2279 : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
2280 masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
2282 // Make a thin or fat inline string.
2283 Label stringAllocated, fatInline;
2285 int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
2286 ? JSThinInlineString::MAX_LENGTH_LATIN1
2287 : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
2288 masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
2289 &fatInline);
2290 if (encoding_ == CharEncoding::Latin1) {
2291 // One character Latin-1 strings can be loaded directly from the
2292 // static strings table.
2293 Label thinInline;
2294 masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
2296 static_assert(
2297 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
2298 "Latin-1 strings can be loaded from static strings");
2300 masm.loadStringChars(base, temp1_, encoding_);
2301 masm.loadChar(temp1_, temp2_, temp1_, encoding_);
2303 masm.lookupStaticString(temp1_, string_, runtime->staticStrings());
2305 masm.jump(&done);
2307 masm.bind(&thinInline);
2310 newGCString(FallbackKind::InlineString);
2311 masm.jump(&stringAllocated);
2313 masm.bind(&fatInline);
2314 { newGCString(FallbackKind::FatInlineString); }
2315 masm.bind(&stringAllocated);
2317 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
// Spill |string_| and |base| so they can serve as extra scratch registers
// during the character copy below; restored once the copy is done.
2319 masm.push(string_);
2320 masm.push(base);
2322 MOZ_ASSERT(startIndexAddress.base == FramePointer,
2323 "startIndexAddress is still valid after stack pushes");
2325 // Load chars pointer for the new string.
2326 masm.loadInlineStringCharsForStore(string_, string_);
2328 // Load the source characters pointer.
2329 masm.loadStringChars(base, temp2_, encoding_);
2330 masm.load32(startIndexAddress, base);
2331 masm.addToCharPtr(temp2_, base, encoding_);
2333 CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
2335 masm.pop(base);
2336 masm.pop(string_);
2338 masm.jump(&done);
2341 masm.bind(&notInline);
2344 // Make a dependent string.
2345 // Warning: string may be tenured (if the fallback case is hit), so
2346 // stores into it must be post barriered.
2347 newGCString(FallbackKind::NotInlineString);
2349 masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
2351 masm.loadNonInlineStringChars(base, temp1_, encoding_);
2352 masm.load32(startIndexAddress, temp2_);
2353 masm.addToCharPtr(temp1_, temp2_, encoding_);
2354 masm.storeNonInlineStringChars(temp1_, string_);
2356 EmitInitDependentStringBase(masm, string_, base, temp1_, temp2_,
2357 /* needsPostBarrier = */ true);
2360 masm.bind(&done);
2363 void CreateDependentString::generateFallback(MacroAssembler& masm) {
2364 JitSpew(JitSpew_Codegen,
2365 "# Emitting CreateDependentString fallback (encoding=%s)",
2366 (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
// Each fallback calls into C++ to allocate the string, then rejoins the
// inline path at the corresponding |joins_| label.
2368 LiveRegisterSet regsToSave(RegisterSet::Volatile());
2369 regsToSave.takeUnchecked(string_);
2370 regsToSave.takeUnchecked(temp2_);
2372 for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
2373 masm.bind(&fallbacks_[kind]);
2375 masm.PushRegsInMask(regsToSave);
2377 using Fn = void* (*)(JSContext* cx);
2378 masm.setupUnalignedABICall(string_);
2379 masm.loadJSContext(string_);
2380 masm.passABIArg(string_);
2381 if (kind == FallbackKind::FatInlineString) {
2382 masm.callWithABI<Fn, AllocateFatInlineString>();
2383 } else {
2384 masm.callWithABI<Fn, AllocateDependentString>();
2386 masm.storeCallPointerResult(string_);
2388 masm.PopRegsInMask(regsToSave);
// A null result means the C++ allocation failed.
2390 masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
2392 masm.jump(&joins_[kind]);
2396 // Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
2397 // but RegExpExecMatch also has to load and update .lastIndex for global/sticky
2398 // regular expressions.
2399 static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
2400 gc::Heap initialStringHeap,
2401 bool isExecMatch) {
2402 if (isExecMatch) {
2403 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
2404 } else {
2405 JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
2408 // |initialStringHeap| could be stale after a GC.
2409 JS::AutoCheckCannotGC nogc(cx);
2411 Register regexp = RegExpMatcherRegExpReg;
2412 Register input = RegExpMatcherStringReg;
2413 Register lastIndex = RegExpMatcherLastIndexReg;
2414 ValueOperand result = JSReturnOperand;
2416 // We are free to clobber all registers, as LRegExpMatcher is a call
2417 // instruction.
2418 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2419 regs.take(input);
2420 regs.take(regexp);
2421 regs.take(lastIndex);
2423 Register temp1 = regs.takeAny();
2424 Register temp2 = regs.takeAny();
2425 Register temp3 = regs.takeAny();
2426 Register maybeTemp4 = InvalidReg;
2427 if (!regs.empty()) {
2428 // There are not enough registers on x86.
2429 maybeTemp4 = regs.takeAny();
2431 Register maybeTemp5 = InvalidReg;
2432 if (!regs.empty()) {
2433 // There are not enough registers on x86.
2434 maybeTemp5 = regs.takeAny();
2437 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
2438 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
2440 TempAllocator temp(&cx->tempLifoAlloc());
2441 JitContext jcx(cx);
2442 StackMacroAssembler masm(cx, temp);
2443 AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
2445 #ifdef JS_USE_LINK_REGISTER
2446 masm.pushReturnAddress();
2447 #endif
2448 masm.push(FramePointer);
2449 masm.moveStackPtrTo(FramePointer);
2451 Label notFoundZeroLastIndex;
2452 if (isExecMatch) {
2453 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
2456 // The InputOutputData is placed above the frame pointer and return address on
2457 // the stack.
2458 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2460 Label notFound, oolEntry;
2461 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2462 temp3, inputOutputDataStartOffset,
2463 initialStringHeap, &notFound, &oolEntry)) {
2464 return nullptr;
2467 // If a regexp has named captures, fall back to the OOL stub, which
2468 // will end up calling CreateRegExpMatchResults.
2469 Register shared = temp2;
2470 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
2471 RegExpObject::SHARED_SLOT)),
2472 shared, JSVAL_TYPE_PRIVATE_GCTHING);
2473 masm.branchPtr(Assembler::NotEqual,
2474 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
2475 ImmWord(0), &oolEntry);
2477 // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
2478 masm.branchTest32(Assembler::NonZero,
2479 Address(shared, RegExpShared::offsetOfFlags()),
2480 Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
2482 Address pairCountAddress =
2483 RegExpPairCountAddress(masm, inputOutputDataStartOffset);
2485 // Construct the result.
2486 Register object = temp1;
2488 // In most cases, the array will have just 1-2 elements, so we optimize for
2489 // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
2490 // because two slots are used for the elements header).
2492 // Load the array length in temp2 and the shape in temp3.
2493 Label allocated;
2494 masm.load32(pairCountAddress, temp2);
2495 size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
2496 RegExpRealm::offsetOfNormalMatchResultShape();
2497 masm.loadGlobalObjectData(temp3);
2498 masm.loadPtr(Address(temp3, offset), temp3);
// Emit an inline allocation of the result array with |elementCapacity|
// fixed elements; jumps to |oolEntry| on allocation failure.
2500 auto emitAllocObject = [&](size_t elementCapacity) {
2501 gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
2502 MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
2503 kind = ForegroundToBackgroundAllocKind(kind);
2505 #ifdef DEBUG
2506 // Assert all of the available slots are used for |elementCapacity|
2507 // elements.
2508 size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
2509 MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
2510 #endif
2512 constexpr size_t numUsedDynamicSlots =
2513 RegExpRealm::MatchResultObjectSlotSpan;
2514 constexpr size_t numDynamicSlots =
2515 RegExpRealm::MatchResultObjectNumDynamicSlots;
2516 constexpr size_t arrayLength = 1;
2517 masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
2518 arrayLength, elementCapacity,
2519 numUsedDynamicSlots, numDynamicSlots,
2520 kind, gc::Heap::Default, &oolEntry);
2523 Label moreThan2;
2524 masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
2525 emitAllocObject(2);
2526 masm.jump(&allocated);
2528 Label moreThan6;
2529 masm.bind(&moreThan2);
2530 masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
2531 emitAllocObject(6);
2532 masm.jump(&allocated);
2534 masm.bind(&moreThan6);
2535 static_assert(RegExpObject::MaxPairCount == 14);
2536 emitAllocObject(RegExpObject::MaxPairCount);
2538 masm.bind(&allocated);
2541 // clang-format off
2543 * [SMDOC] Stack layout for the RegExpMatcher stub
2545 * +---------------+
2546 * FramePointer +-----> |Caller-FramePtr|
2547 * +---------------+
2548 * |Return-Address |
2549 * +---------------+
2550 * inputOutputDataStartOffset +-----> +---------------+
2551 * |InputOutputData|
2552 * +---------------+
2553 * +---------------+
2554 * | MatchPairs |
2555 * pairsCountAddress +-----------> count |
2556 * | pairs |
2557 * | |
2558 * +---------------+
2559 * pairsVectorStartOffset +-----> +---------------+
2560 * | MatchPair |
2561 * matchPairStart +------------> start | <-------+
2562 * matchPairLimit +------------> limit | | Reserved space for
2563 * +---------------+ | `RegExpObject::MaxPairCount`
2564 * . | MatchPair objects.
2565 * . |
2566 * . | `count` objects will be
2567 * +---------------+ | initialized and can be
2568 * | MatchPair | | accessed below.
2569 * | start | <-------+
2570 * | limit |
2571 * +---------------+
2573 // clang-format on
2575 static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
2576 "MatchPair consists of two int32 values representing the start"
2577 "and the end offset of the match");
2579 int32_t pairsVectorStartOffset =
2580 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2582 // Incremented by one below for each match pair.
2583 Register matchIndex = temp2;
2584 masm.move32(Imm32(0), matchIndex);
2586 // The element in which to store the result of the current match.
2587 size_t elementsOffset = NativeObject::offsetOfFixedElements();
2588 BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
2590 // The current match pair's "start" and "limit" member.
2591 BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
2592 pairsVectorStartOffset + MatchPair::offsetOfStart());
2593 BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
2594 pairsVectorStartOffset + MatchPair::offsetOfLimit());
2596 Label* depStrFailure = &oolEntry;
2597 Label restoreRegExpAndLastIndex;
2599 Register temp4;
2600 if (maybeTemp4 == InvalidReg) {
2601 depStrFailure = &restoreRegExpAndLastIndex;
2603 // We don't have enough registers for a fourth temporary. Reuse |regexp|
2604 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2605 masm.push(regexp);
2606 temp4 = regexp;
2607 } else {
2608 temp4 = maybeTemp4;
2611 Register temp5;
2612 if (maybeTemp5 == InvalidReg) {
2613 depStrFailure = &restoreRegExpAndLastIndex;
2615 // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
2616 // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
2617 masm.push(lastIndex);
2618 temp5 = lastIndex;
2619 } else {
2620 temp5 = maybeTemp5;
// Undo the pushes above, in reverse order of the pushes.
2623 auto maybeRestoreRegExpAndLastIndex = [&]() {
2624 if (maybeTemp5 == InvalidReg) {
2625 masm.pop(lastIndex);
2627 if (maybeTemp4 == InvalidReg) {
2628 masm.pop(regexp);
2632 // Loop to construct the match strings. There are two different loops,
2633 // depending on whether the input is a Two-Byte or a Latin-1 string.
2634 CreateDependentString depStrs[]{
2635 {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
2636 {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
2640 Label isLatin1, done;
2641 masm.branchLatin1String(input, &isLatin1);
2643 for (auto& depStr : depStrs) {
2644 if (depStr.encoding() == CharEncoding::Latin1) {
2645 masm.bind(&isLatin1);
2648 Label matchLoop;
2649 masm.bind(&matchLoop);
2651 static_assert(MatchPair::NoMatch == -1,
2652 "MatchPair::start is negative if no match was found");
2654 Label isUndefined, storeDone;
2655 masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
2656 &isUndefined);
2658 depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
2659 input, matchPairStart, matchPairLimit,
2660 initialStringHeap);
2662 // Storing into nursery-allocated results object's elements; no post
2663 // barrier.
2664 masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
2665 masm.jump(&storeDone);
2667 masm.bind(&isUndefined);
2668 { masm.storeValue(UndefinedValue(), objectMatchElement); }
2669 masm.bind(&storeDone);
2671 masm.add32(Imm32(1), matchIndex);
2672 masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
2673 &done);
2674 masm.jump(&matchLoop);
2677 #ifdef DEBUG
2678 masm.assumeUnreachable("The match string loop doesn't fall through.");
2679 #endif
2681 masm.bind(&done);
2684 maybeRestoreRegExpAndLastIndex();
2686 // Fill in the rest of the output object.
2687 masm.store32(
2688 matchIndex,
2689 Address(object,
2690 elementsOffset + ObjectElements::offsetOfInitializedLength()));
2691 masm.store32(
2692 matchIndex,
2693 Address(object, elementsOffset + ObjectElements::offsetOfLength()));
2695 Address firstMatchPairStartAddress(
2696 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
2697 Address firstMatchPairLimitAddress(
2698 FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
2700 static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
2701 "First slot holds the 'index' property");
2702 static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
2703 "Second slot holds the 'input' property");
2705 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
// Store the 'index' property (start offset of the overall match).
2707 masm.load32(firstMatchPairStartAddress, temp3);
2708 masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
2710 // No post barrier needed (address is within nursery object.)
2711 masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
2713 // For the ExecMatch stub, if the regular expression is global or sticky, we
2714 // have to update its .lastIndex slot.
2715 if (isExecMatch) {
2716 MOZ_ASSERT(object != lastIndex);
2717 Label notGlobalOrSticky;
2718 masm.branchTest32(Assembler::Zero, flagsSlot,
2719 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2720 &notGlobalOrSticky);
2721 masm.load32(firstMatchPairLimitAddress, lastIndex);
2722 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
2723 masm.bind(&notGlobalOrSticky);
2726 // All done!
2727 masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
2728 masm.pop(FramePointer);
2729 masm.ret();
2731 masm.bind(&notFound);
2732 if (isExecMatch) {
2733 Label notGlobalOrSticky;
2734 masm.branchTest32(Assembler::Zero, flagsSlot,
2735 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
2736 &notGlobalOrSticky);
2737 masm.bind(&notFoundZeroLastIndex);
2738 masm.storeValue(Int32Value(0), lastIndexSlot);
2739 masm.bind(&notGlobalOrSticky);
// A failed match returns null to the caller.
2741 masm.moveValue(NullValue(), result);
2742 masm.pop(FramePointer);
2743 masm.ret();
2745 // Fallback paths for CreateDependentString.
2746 for (auto& depStr : depStrs) {
2747 depStr.generateFallback(masm);
2750 // Fall-through to the ool entry after restoring the registers.
2751 masm.bind(&restoreRegExpAndLastIndex);
2752 maybeRestoreRegExpAndLastIndex();
2754 // Use an undefined value to signal to the caller that the OOL stub needs to
2755 // be called.
2756 masm.bind(&oolEntry);
2757 masm.moveValue(UndefinedValue(), result);
2758 masm.pop(FramePointer);
2759 masm.ret();
2761 Linker linker(masm);
2762 JitCode* code = linker.newCode(cx, CodeKind::Other);
2763 if (!code) {
2764 return nullptr;
2767 const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
2768 CollectPerfSpewerJitCodeProfile(code, name);
2769 #ifdef MOZ_VTUNE
2770 vtune::MarkStub(code, name);
2771 #endif
2773 return code;
// Entry point for the non-exec-match (RegExpMatcher) variant of the shared
// stub; returns nullptr on failure.
2776 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2777 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2778 /* isExecMatch = */ false);
// Entry point for the exec-match (RegExpExecMatch) variant of the shared
// stub; returns nullptr on failure.
2781 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2782 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2783 /* isExecMatch = */ true);
// Out-of-line path for LRegExpMatcher, taken when the RegExpMatcher stub
// returns |undefined|; performs the match with a VM call instead.
2786 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2787 LRegExpMatcher* lir_;
2789 public:
2790 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2792 void accept(CodeGenerator* codegen) override {
2793 codegen->visitOutOfLineRegExpMatcher(this);
2796 LRegExpMatcher* lir() const { return lir_; }
2799 void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
2800 LRegExpMatcher* lir = ool->lir();
2801 Register lastIndex = ToRegister(lir->lastIndex());
2802 Register input = ToRegister(lir->string());
2803 Register regexp = ToRegister(lir->regexp());
// Grab any scratch register that doesn't alias the three inputs.
2805 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2806 regs.take(lastIndex);
2807 regs.take(input);
2808 regs.take(regexp);
2809 Register temp = regs.takeAny();
// Compute the address of the on-stack MatchPairs, which sits just above the
// InputOutputData (see the stub's stack layout).
2811 masm.computeEffectiveAddress(
2812 Address(masm.getStackPointer(), InputOutputDataSize), temp);
2814 pushArg(temp);
2815 pushArg(lastIndex);
2816 pushArg(input);
2817 pushArg(regexp);
2819 // We are not using oolCallVM because we are in a Call, and the live
2820 // registers are already saved by the register allocator.
2821 using Fn =
2822 bool (*)(JSContext*, HandleObject regexp, HandleString input,
2823 int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
2824 callVM<Fn, RegExpMatcherRaw>(lir);
2826 masm.jump(ool->rejoin());
2829 void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
2830 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2831 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2832 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
2833 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
// The stub's fixed input registers must be distinct from the JS return
// registers on both value-representation configurations.
2835 #if defined(JS_NUNBOX32)
2836 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2837 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2838 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2839 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2840 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
2841 static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
2842 #elif defined(JS_PUNBOX64)
2843 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2844 static_assert(RegExpMatcherStringReg != JSReturnReg);
2845 static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
2846 #endif
// Reserve the stack space the stub expects for InputOutputData/MatchPairs.
2848 masm.reserveStack(RegExpReservedStack);
2850 OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
2851 addOutOfLineCode(ool, lir->mir());
2853 const JitZone* jitZone = gen->realm->zone()->jitZone();
2854 JitCode* regExpMatcherStub =
2855 jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
2856 masm.call(regExpMatcherStub);
// The stub returns |undefined| to request the out-of-line VM call.
2857 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2858 masm.bind(ool->rejoin());
2860 masm.freeStack(RegExpReservedStack);
// OOL code holder for LRegExpExecMatch; dispatches to
// visitOutOfLineRegExpExecMatch.
class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecMatch* lir_;

 public:
  explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecMatch(this);
  }

  LRegExpExecMatch* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecMatch: calls the
// RegExpBuiltinExecMatchFromJit VM function when the fast-path stub signals
// failure (see visitRegExpExecMatch).
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Any register not holding one of the inputs can serve as a temp.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // Pass a pointer to the MatchPairs, located directly above the
  // InputOutputData in the stack area reserved by visitRegExpExecMatch.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
// Emits the fast path for the builtin RegExp exec-match operation: call the
// per-zone RegExpExecMatch stub, falling back to the OOL VM call when the
// stub returns the Undefined sentinel value.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The stub's fixed input registers must not alias the boxed return value
  // registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve stack space for the InputOutputData/MatchPairs used by both the
  // stub and the OOL path.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecMatchStub =
      jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecMatchStub);
  // Undefined means the stub could not complete; retry in C++.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
// Generates the per-zone RegExpSearcher stub. The stub executes the compiled
// regexp and returns the match's start index in ReturnReg — or
// RegExpSearcherResultNotFound / RegExpSearcherResultFailed — storing the
// match's limit (end index) in cx->regExpSearcherLastLimit so that
// LRegExpSearcherLastLimit can retrieve it later. Returns nullptr on OOM.
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpSearcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *                                    |       count   |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | Only a single object will
   *                                    +---------------+          | be initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is no longer needed at this point and is reused as a scratch
  // register for the JSContext pointer.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
// OOL code holder for LRegExpSearcher; dispatches to
// visitOutOfLineRegExpSearcher.
class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpSearcher: calls RegExpSearcherRaw in the VM
// when the fast-path stub returns RegExpSearcherResultFailed.
void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  // Any register not holding one of the inputs can serve as a temp.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // Pass a pointer to the MatchPairs, located directly above the
  // InputOutputData in the stack area reserved by visitRegExpSearcher.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and that live
  // registers are already saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs* pairs, int32_t* result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emits the fast path for regexp searching: call the per-zone RegExpSearcher
// stub, falling back to the OOL VM call when the stub returns
// RegExpSearcherResultFailed.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The stub's fixed input registers must not alias the int32 result register.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve stack space for the InputOutputData/MatchPairs used by both the
  // stub and the OOL path.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpSearcherStub =
      jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
// Loads (and clears) the match limit stored by the most recent
// RegExpSearcher stub call from cx->regExpSearcherLastLimit.
void CodeGenerator::visitRegExpSearcherLastLimit(
    LRegExpSearcherLastLimit* lir) {
  Register result = ToRegister(lir->output());
  Register scratch = ToRegister(lir->temp0());

  masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
}
// Generates the per-zone RegExpExecTest stub. The stub executes the compiled
// regexp and returns 1/0 in ReturnReg for found/not-found — or
// RegExpExecTestResultFailed to request a retry in C++ — updating the
// regexp's .lastIndex slot when the regexp is global or sticky. Returns
// nullptr on OOM.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  masm.reserveStack(RegExpReservedStack);

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
  // before calling the stub. For RegExpExecTest we call the stub before
  // reserving stack space, so the offset of the InputOutputData relative to the
  // frame pointer is negative.
  constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);

  // On ARM64, load/store instructions can encode an immediate offset in the
  // range [-256, 4095]. If we ever fail this assertion, it would be more
  // efficient to store the data above the frame pointer similar to
  // RegExpMatcher and RegExpSearcher.
  static_assert(inputOutputDataStartOffset >= -256);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Found: result = 1; for global/sticky, .lastIndex = match limit.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // Not found: result = 0; for global/sticky, .lastIndex = 0.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Execution was skipped based on lastIndex: report not-found and reset
  // .lastIndex.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.freeStack(RegExpReservedStack);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
// OOL code holder for LRegExpExecTest; dispatches to
// visitOutOfLineRegExpExecTest.
class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpExecTest* lir_;

 public:
  explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpExecTest(this);
  }

  LRegExpExecTest* lir() const { return lir_; }
};
// Out-of-line fallback for LRegExpExecTest: calls the
// RegExpBuiltinExecTestFromJit VM function when the fast-path stub returns
// RegExpExecTestResultFailed.
void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
  LRegExpExecTest* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                      HandleString input, bool* result);
  callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

  masm.jump(ool->rejoin());
}
// Emits the fast path for the builtin RegExp exec-test operation: call the
// per-zone RegExpExecTest stub, falling back to the OOL VM call when the
// stub returns RegExpExecTestResultFailed. Note: unlike visitRegExpMatcher /
// visitRegExpSearcher, the stub reserves its own stack space.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The stub's fixed input registers must not alias the result register.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpExecTestStub =
      jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpExecTestStub);

  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());
}
3297 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3298 Register regexp = ToRegister(ins->regexp());
3299 Register input = ToRegister(ins->input());
3300 Register output = ToRegister(ins->output());
3302 using Fn =
3303 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3304 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3305 ins, ArgList(regexp, input), StoreRegisterTo(output));
3307 // Load RegExpShared in |output|.
3308 Label vmCall;
3309 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3311 // Return true iff pairCount > 1.
3312 Label returnTrue;
3313 masm.branch32(Assembler::Above,
3314 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3315 &returnTrue);
3316 masm.move32(Imm32(0), output);
3317 masm.jump(ool->rejoin());
3319 masm.bind(&returnTrue);
3320 masm.move32(Imm32(1), output);
3322 masm.bind(ool->rejoin());
// OOL code holder for LRegExpPrototypeOptimizable; dispatches to
// visitOutOfLineRegExpPrototypeOptimizable.
class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }

  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};
// Sets |output| to 1 if the realm's RegExp.prototype is in its optimizable
// (unmodified) state, per the inline branchIfNotRegExpPrototypeOptimizable
// check; otherwise falls back to the OOL ABI call to decide.
void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
                                             ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line slow path for LRegExpPrototypeOptimizable: performs a raw ABI
// call (not a VM call, so volatile registers are saved manually) and stores
// the boolean result in |output|.
void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// OOL code holder for LRegExpInstanceOptimizable; dispatches to
// visitOutOfLineRegExpInstanceOptimizable.
class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }

  LRegExpInstanceOptimizable* ins() const { return ins_; }
};
// Sets |output| to 1 if the given RegExp instance is in its optimizable
// (unmodified) state, per the inline branchIfNotRegExpInstanceOptimizable
// check; otherwise falls back to the OOL ABI call to decide.
void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  const GlobalObject* global = gen->realm->maybeGlobal();
  MOZ_ASSERT(global);
  masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}
// Out-of-line slow path for LRegExpInstanceOptimizable: performs a raw ABI
// call (not a VM call, so volatile registers are saved manually) and stores
// the boolean result in |output|.
void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
  masm.setupAlignedABICall();
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}
// Emits a linear scan over |str|'s characters (in the given encoding) that
// sets |output| to the index of the first '$' character, or to -1 if there is
// none. |len| must be the string's length and must be > 0 (asserted in debug
// builds); |temp0| and |temp1| are clobbered.
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  // |output| doubles as the loop index.
  masm.move32(Imm32(0), output);

  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  // Scanned the whole string without finding '$'.
  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}
// Computes the index of the first '$' in a (linear) string, dispatching on
// the string's character encoding; ropes take the VM fallback.
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  // Ropes don't have contiguous chars; use the VM call instead.
  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Emits a VM call for string replacement. Each of the three operands may be
// either a constant string (pushed as an immediate GC pointer) or a register.
// Dispatches to the flat- or pattern-replacement VM function per the MIR.
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}
// Attaches an IonBinaryArithIC for a value-producing binary operation. The
// specific JSOp is recovered from the bytecode at the instruction's resume
// point; only arithmetic/bitwise/shift ops are expected here.
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
  ValueOperand output = ToOutValue(lir);

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Pow:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}
// Attaches an IonCompareIC for a boolean-producing binary operation. The
// specific JSOp is recovered from the bytecode at the instruction's resume
// point; only comparison ops are expected here.
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
  Register output = ToRegister(lir->output());

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}
// Attaches an IonUnaryArithIC for a unary value-producing operation.
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Emits a VM call that gets or lazily creates the import.meta object for the
// given module.
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
// Emits a VM call that starts a dynamic import() for the current script with
// the given specifier and options values.
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}
// Emits inline allocation of a function clone from the template function,
// storing the environment chain into its environment slot; falls back to the
// js::Lambda VM call when nursery allocation fails.
void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSFunction* fun = lir->mir()->templateFunction();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));

  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.

  masm.bind(ool->rejoin());
}
// Emits a VM call that clones the given function with an explicit prototype
// object and the current environment chain.
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
  Register envChain = ToRegister(lir->envChain());
  Register prototype = ToRegister(lir->prototype());

  pushArg(prototype);
  pushArg(envChain);
  pushArg(ImmGCPtr(lir->mir()->function()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
  callVM<Fn, js::FunWithProtoOperation>(lir);
}
// Emits a VM call that sets a function's inferred name from a computed value,
// with the given prefix kind (e.g. get/set).
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameIndex));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}
// Records an OSI (on-stack invalidation) point and associates its call-point
// offset with the preceding instruction's safepoint.
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
// Phis are resolved into moves by the register allocator and must never reach
// code generation.
void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
// Unconditional branch to the target block.
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
// Emits a table-switch dispatch on an int32 (or int32-convertible double)
// index; non-convertible doubles jump to the default case.
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try and convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}
// Emits a table-switch dispatch on a boxed Value index: non-numbers go to the
// default case, doubles are converted to int32 (default case on failure), and
// int32s are unboxed directly.
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// Parameters are already in their assigned locations on entry; nothing to do.
void CodeGenerator::visitParameter(LParameter* lir) {}
// Loads the callee function from the frame's callee token.
void CodeGenerator::visitCallee(LCallee* lir) {
  Register callee = ToRegister(lir->output());
  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}
// Sets |output| to 1 iff the current frame is a constructing call, by
// extracting the constructing bit from the callee token.
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
// Emits a jump to the shared return label; asserts the return value is
// already in the JS return register(s).
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
// Emits the on-stack-replacement entry point: records its code offset and
// allocates the Ion frame on top of the layout Baseline set up.
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frames is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
// Load the environment chain slot out of the BaselineFrame during OSR.
void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  const ptrdiff_t frameOffset =
      BaselineFrame::reverseOffsetOfEnvironmentChain();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
// Load the arguments object slot out of the BaselineFrame during OSR.
void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
// Load a boxed Value from the BaselineFrame at the offset recorded by MIR.
void CodeGenerator::visitOsrValue(LOsrValue* value) {
  const LAllocation* frame = value->getOperand(0);
  const ValueOperand out = ToOutValue(value);

  const ptrdiff_t frameOffset = value->mir()->frameOffset();

  masm.loadValue(Address(ToRegister(frame), frameOffset), out);
}
// Load the BaselineFrame's return value if present (HAS_RVAL flag set);
// otherwise produce |undefined|.
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  // Default to |undefined|, then overwrite with the stored rval if the
  // HAS_RVAL flag is set.
  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}
// Store a typed (unboxed) argument into its outgoing stack argument slot,
// boxing it as a Value in the process.
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  const LAllocation* arg = lir->arg();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  Address dest = AddressOfPassedArg(argslot);

  if (arg->isFloatReg()) {
    masm.boxDouble(ToFloatRegister(arg), dest);
  } else if (arg->isRegister()) {
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}
// Store an already-boxed Value argument into its outgoing stack slot.
void CodeGenerator::visitStackArgV(LStackArgV* lir) {
  ValueOperand val = ToValue(lir, 0);
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  masm.storeValue(val, AddressOfPassedArg(argslot));
}
// Resolve and emit a group of parallel moves produced by register allocation.
// Each LDefinition type is mapped to the matching MoveOp width/kind before
// handing the set to the MoveResolver / MoveEmitter.
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
      case LDefinition::WASM_ANYREF:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
      case LDefinition::STACKRESULTS:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128:
        moveType = MoveOp::SIMD128;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  // x86 is register-starved: use an explicitly allocated scratch register if
  // the allocator provided one, else sort moves so memory-to-memory moves can
  // be emitted without a scratch.
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}
// Materialize a 32-bit integer constant.
void CodeGenerator::visitInteger(LInteger* lir) {
  masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
}
// Materialize a 64-bit integer constant.
void CodeGenerator::visitInteger64(LInteger64* lir) {
  masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
}
// Materialize a GC-thing pointer constant (traced via ImmGCPtr relocation).
void CodeGenerator::visitPointer(LPointer* lir) {
  masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
}
// Materialize a pointer to a nursery-allocated object. The immediate is a
// placeholder patched after linking to point at the IonScript's nursery
// object list entry; the object itself is then loaded through it.
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
  Register output = ToRegister(lir->output());
  uint32_t nurseryIndex = lir->mir()->nurseryIndex();

  // Load a pointer to the entry in IonScript's nursery objects list.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
  masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));

  // Load the JSObject*.
  masm.loadPtr(Address(output, 0), output);
}
// Keeps its operand live up to this point; emits no code.
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op.
}
// Debug-only: increment the JSContext's GC-unsafe-region counter and assert
// it stays positive (i.e. enters/leaves are balanced).
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
    LDebugEnterGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Debug-only: decrement the JSContext's GC-unsafe-region counter and assert
// it never goes negative (i.e. enters/leaves are balanced).
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
    LDebugLeaveGCUnsafeRegion* lir) {
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(-1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
// Load the object's dynamic slots pointer.
void CodeGenerator::visitSlots(LSlots* lir) {
  Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
  masm.loadPtr(slots, ToRegister(lir->output()));
}
// Load a boxed Value from a dynamic slot (input is the slots pointer).
void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
  ValueOperand dest = ToOutValue(lir);
  Register base = ToRegister(lir->input());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);

  masm.loadValue(Address(base, offset), dest);
}
// Wrap an LAllocation as a ConstantOrRegister, boxing constants as JSValues.
static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
                                               MIRType valueType) {
  if (value->isConstant()) {
    return ConstantOrRegister(value->toConstant()->toJSValue());
  }
  return TypedOrValueRegister(valueType, ToAnyRegister(value));
}
// Store a typed value into a dynamic slot, emitting a pre-barrier first when
// the MIR says the old value may need one.
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();
  ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(value, valueType, dest);
}
// Store a boxed Value into a dynamic slot, with pre-barrier when required.
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}
// Load the object's elements pointer.
void CodeGenerator::visitElements(LElements* lir) {
  Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
  masm.loadPtr(elements, ToRegister(lir->output()));
}
// Unbox the function's environment slot as an object.
void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
  Address environment(ToRegister(lir->function()),
                      JSFunction::offsetOfEnvironment());
  masm.unboxObject(environment, ToRegister(lir->output()));
}
// Load a method's [[HomeObject]] from the extended-function slot; asserts the
// slot holds an object in debug builds.
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  Register func = ToRegister(lir->function());
  Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  masm.assertFunctionIsExtended(func);
#ifdef DEBUG
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif

  masm.unboxObject(homeObject, ToRegister(lir->output()));
}
// Compute the super base: the [[HomeObject]]'s prototype, boxed as an object
// Value, or |null| when the prototype is null.
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  Register homeObject = ToRegister(lir->homeObject());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  masm.loadObjProto(homeObject, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because the prototype is guaranteed to
  // either be a JSFunction or a PlainObject, and only proxy objects can have a
  // lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), output);

  masm.bind(&done);
}
// Unwrap an MConstant object definition as a concrete object subtype T.
template <class T>
static T* ToConstantObject(MDefinition* def) {
  MOZ_ASSERT(def->isConstant());
  return &def->toConstant()->toObject().as<T>();
}
// Allocate a BlockLexicalEnvironmentObject inline from its template object,
// falling back to a VM call (createWithoutEnclosing) on allocation failure.
void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn =
      BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
  auto* ool =
      oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a ClassBodyLexicalEnvironmentObject inline from its template
// object, with an out-of-line VM fallback.
void CodeGenerator::visitNewClassBodyEnvironmentObject(
    LNewClassBodyEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
                                                    Handle<ClassBodyScope*>);
  auto* ool =
      oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a VarEnvironmentObject inline from its template object, with an
// out-of-line VM fallback.
void CodeGenerator::visitNewVarEnvironmentObject(
    LNewVarEnvironmentObject* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj =
      ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
  auto* scope = &templateObj->scope().as<VarScope>();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
  auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
      lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
// Bail out unless the object has the expected shape.
void CodeGenerator::visitGuardShape(LGuardShape* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToTempRegisterOrInvalid(guard->temp0());
  Label bail;
  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
                          obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Guard that a realm fuse is still intact. For fuses the compilation-
// dependency machinery knows about, just register a dependency; otherwise
// emit a runtime check of the fuse word and bail out if it was popped.
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
  auto fuseIndex = guard->mir()->fuseIndex();
  switch (fuseIndex) {
    case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
      addOptimizeGetIteratorFuseDependency();
      return;
    default:
      // validateAndRegisterFuseDependencies doesn't have
      // handling for this yet, actively check fuse instead.
      break;
  }

  Register temp = ToRegister(guard->temp0());
  Label bail;

  // Bake specific fuse address for Ion code, because we won't share this code
  // across realms.
  GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
  masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
  masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object's shape appears in the given shape list
// (stored in the list object's dense elements).
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp2 = ToRegister(guard->temp1());
  Register temp3 = ToRegister(guard->temp2());
  Register spectre = ToTempRegisterOrInvalid(guard->temp3());

  Label bail;
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
                              spectre, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object's prototype equals the expected object.
void CodeGenerator::visitGuardProto(LGuardProto* guard) {
  Register obj = ToRegister(guard->object());
  Register expected = ToRegister(guard->expected());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object's prototype is null (proto word is zero).
void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  masm.loadObjProto(obj, temp);

  Label bail;
  masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object is a native object.
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfNonNativeObj(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the global's generation counter still has the value the
// compilation expected.
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
  masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
                &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object is a proxy.
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(false, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out if the object is a proxy.
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestObjectIsProxy(true, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out if the proxy's handler belongs to the DOM proxy handler family.
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
  Register proxy = ToRegister(guard->proxy());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
                                    GetDOMProxyHandlerFamily(), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Get a property with a constant id from a proxy via a VM call.
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
  Register proxy = ToRegister(lir->proxy());
  Register temp = ToRegister(lir->temp0());

  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(lir);
}
// Get a property with a dynamic (Value) id from a proxy via a VM call.
void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, ProxyGetPropertyByValue>(lir);
}
// Test |has| / |hasOwn| on a proxy via a VM call, selected by the MIR flag.
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);

  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (lir->mir()->hasOwn()) {
    callVM<Fn, ProxyHasOwn>(lir);
  } else {
    callVM<Fn, ProxyHas>(lir);
  }
}
// Set a property with a constant id on a proxy via a VM call; the strict
// flag selects strict-mode error semantics.
void CodeGenerator::visitProxySet(LProxySet* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
  Register temp = ToRegister(lir->temp0());

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(lir);
}
// Set a property with a dynamic (Value) id on a proxy via a VM call.
void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
  ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(lir);
}
// Set an array's |length| property via a VM call.
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
  Register obj = ToRegister(lir->obj());
  ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(lir);
}
// Megamorphic named-property load: try the megamorphic cache first, then a
// pure (non-GC) native lookup via an ABI call; bail out if neither succeeds.
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  Label bail;
  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // Reserve a stack Value slot for the result and pass its address (vp).
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(temp3);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.movePropertyKey(lir->mir()->name(), temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);

  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  MOZ_ASSERT(!output.aliases(ReturnReg));
  masm.Pop(output);

  masm.branchIfFalseBool(ReturnReg, &bail);
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic named-property load that may have side effects: try the cache,
// then fall back to a full VM call (GetPropMaybeCached) instead of bailing.
void CodeGenerator::visitMegamorphicLoadSlotPermissive(
    LMegamorphicLoadSlotPermissive* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  masm.movePropertyKey(lir->mir()->name(), temp1);
  pushArg(temp2);
  pushArg(temp1);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callVM<Fn, GetPropMaybeCached>(lir);

  masm.bind(&cacheHit);
}
// Megamorphic keyed-property load: try the megamorphic cache, then a pure
// native lookup via an ABI call; bail out if neither succeeds.
void CodeGenerator::visitMegamorphicLoadSlotByValue(
    LMegamorphicLoadSlotByValue* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit, bail;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before bailing; restore the tracked
  // frame height on the success path.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.Pop(output);

  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// Megamorphic keyed-property load that may have side effects: try the cache,
// then fall back to a full VM call (GetElemMaybeCached) instead of bailing.
void CodeGenerator::visitMegamorphicLoadSlotByValuePermissive(
    LMegamorphicLoadSlotByValuePermissive* lir) {
  Register obj = ToRegister(lir->object());
  // NOTE(review): reuses LMegamorphicLoadSlotByValue::IdIndex rather than the
  // Permissive class's own constant — presumably the same slot index; confirm.
  ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  pushArg(temp2);
  pushArg(idVal);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callVM<Fn, GetElemMaybeCached>(lir);

  masm.bind(&cacheHit);
}
// Megamorphic named-property store: try the megamorphic set-slot cache, fall
// back to a VM call, and emit a post-write barrier on the cache-hit path when
// storing a nursery value into a tenured object.
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);

  Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  // x86 is register-starved: the no-extra-temps variant is used there.
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(lir->mir()->name(), temp0);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, SetPropertyMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-barrier: only needed when a tenured object ends up referencing a
  // nursery-allocated value.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Megamorphic |has| / |hasOwn| test: try the megamorphic cache, then a pure
// native lookup via an ABI call; bail out if neither succeeds.
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
                                        &cacheHit, lir->mir()->hasOwn());

  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  if (lir->mir()->hasOwn()) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // On failure, discard the result slot before bailing; on success, unbox the
  // boolean result out of the slot.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(Value));
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
// |has| test against a small, fixed shape: compare the (atom) key against
// each property key of the shape with an unrolled chain of branches.
void CodeGenerator::visitSmallObjectVariableKeyHasProp(
    LSmallObjectVariableKeyHasProp* lir) {
  Register id = ToRegister(lir->id());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  Label isAtom;
  masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  masm.assumeUnreachable("Expected atom input");
  masm.bind(&isAtom);
#endif

  SharedShape* shape = &lir->mir()->shape()->asShared();

  Label done, success;
  for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
    masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
                   &success);
  }
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&success);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
// Bail out if the object is any kind of (Shared)ArrayBuffer: fixed-length or
// resizable/growable, shared or not.
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
    LGuardIsNotArrayBufferMaybeShared* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&ResizableArrayBufferObject::class_), &bail);
  masm.branchPtr(Assembler::Equal, temp,
                 ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object's class is a typed array class.
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object is a fixed-length (non-resizable) typed array.
void CodeGenerator::visitGuardIsFixedLengthTypedArray(
    LGuardIsFixedLengthTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the object is a resizable-buffer-backed typed array.
void CodeGenerator::visitGuardIsResizableTypedArray(
    LGuardIsResizableTypedArray* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the proxy uses the expected handler.
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
  Register obj = ToRegister(guard->input());

  Label bail;

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr,
                 ImmPtr(guard->mir()->handler()), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
// Bail out on pointer (in)equality, direction selected by bailOnEquality().
void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
  Register input = ToRegister(guard->input());
  Register expected = ToRegister(guard->expected());

  Assembler::Condition cond =
      guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
  bailoutCmpPtr(cond, input, expected, guard->snapshot());
}
// Bail out unless the input is the expected function (pointer identity).
void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
  Register input = ToRegister(guard->input());
  Register expected = ToRegister(guard->expected());

  bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
}
// Bail out unless the string equals the expected atom (may call out, so the
// live volatile registers minus the scratch are preserved by the helper).
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
  Register str = ToRegister(guard->str());
  Register scratch = ToRegister(guard->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
  volatileRegs.takeUnchecked(scratch);

  Label bail;
  masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
                         &bail);
  bailoutFrom(&bail, guard->snapshot());
}
// Bail out unless the symbol is the expected one (pointer identity).
void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
  Register symbol = ToRegister(guard->symbol());

  bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
                guard->snapshot());
}
// Bail out unless the int32 equals the expected constant.
void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
  Register num = ToRegister(guard->num());

  bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
               guard->snapshot());
}
// Convert a string to an array index: use the string's cached index value if
// present, otherwise call GetIndexFromString and bail out on failure.
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = int32_t (*)(JSString* str);
    masm.setupAlignedABICall();
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    masm.PopRegsInMask(volatileRegs);

    // GetIndexFromString returns a negative value on failure.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }

  masm.bind(&done);
}
// Convert a string to an int32 via the masm helper; bail out on failure.
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);

  Label bail;
  masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Convert a string to a double: fast path through the string's cached index
// value, slow path through StringToNumberPure; bail out on OOM.
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
  Register str = ToRegister(lir->string());
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, temp0, &vmCall);
  masm.convertInt32ToDouble(temp0, output);
  masm.jump(&done);

  masm.bind(&vmCall);

  // Reserve stack for holding the result value of the call.
  masm.reserveStack(sizeof(double));
  masm.moveStackPtrTo(temp0);

  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(temp0);
  volatileRegs.takeUnchecked(temp1);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(str);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, StringToNumberPure>();
  masm.storeCallPointerResult(temp0);

  masm.PopRegsInMask(volatileRegs);

  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  {
    // OOM path, recovered by StringToNumberPure.
    //
    // Use addToStackPtr instead of freeStack as freeStack tracks stack height
    // flow-insensitively, and using it here would confuse the stack height
    // tracking.
    masm.addToStackPtr(Imm32(sizeof(double)));
    bailout(lir->snapshot());
  }
  masm.bind(&ok);
  masm.Pop(output);

  masm.bind(&done);
}
// Bail out of Ion code unless the object's dense-elements initialized length
// is zero, i.e. the object has no dense elements at all.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
4859 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4860 Register input = ToRegister(lir->input());
4861 Register64 output = ToOutRegister64(lir);
4863 masm.move32To64ZeroExtend(input, output);
// Shared helper: convert a string to an int64 by calling the DoStringToInt64
// VM function. A stack slot is reserved for the uint64 outparam; its address
// and the string are pushed as VM-call arguments.
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  saveLive(lir);

  // Reserve a stack slot for the result and pass its address as the outparam.
  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  // Read the result back out of the reserved slot and release it.
  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  // The output register pair is defined by this instruction; don't restore it.
  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
4886 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4887 Register input = ToRegister(lir->input());
4888 Register64 output = ToOutRegister64(lir);
4890 emitStringToInt64(lir, input, output);
// Convert a boxed Value to an int64. Accepts BigInt, Boolean and String
// inputs (tested in that order); any other type bails out of Ion code.
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
  ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
  Register temp = ToRegister(lir->temp0());
  Register64 output = ToOutRegister64(lir);

  // Number of type checks still to emit; used to route the failing branch of
  // the final check straight to |fail|.
  int checks = 3;

  Label fail, done;
  // Jump to fail if this is the last check and we fail it,
  // otherwise to the next test.
  auto emitTestAndUnbox = [&](auto testAndUnbox) {
    MOZ_ASSERT(checks > 0);

    checks--;
    Label notType;
    Label* target = checks ? &notType : &fail;

    testAndUnbox(target);

    if (checks) {
      masm.jump(&done);
      masm.bind(&notType);
    }
  };

  Register tag = masm.extractTag(input, temp);

  // BigInt.
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBigInt(Assembler::NotEqual, tag, target);
    masm.unboxBigInt(input, temp);
    masm.loadBigInt64(temp, output);
  });

  // Boolean
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBoolean(Assembler::NotEqual, tag, target);
    masm.unboxBoolean(input, temp);
    masm.move32To64ZeroExtend(temp, output);
  });

  // String
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestString(Assembler::NotEqual, tag, target);
    masm.unboxString(input, temp);
    emitStringToInt64(lir, temp, output);
  });

  MOZ_ASSERT(checks == 0);

  bailoutFrom(&fail, lir->snapshot());
  masm.bind(&done);
}
4947 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4948 Register operand = ToRegister(lir->input());
4949 Register64 output = ToOutRegister64(lir);
4951 masm.loadBigInt64(operand, output);
// Create the out-of-line VM-call fallback used when inline BigInt allocation
// fails. On 32-bit targets the 64-bit input is passed as two 32-bit halves.
// |type| selects the signed (BigInt64) or unsigned (BigUint64) VM function.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }
  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt for the 64-bit |input| and initialize it. Allocation is
// attempted inline (nursery/tenured per initialBigIntHeap()); on failure the
// out-of-line VM call created by createBigIntOutOfLine is taken. If no temp
// register was provided, one is borrowed by pushing/popping it around the
// allocation attempt.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    // No temp available: commandeer a register not aliasing input/output and
    // spill it around the allocation. Both the success and failure paths must
    // pop it before continuing so the stack stays balanced.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
// Box an int32 into a freshly allocated BigInt. Falls back to a VM call
// (CreateBigIntFromInt32) when inline GC allocation fails.
void CodeGenerator::visitInt32ToBigInt(LInt32ToBigInt* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CreateBigIntFromInt32>(
      lir, ArgList(input), StoreRegisterTo(output));

  masm.newGCBigInt(output, temp, initialBigIntHeap(), ool->entry());
  // Sign-extend the int32 into a pointer-sized digit before initializing.
  masm.move32SignExtendToPtr(input, temp);
  masm.initializeBigInt(output, temp);
  masm.bind(ool->rejoin());
}
5019 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
5020 Register64 input = ToRegister64(lir->input());
5021 Register temp = ToRegister(lir->temp0());
5022 Register output = ToRegister(lir->output());
5024 emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
5027 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
5028 ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
5029 Value expected = lir->mir()->expected();
5030 Label bail;
5031 masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
5032 bailoutFrom(&bail, lir->snapshot());
// Bail out of Ion code unless the input Value is null or undefined.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  // ScratchTagScope reserves the scratch register used to hold the split tag
  // for the duration of both tests below.
  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  masm.branchTestNull(Assembler::Equal, tag, &done);

  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
5051 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
5052 ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
5054 Label bail;
5055 masm.branchTestObject(Assembler::Equal, input, &bail);
5056 bailoutFrom(&bail, lir->snapshot());
// Bail out of Ion code unless the function's flags contain every expected
// flag and none of the unexpected flags. Either mask may be zero, in which
// case the corresponding test is skipped.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    // Required flags: bail when any of them is clear.
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    // Forbidden flags: bail when any of them is set.
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
5072 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
5073 LGuardFunctionIsNonBuiltinCtor* lir) {
5074 Register function = ToRegister(lir->function());
5075 Register temp = ToRegister(lir->temp0());
5077 Label bail;
5078 masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
5079 bailoutFrom(&bail, lir->snapshot());
5082 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
5083 Register function = ToRegister(lir->function());
5084 Register temp = ToRegister(lir->temp0());
5086 Assembler::Condition cond =
5087 lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
5089 Label bail;
5090 masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
5091 bailoutFrom(&bail, lir->snapshot());
5094 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
5095 Register function = ToRegister(lir->function());
5097 Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
5098 bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
5099 ImmGCPtr(lir->mir()->expected()), lir->snapshot());
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  // Instruction whose live volatile registers must be saved around the call.
  LInstruction* lir_;
  // The object that was written to (register or constant allocation).
  const LAllocation* object_;

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// For a known tenured cell, check the arena's buffered-cells set directly:
// jump to |exit| if the cell is already recorded, to |callVM| if the arena
// still has the shared sentinel set (so one must be allocated), otherwise set
// the cell's bit inline and jump to |exit|.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the word offset and bit mask for this cell within the set.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  // Already recorded in the store buffer: nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  // Return the temp to the pool for the caller's subsequent use.
  regs.add(temp);
}
// Emit a post write barrier for |objreg|: try cheap inline checks first
// (store-buffer bitmap for known constants, last-buffered-cell cache
// otherwise), then fall back to an ABI call into the VM. Globals skip the
// inline checks and always take the VM path, which is specialized for them.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Emit a post write barrier for an LAllocation-described object. Constant
// objects are materialized into a register and get the constant fast path
// (and the global fast path when the constant is the script's global).
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
5217 // Returns true if `def` might be allocated in the nursery.
5218 static bool ValueNeedsPostBarrier(MDefinition* def) {
5219 if (def->isBox()) {
5220 def = def->toBox()->input();
5222 if (def->type() == MIRType::Value) {
5223 return true;
5225 return NeedsPostBarrier(def->type());
// Out-of-line path that calls PostWriteElementBarrier for a dense-element
// store, preserving the instruction's live volatile registers.
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  // Volatile registers live across this OOL path; saved/restored around the
  // ABI call.
  LiveRegisterSet liveVolatileRegs_;
  // Element index (constant or register allocation).
  const LAllocation* index_;
  // Constant adjustment added to the index before the call.
  int32_t indexDiff_;
  Register obj_;
  Register scratch_;

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit the inline part of an element post write barrier: skip entirely for
// constants and non-GC typed values, skip when the object itself is in the
// nursery, and otherwise branch to the out-of-line VM path when the stored
// value is a nursery cell.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constants stored here must be tenured, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // A nursery object needs no barrier: any edge from it is found by tracing
  // the nursery itself.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Out-of-line continuation of emitElementPostWriteBarrier: saves live
// volatile registers, computes the (possibly adjusted) element index, and
// calls PostWriteElementBarrier via an unaligned ABI call.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  Register indexReg;
  if (index->isConstant()) {
    // Fold the adjustment into the constant index.
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
5332 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
5333 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5334 regs.takeUnchecked(objreg);
5335 EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
5338 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
5339 OutOfLineCallPostWriteBarrier* ool) {
5340 saveLiveVolatile(ool->lir());
5341 const LAllocation* obj = ool->object();
5342 emitPostWriteBarrier(obj);
5343 restoreLiveVolatile(ool->lir());
5345 masm.jump(ool->rejoin());
// If |maybeGlobal| is the script's own global object, skip the VM call when
// realm->globalWriteBarriered is already set (i.e. the global has been
// barriered before).
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared inline part of the typed post write barriers: skip when the object
// is in the nursery or is an already-barriered global, and take the
// out-of-line path when the stored value (of statically-known type
// |nurseryType|) is a nursery cell.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Sanity-check that the MIR value type matches the template parameter.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared inline part of the boxed-Value post write barriers: same structure
// as visitPostWriteBarrierCommon, but the stored value is a Value whose cell
// check must handle any tag.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
5430 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5431 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5432 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
5435 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5436 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5437 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
5440 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5441 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5442 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
5445 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5446 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5447 visitPostWriteBarrierCommonV(lir, ool);
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  // Instruction whose live volatile registers must be saved around the call.
  LInstruction* lir_;
  // The object whose element was written to.
  const LAllocation* object_;
  // The element index that was written.
  const LAllocation* index_;

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Out-of-line continuation for element post write barriers: materializes the
// object (possibly from a constant), then calls PostWriteElementBarrier via
// an aligned ABI call with live volatile registers preserved.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    // Load the constant object into a free volatile register.
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
5508 void CodeGenerator::visitPostWriteElementBarrierO(
5509 LPostWriteElementBarrierO* lir) {
5510 auto ool = new (alloc())
5511 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5512 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5513 ool);
5516 void CodeGenerator::visitPostWriteElementBarrierS(
5517 LPostWriteElementBarrierS* lir) {
5518 auto ool = new (alloc())
5519 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5520 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5521 ool);
5524 void CodeGenerator::visitPostWriteElementBarrierBI(
5525 LPostWriteElementBarrierBI* lir) {
5526 auto ool = new (alloc())
5527 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5528 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5529 ool);
5532 void CodeGenerator::visitPostWriteElementBarrierV(
5533 LPostWriteElementBarrierV* lir) {
5534 auto ool = new (alloc())
5535 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5536 visitPostWriteBarrierCommonV(lir, ool);
// Debug-mode assertion: crash if a post write barrier was elided but the
// store is tenured-object <- nursery-value, which would require one.
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  // Elision is fine if the object itself is in the nursery, or if the stored
  // value is not a nursery cell; falling through both branches is the bug.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Core helper for calling a JSNative from Ion: builds a NativeExitFrameLayout
// on the stack (outparam slot, callee Value, argc, exit-frame footer), makes
// the ABI call, checks for failure, and loads the result into JSReturnOperand.
// |unusedStack| is the distance from the current StackPointer to &vp[1].
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
                                   Register argContextReg, Register argUintNReg,
                                   Register argVpReg, Register tempReg,
                                   uint32_t unusedStack) {
  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  //
  // Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    // Class hooks have a dynamic callee held in a register.
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    // Enter the callee realm.
    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    // Plain native calls have a statically-known single target.
    WrappedFunction* target = call->mir()->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    // Enter the callee realm.
    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.moveStackPtrTo(argVpReg);

  // Initialize |NativeExitFrameLayout::argc_|.
  masm.Push(argUintNReg);

  // Construct native exit frame.
  //
  // |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
  // |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
  //
  // The NativeExitFrameLayout is now fully initialized.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Exit the callee realm.
  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

#ifdef DEBUG
  // Native constructors are guaranteed to return an Object value.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("native constructors don't return primitives");
    masm.bind(&notPrimitive);
  }
#endif
}
// Convenience wrapper: gathers the ABI argument registers from the LIR node,
// initializes argc, delegates to the full emitCallNative, and then unwinds
// the native exit frame, restoring the original stack height.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  uint32_t unusedStack =
      UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  // Initialize the argc register.
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);

  // Create the exit frame and call the native.
  emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
                 unusedStack);

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
5700 void CodeGenerator::visitCallNative(LCallNative* call) {
5701 WrappedFunction* target = call->getSingleTarget();
5702 MOZ_ASSERT(target);
5703 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5705 JSNative native = target->native();
5706 if (call->ignoresReturnValue() && target->hasJitInfo()) {
5707 const JSJitInfo* jitInfo = target->jitInfo();
5708 if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
5709 native = jitInfo->ignoresReturnValueMethod;
5712 emitCallNative(call, native);
// Call a class call/construct hook; the callee object is dynamic, so the
// LCallClassHook specialization of emitCallNative handles the callee push.
void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
  emitCallNative(call, call->mir()->target());
}
// Load the DOM private pointer of |obj| into |priv|. For native DOM objects
// the value lives in fixed slot 0; for DOM proxies it lives in reserved
// slot 0 behind the proxy's reserved-slots pointer.
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Emit a direct call to a DOM method (JSJitMethodOp) with a known single
// target. This hand-builds an IonDOMMethod exit frame, extracts the |this|
// object and its DOM private, and invokes the C++ method via an ABI call,
// bypassing the generic native-call path. The vp/argv layout is described in
// the signature comment below.
5749 void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
5750 WrappedFunction* target = call->getSingleTarget();
5751 MOZ_ASSERT(target);
5752 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5753 MOZ_ASSERT(target->hasJitInfo());
5754 MOZ_ASSERT(call->mir()->isCallDOMNative());
5756 int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5758 // Registers used for callWithABI() argument-passing.
5759 const Register argJSContext = ToRegister(call->getArgJSContext());
5760 const Register argObj = ToRegister(call->getArgObj());
5761 const Register argPrivate = ToRegister(call->getArgPrivate());
5762 const Register argArgs = ToRegister(call->getArgArgs());
5764 DebugOnly<uint32_t> initialStack = masm.framePushed();
5766 masm.checkStackAlignment();
5768 // DOM methods have the signature:
5769 // bool (*)(JSContext*, HandleObject, void* private, const
5770 // JSJitMethodCallArgs& args)
5771 // Where args is initialized from an argc and a vp, vp[0] is space for an
5772 // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
5773 // function arguments. Note that args stores the argv, not the vp, and
5774 // argv == vp + 2.
5776 // Nestle the stack up against the pushed arguments, leaving StackPointer at
5777 // &vp[1]
5778 masm.adjustStack(unusedStack);
5779 // argObj is filled with the extracted object, then returned.
5780 Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
5781 MOZ_ASSERT(obj == argObj);
5783 // Push a Value containing the callee object: natives are allowed to access
5784 // their callee before setting the return value. After this the StackPointer
5785 // points to &vp[0].
5786 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5788 // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
5789 // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
5790 // StackPointer.
5791 static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
5792 static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
5793 IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
5794 masm.computeEffectiveAddress(
5795 Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
5797 LoadDOMPrivate(masm, obj, argPrivate,
5798 static_cast<MCallDOMNative*>(call->mir())->objectKind());
5800 // Push argc from the call instruction into what will become the IonExitFrame
5801 masm.Push(Imm32(call->numActualArgs()));
5803 // Push our argv onto the stack
5804 masm.Push(argArgs);
5805 // And store our JSJitMethodCallArgs* in argArgs.
5806 masm.moveStackPtrTo(argArgs);
5808 // Push |this| object for passing HandleObject. We push after argc to
5809 // maintain the same sp-relative location of the object pointer with other
5810 // DOMExitFrames.
5811 masm.Push(argObj);
5812 masm.moveStackPtrTo(argObj);
5814 if (call->mir()->maybeCrossRealm()) {
5815 // We use argJSContext as scratch register here.
5816 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
5817 masm.switchToObjectRealm(argJSContext, argJSContext);
5820 // Construct native exit frame.
5821 uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
5822 masm.loadJSContext(argJSContext);
5823 masm.enterFakeExitFrame(argJSContext, argJSContext,
5824 ExitFrameType::IonDOMMethod);
5826 markSafepointAt(safepointOffset, call);
5828 // Construct and execute call.
5829 masm.setupAlignedABICall();
5830 masm.loadJSContext(argJSContext);
5831 masm.passABIArg(argJSContext);
5832 masm.passABIArg(argObj);
5833 masm.passABIArg(argPrivate);
5834 masm.passABIArg(argArgs);
5835 ensureOsiSpace();
5836 masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
5837 ABIType::General,
5838 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// Infallible methods can't fail, so skip the ReturnReg success check and
// read the result straight out of vp[0].
5840 if (target->jitInfo()->isInfallible) {
5841 masm.loadValue(Address(masm.getStackPointer(),
5842 IonDOMMethodExitFrameLayout::offsetOfResult()),
5843 JSReturnOperand);
5844 } else {
5845 // Test for failure.
5846 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
5848 // Load the outparam vp[0] into output register(s).
5849 masm.loadValue(Address(masm.getStackPointer(),
5850 IonDOMMethodExitFrameLayout::offsetOfResult()),
5851 JSReturnOperand);
5854 // Switch back to the current realm if needed. Note: if the DOM method threw
5855 // an exception, the exception handler will do this.
5856 if (call->mir()->maybeCrossRealm()) {
5857 static_assert(!JSReturnOperand.aliases(ReturnReg),
5858 "Clobbering ReturnReg should not affect the return value");
5859 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5862 // Until C++ code is instrumented against Spectre, prevent speculative
5863 // execution from returning any private data.
5864 if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
5865 masm.speculationBarrier();
5868 // The next instruction is removing the footer of the exit frame, so there
5869 // is no need for leaveFakeExitFrame.
5871 // Move the StackPointer back to its original location, unwinding the native
5872 // exit frame.
5873 masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
5874 MOZ_ASSERT(masm.framePushed() == initialStack);
// Fetch a self-hosting intrinsic value by name through a VM call
// (GetIntrinsicValue). The result is returned in the VM call's out-value.
5877 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5878 pushArg(ImmGCPtr(lir->mir()->name()));
5880 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5881 callVM<Fn, GetIntrinsicValue>(lir);
// Slow-path call through the jit::InvokeFunction VM wrapper for a call whose
// |argc| arguments are already laid out on the stack. |unusedStack| is the
// padding between the current stack pointer and the argument vector; it is
// freed before the VM call (so the VM sees argv at the stack top) and
// re-reserved afterwards so framePushed() bookkeeping stays consistent.
5884 void CodeGenerator::emitCallInvokeFunction(
5885 LInstruction* call, Register calleereg, bool constructing,
5886 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5887 // Nestle %esp up to the argument vector.
5888 // Each path must account for framePushed_ separately, for callVM to be valid.
5889 masm.freeStack(unusedStack);
5891 pushArg(masm.getStackPointer()); // argv.
5892 pushArg(Imm32(argc)); // argc.
5893 pushArg(Imm32(ignoresReturnValue));
5894 pushArg(Imm32(constructing)); // constructing.
5895 pushArg(calleereg); // JSFunction*.
5897 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5898 MutableHandleValue);
5899 callVM<Fn, jit::InvokeFunction>(call);
5901 // Un-nestle %esp from the argument vector. No prefix was pushed.
5902 masm.reserveStack(unusedStack);
// Emit a call with an unknown callee: delegate to the shared IonGenericCall
// trampoline (Call or Construct flavor), which handles function guards,
// realm switching, argument rectification, natives, fun_call and bound
// functions. See JitRuntime::generateIonGenericCallStub, which this code is
// tightly coupled with.
5905 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5906 // The callee is passed straight through to the trampoline.
5907 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5909 Register argcReg = ToRegister(call->getArgc());
5910 uint32_t unusedStack =
5911 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5913 // Known-target case is handled by LCallKnown.
5914 MOZ_ASSERT(!call->hasSingleTarget());
5916 masm.checkStackAlignment();
5918 masm.move32(Imm32(call->numActualArgs()), argcReg);
5920 // Nestle the StackPointer up to the argument vector.
5921 masm.freeStack(unusedStack);
5922 ensureOsiSpace();
5924 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5925 : IonGenericCallKind::Call;
5927 TrampolinePtr genericCallStub =
5928 gen->jitRuntime()->getIonGenericCallStub(kind);
5929 uint32_t callOffset = masm.callJit(genericCallStub);
5930 markSafepointAt(callOffset, call);
// The trampoline switches into the callee's realm but does not switch back;
// the caller (here) is responsible for restoring its own realm.
5932 if (call->mir()->maybeCrossRealm()) {
5933 static_assert(!JSReturnOperand.aliases(ReturnReg),
5934 "ReturnReg available as scratch after scripted calls");
5935 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5938 // Restore stack pointer.
5939 masm.setFramePushed(frameSize());
5940 emitRestoreStackPointerFromFP();
5942 // If the return value of the constructing function is Primitive,
5943 // replace the return value with the Object from CreateThis.
5944 if (call->mir()->isConstructing()) {
5945 Label notPrimitive;
5946 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5947 &notPrimitive);
5948 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5949 JSReturnOperand);
5950 #ifdef DEBUG
5951 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5952 &notPrimitive);
5953 masm.assumeUnreachable("CreateThis creates an object");
5954 #endif
5955 masm.bind(&notPrimitive);
// Shift the |argc| Values currently on the stack down by one word (8 bytes),
// overwriting the bottom-most value. Used by the generic-call trampoline's
// fun_call and bound-function paths to drop/realign arguments. Clobbers
// |curr|, |end| and |scratch|; jumps to |done| when the copy is complete
// (there is no fallthrough exit from the loop).
5959 void JitRuntime::generateIonGenericCallArgumentsShift(
5960 MacroAssembler& masm, Register argc, Register curr, Register end,
5961 Register scratch, Label* done) {
// The word-at-a-time copy below assumes a Value is exactly one 8-byte word.
5962 static_assert(sizeof(Value) == 8);
5963 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5964 // overwriting the first value.
5966 // Initialize `curr` to the destination of the first copy, and `end` to the
5967 // final value of curr.
5968 masm.moveStackPtrTo(curr);
5969 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
5971 Label loop;
5972 masm.bind(&loop);
5973 masm.branchPtr(Assembler::Equal, curr, end, done);
5974 masm.loadPtr(Address(curr, 8), scratch);
5975 masm.storePtr(scratch, Address(curr, 0));
5976 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5977 masm.jump(&loop);
// Generate the shared trampoline used by LCallGeneric for calls/constructs
// with an unknown callee. The large comment below documents the entry
// contract; the stub dispatches between jitted functions (with optional
// argument rectification), natives, fun_call forwarding, bound functions,
// and a final InvokeFunction VM fallback.
5980 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5981 IonGenericCallKind kind) {
5982 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5983 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5985 // This code is tightly coupled with visitCallGeneric.
5987 // Upon entry:
5988 // IonGenericCallCalleeReg contains a pointer to the callee object.
5989 // IonGenericCallArgcReg contains the number of actual args.
5990 // The arguments have been pushed onto the stack:
5991 // [newTarget] (iff isConstructing)
5992 // [argN]
5993 // ...
5994 // [arg1]
5995 // [arg0]
5996 // [this]
5997 // <return address> (if not JS_USE_LINK_REGISTER)
5999 // This trampoline is responsible for entering the callee's realm,
6000 // massaging the stack into the right shape, and then performing a
6001 // tail call. We will return directly to the Ion code from the
6002 // callee.
6004 // To do a tail call, we keep the return address in a register, even
6005 // on platforms that don't normally use a link register, and push it
6006 // just before jumping to the callee, after we are done setting up
6007 // the stack.
6009 // The caller is responsible for switching back to the caller's
6010 // realm and cleaning up the stack.
6012 Register calleeReg = IonGenericCallCalleeReg;
6013 Register argcReg = IonGenericCallArgcReg;
6014 Register scratch = IonGenericCallScratch;
6015 Register scratch2 = IonGenericCallScratch2;
6017 #ifndef JS_USE_LINK_REGISTER
6018 Register returnAddrReg = IonGenericCallReturnAddrReg;
6019 masm.pop(returnAddrReg);
6020 #endif
6022 #ifdef JS_CODEGEN_ARM
6023 // The default second scratch register on arm is lr, which we need
6024 // preserved for tail calls.
6025 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
6026 #endif
6028 bool isConstructing = kind == IonGenericCallKind::Construct;
// |entry| is also a re-entry point: the fun_call and bound-function paths
// rewrite the callee/arguments and jump back here to redo the dispatch.
6030 Label entry, notFunction, noJitEntry, vmCall;
6031 masm.bind(&entry);
6033 // Guard that the callee is actually a function.
6034 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
6035 calleeReg, &notFunction);
6037 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
6038 // If these tests fail, we will call into the VM to throw an exception.
6039 if (isConstructing) {
6040 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
6041 Assembler::Zero, &vmCall);
6042 } else {
6043 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6044 calleeReg, scratch, &vmCall);
6047 if (isConstructing) {
6048 // Use the slow path if CreateThis was unable to create the |this| object.
6049 Address thisAddr(masm.getStackPointer(), 0);
6050 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
6053 masm.switchToObjectRealm(calleeReg, scratch);
6055 // Load jitCodeRaw for callee if it exists.
6056 masm.branchIfFunctionHasNoJitEntry(calleeReg, &noJitEntry);
6058 // ****************************
6059 // * Functions with jit entry *
6060 // ****************************
6061 masm.loadJitCodeRaw(calleeReg, scratch2);
6063 // Construct the JitFrameLayout.
6064 masm.PushCalleeToken(calleeReg, isConstructing);
6065 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
6066 #ifndef JS_USE_LINK_REGISTER
6067 masm.push(returnAddrReg);
6068 #endif
6070 // Check whether we need a rectifier frame.
6071 Label noRectifier;
6072 masm.loadFunctionArgCount(calleeReg, scratch);
6073 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
6075 // Tail-call the arguments rectifier.
6076 // Because all trampolines are created at the same time,
6077 // we can't create a TrampolinePtr for the arguments rectifier,
6078 // because it hasn't been linked yet. We can, however, directly
6079 // encode its offset.
6080 Label rectifier;
6081 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
6083 masm.jump(&rectifier);
6086 // Tail call the jit entry.
6087 masm.bind(&noRectifier);
6088 masm.jump(scratch2);
6090 // ********************
6091 // * Native functions *
6092 // ********************
6093 masm.bind(&noJitEntry);
6094 if (!isConstructing) {
6095 generateIonGenericCallFunCall(masm, &entry, &vmCall);
6097 generateIonGenericCallNativeFunction(masm, isConstructing);
6099 // *******************
6100 // * Bound functions *
6101 // *******************
6102 // TODO: support class hooks?
6103 masm.bind(&notFunction);
6104 if (!isConstructing) {
6105 // TODO: support generic bound constructors?
6106 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
6109 // ********************
6110 // * Fallback VM call *
6111 // ********************
// Everything that can't be handled inline (non-callables, class
// constructors called without |new|, failed CreateThis, too many bound
// args, ...) funnels into InvokeFunction, which also throws the
// appropriate errors.
6112 masm.bind(&vmCall);
6114 masm.push(masm.getStackPointer()); // argv
6115 masm.push(argcReg); // argc
6116 masm.push(Imm32(false)); // ignores return value
6117 masm.push(Imm32(isConstructing)); // constructing
6118 masm.push(calleeReg); // callee
6120 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6121 MutableHandleValue);
6122 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
6123 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
6124 Label invokeFunctionVMEntry;
6125 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
6127 masm.pushFrameDescriptor(FrameType::IonJS);
6128 #ifndef JS_USE_LINK_REGISTER
6129 masm.push(returnAddrReg);
6130 #endif
6131 masm.jump(&invokeFunctionVMEntry);
// Native-function leg of the generic call trampoline. Unlike the jitted
// paths, this does not tail-call: it builds a native exit frame, invokes the
// JSNative via an ABI call, loads the result Value, and returns to the Ion
// caller with ret(). Under the simulator the call is redirected through
// RedirectedCallAnyNative.
6134 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
6135 bool isConstructing) {
6136 Register calleeReg = IonGenericCallCalleeReg;
6137 Register argcReg = IonGenericCallArgcReg;
6138 Register scratch = IonGenericCallScratch;
6139 Register scratch2 = IonGenericCallScratch2;
6140 Register contextReg = IonGenericCallScratch3;
6141 #ifndef JS_USE_LINK_REGISTER
6142 Register returnAddrReg = IonGenericCallReturnAddrReg;
6143 #endif
6145 // Push a value containing the callee, which will become argv[0].
6146 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
6148 // Load the callee address into calleeReg.
6149 #ifdef JS_SIMULATOR
6150 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
6151 #else
6152 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6153 calleeReg);
6154 #endif
6156 // Load argv into scratch2.
6157 masm.moveStackPtrTo(scratch2);
6159 // Push argc.
6160 masm.push(argcReg);
6162 masm.loadJSContext(contextReg);
6164 // Construct native exit frame. Note that unlike other cases in this
6165 // trampoline, this code does not use a tail call.
6166 masm.pushFrameDescriptor(FrameType::IonJS);
6167 #ifdef JS_USE_LINK_REGISTER
6168 masm.pushReturnAddress();
6169 #else
6170 masm.push(returnAddrReg);
6171 #endif
6173 masm.push(FramePointer);
6174 masm.moveStackPtrTo(FramePointer);
6175 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
// JSNative signature: bool (*)(JSContext*, unsigned argc, Value* vp).
6177 masm.setupUnalignedABICall(scratch);
6178 masm.passABIArg(contextReg); // cx
6179 masm.passABIArg(argcReg); // argc
6180 masm.passABIArg(scratch2); // argv
6182 masm.callWithABI(calleeReg);
6184 // Test for failure.
6185 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
6187 masm.loadValue(
6188 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6189 JSReturnOperand);
6191 // Leave the exit frame.
6192 masm.moveToStackPtr(FramePointer);
6193 masm.pop(FramePointer);
6195 // Return.
6196 masm.ret();
// Fast path for Function.prototype.call inside the generic call trampoline.
// If the callee is exactly js::fun_call, rewrite the call in place (|this|
// becomes the callee, arguments shift down, argc decrements) and jump back
// to |entry| to dispatch the inner call. Falls through to the caller's next
// code (the native path) when the callee is some other native, and bails to
// |vmCall| when |this| is not an object.
6199 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6200 Label* entry, Label* vmCall) {
6201 Register calleeReg = IonGenericCallCalleeReg;
6202 Register argcReg = IonGenericCallArgcReg;
6203 Register scratch = IonGenericCallScratch;
6204 Register scratch2 = IonGenericCallScratch2;
6205 Register scratch3 = IonGenericCallScratch3;
6207 Label notFunCall;
6208 masm.branchPtr(Assembler::NotEqual,
6209 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6210 ImmPtr(js::fun_call), &notFunCall);
6212 // In general, we can implement fun_call by replacing calleeReg with
6213 // |this|, sliding all the other arguments down, and decrementing argc.
6215 // *BEFORE* *AFTER*
6216 // [argN] argc = N+1 <padding>
6217 // ... [argN] argc = N
6218 // [arg1] ...
6219 // [arg0] [arg1] <- now arg0
6220 // [this] <- top of stack (aligned) [arg0] <- now this
6222 // The only exception is when argc is already 0, in which case instead
6223 // of shifting arguments down we replace [this] with UndefinedValue():
6225 // *BEFORE* *AFTER*
6226 // [this] argc = 0 [undef] argc = 0
6228 // After making this transformation, we can jump back to the beginning
6229 // of this trampoline to handle the inner call.
6231 // Guard that |this| is an object. If it is, replace calleeReg.
6232 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6233 masm.movePtr(scratch, calleeReg);
6235 Label hasArgs;
6236 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6238 // No arguments. Replace |this| with |undefined| and start from the top.
6239 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6240 masm.jump(entry);
6242 masm.bind(&hasArgs);
6244 Label doneSliding;
6245 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6246 scratch3, &doneSliding);
6247 masm.bind(&doneSliding);
6248 masm.sub32(Imm32(1), argcReg);
6250 masm.jump(entry);
6252 masm.bind(&notFunCall);
// Bound-function leg of the generic call trampoline. If the non-function
// callee is a BoundFunctionObject, push its bound arguments and bound
// |this|, load the bound target as the new callee, update argc, and jump
// back to |entry| to dispatch the call to the target. Bails to |vmCall| for
// non-bound objects or when the combined argument count would exceed
// JIT_ARGS_LENGTH_MAX.
6255 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6256 Label* entry,
6257 Label* vmCall) {
6258 Register calleeReg = IonGenericCallCalleeReg;
6259 Register argcReg = IonGenericCallArgcReg;
6260 Register scratch = IonGenericCallScratch;
6261 Register scratch2 = IonGenericCallScratch2;
6262 Register scratch3 = IonGenericCallScratch3;
6264 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6265 &BoundFunctionObject::class_, scratch, calleeReg,
6266 vmCall);
6268 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6269 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6270 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6271 Address firstInlineArgSlot(
6272 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6274 // Check that we won't be pushing too many arguments.
6275 masm.load32(flagsSlot, scratch);
6276 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6277 masm.add32(argcReg, scratch);
6278 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6280 // The stack is currently correctly aligned for a jit call. We will
6281 // be updating the `this` value and potentially adding additional
6282 // arguments. On platforms with 16-byte alignment, if the number of
6283 // bound arguments is odd, we have to move the arguments that are
6284 // currently on the stack. For example, with one bound argument:
6286 // *BEFORE* *AFTER*
6287 // [argN] <padding>
6288 // ... [argN] |
6289 // [arg1] ... | These arguments have been
6290 // [arg0] [arg1] | shifted down 8 bytes.
6291 // [this] <- top of stack (aligned) [arg0] v
6292 // [bound0] <- one bound argument (odd)
6293 // [boundThis] <- top of stack (aligned)
6295 Label poppedThis;
6296 if (JitStackValueAlignment > 1) {
6297 Label alreadyAligned;
// Testing the lowest bit of the bound-arg count (bit NumBoundArgsShift of
// the flags word) tells us whether the count is odd.
6298 masm.branchTest32(Assembler::Zero, flagsSlot,
6299 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6300 &alreadyAligned);
6302 // We have an odd number of bound arguments. Shift the existing arguments
6303 // down by 8 bytes.
6304 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6305 scratch3, &poppedThis);
6306 masm.bind(&alreadyAligned);
6309 // Pop the current `this`. It will be replaced with the bound `this`.
6310 masm.freeStack(sizeof(Value));
6311 masm.bind(&poppedThis);
6313 // Load the number of bound arguments in scratch
6314 masm.load32(flagsSlot, scratch);
6315 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6317 Label donePushingBoundArguments;
6318 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6319 &donePushingBoundArguments);
6321 // Update argc to include bound arguments.
6322 masm.add32(scratch, argcReg);
6324 // Load &boundArgs[0] in scratch2.
// Bound args are stored inline in the object up to MaxInlineBoundArgs;
// beyond that they live in a separate elements array.
6325 Label outOfLineBoundArguments, haveBoundArguments;
6326 masm.branch32(Assembler::Above, scratch,
6327 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6328 &outOfLineBoundArguments);
6329 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6330 masm.jump(&haveBoundArguments);
6332 masm.bind(&outOfLineBoundArguments);
6333 masm.unboxObject(firstInlineArgSlot, scratch2);
6334 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6336 masm.bind(&haveBoundArguments);
6338 // Load &boundArgs[numBoundArgs] in scratch.
6339 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6340 masm.computeEffectiveAddress(lastBoundArg, scratch);
6342 // Push the bound arguments, starting with the last one.
6343 // Copying pre-decrements scratch until scratch2 is reached.
6344 Label boundArgumentsLoop;
6345 masm.bind(&boundArgumentsLoop);
6346 masm.subPtr(Imm32(sizeof(Value)), scratch);
6347 masm.pushValue(Address(scratch, 0));
6348 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6349 masm.bind(&donePushingBoundArguments);
6351 // Push the bound `this`.
6352 masm.pushValue(thisSlot);
6354 // Load the target in calleeReg.
6355 masm.unboxObject(targetSlot, calleeReg);
6357 // At this point, all preconditions for entering the trampoline are met:
6358 // - calleeReg contains a pointer to the callee object
6359 // - argcReg contains the number of actual args (now including bound args)
6360 // - the arguments are on the stack with the correct alignment.
6361 // Instead of generating more code, we can jump back to the entry point
6362 // of the trampoline to call the bound target.
6363 masm.jump(entry);
// Emit a call to a single known target that has a jit entry: build the
// JitFrameLayout directly and call the target's jitCodeRaw, handling realm
// switching and the constructor primitive-return fixup inline. Class
// constructors invoked without |new| take the InvokeFunction slow path so
// the VM can throw the TypeError.
6366 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6367 Register calleereg = ToRegister(call->getFunction());
6368 Register objreg = ToRegister(call->getTempObject());
6369 uint32_t unusedStack =
6370 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6371 WrappedFunction* target = call->getSingleTarget();
6373 // Native single targets (except Wasm and TrampolineNative functions) are
6374 // handled by LCallNative.
6375 MOZ_ASSERT(target->hasJitEntry());
6377 // Missing arguments must have been explicitly appended by WarpBuilder.
6378 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6379 MOZ_ASSERT(target->nargs() <=
6380 call->mir()->numStackArgs() - numNonArgsOnStack);
6382 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6384 masm.checkStackAlignment();
6386 if (target->isClassConstructor() && !call->isConstructing()) {
6387 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6388 call->ignoresReturnValue(), call->numActualArgs(),
6389 unusedStack);
6390 return;
6393 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6395 MOZ_ASSERT(!call->mir()->needsThisCheck());
6397 if (call->mir()->maybeCrossRealm()) {
6398 masm.switchToObjectRealm(calleereg, objreg);
6401 masm.loadJitCodeRaw(calleereg, objreg);
6403 // Nestle the StackPointer up to the argument vector.
6404 masm.freeStack(unusedStack);
6406 // Construct the JitFrameLayout.
6407 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6408 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6410 // Finally call the function in objreg.
6411 ensureOsiSpace();
6412 uint32_t callOffset = masm.callJit(objreg);
6413 markSafepointAt(callOffset, call);
6415 if (call->mir()->maybeCrossRealm()) {
6416 static_assert(!JSReturnOperand.aliases(ReturnReg),
6417 "ReturnReg available as scratch after scripted calls");
6418 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6421 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6422 // and undo the earlier |freeStack(unusedStack)|.
6423 int prefixGarbage =
6424 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6425 masm.adjustStack(prefixGarbage - unusedStack);
6427 // If the return value of the constructing function is Primitive,
6428 // replace the return value with the Object from CreateThis.
6429 if (call->mir()->isConstructing()) {
6430 Label notPrimitive;
6431 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6432 &notPrimitive);
6433 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6434 JSReturnOperand);
6435 #ifdef DEBUG
6436 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6437 &notPrimitive);
6438 masm.assumeUnreachable("CreateThis creates an object");
6439 #endif
6440 masm.bind(&notPrimitive);
// Shared InvokeFunction slow path for the apply-style LIR instructions
// (templated over the LApply* type). Assumes the arguments have already been
// laid out at the stack top, with argc in the instruction's argc register.
6444 template <typename T>
6445 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6446 pushArg(masm.getStackPointer()); // argv.
6447 pushArg(ToRegister(apply->getArgc())); // argc.
6448 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6449 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6450 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6452 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6453 MutableHandleValue);
6454 callVM<Fn, jit::InvokeFunction>(apply);
6457 // Do not bailout after the execution of this function since the stack no longer
6458 // correspond to what is expected by the snapshots.
// Reserve |argcreg| Value slots (plus alignment padding when argc is even)
// on the stack for an apply-style call. |scratch| is clobbered; |argcreg| is
// preserved. In DEBUG builds the padding slot is poisoned with JS_ARG_POISON
// so stray reads are detectable.
6459 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6460 Register scratch) {
6461 // Use scratch register to calculate stack space (including padding).
6462 masm.movePtr(argcreg, scratch);
6464 // Align the JitFrameLayout on the JitStackAlignment.
6465 if (JitStackValueAlignment > 1) {
6466 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6467 "Stack padding assumes that the frameSize is correct");
6468 MOZ_ASSERT(JitStackValueAlignment == 2);
6469 Label noPaddingNeeded;
6470 // If the number of arguments is odd, then we do not need any padding.
6472 // Note: The |JitStackValueAlignment == 2| condition requires that the
6473 // overall number of values on the stack is even. When we have an odd number
6474 // of arguments, we don't need any padding, because the |thisValue| is
6475 // pushed after the arguments, so the overall number of values on the stack
6476 // is even.
6477 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6478 masm.addPtr(Imm32(1), scratch);
6479 masm.bind(&noPaddingNeeded);
6482 // Reserve space for copying the arguments.
6483 NativeObject::elementsSizeMustNotOverflow();
6484 masm.lshiftPtr(Imm32(ValueShift), scratch);
6485 masm.subFromStackPtr(scratch);
6487 #ifdef DEBUG
6488 // Put a magic value in the space reserved for padding. Note, this code cannot
6489 // be merged with the previous test, as not all architectures can write below
6490 // their stack pointers.
6491 if (JitStackValueAlignment > 1) {
6492 MOZ_ASSERT(JitStackValueAlignment == 2);
6493 Label noPaddingNeeded;
6494 // If the number of arguments is odd, then we do not need any padding.
6495 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6496 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6497 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6498 masm.bind(&noPaddingNeeded);
6500 #endif
6503 // Do not bailout after the execution of this function since the stack no longer
6504 // correspond to what is expected by the snapshots.
// Construct-flavored variant of emitAllocateSpaceForApply: pushes optional
// alignment padding, then |new.target|, then reserves |argcreg| Value slots.
// |newTargetAndScratch| initially holds the new.target object and is then
// reused as the scratch register (hence the push-before-clobber ordering
// explained below). |argcreg| is preserved.
6505 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6506 Register argcreg, Register newTargetAndScratch) {
6507 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6508 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6509 // we can't write to |newTargetAndScratch| before |new.target| has been pushed
6510 // onto the stack.
6511 if (JitStackValueAlignment > 1) {
6512 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6513 "Stack padding assumes that the frameSize is correct");
6514 MOZ_ASSERT(JitStackValueAlignment == 2);
6516 Label noPaddingNeeded;
6517 // If the number of arguments is even, then we do not need any padding.
6519 // Note: The |JitStackValueAlignment == 2| condition requires that the
6520 // overall number of values on the stack is even. When we have an even
6521 // number of arguments, we don't need any padding, because |new.target| is
6522 // is pushed before the arguments and |thisValue| is pushed after all
6523 // arguments, so the overall number of values on the stack is even.
6524 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6525 masm.pushValue(MagicValue(JS_ARG_POISON));
6526 masm.bind(&noPaddingNeeded);
6529 // Push |new.target| after the padding value, but before any arguments.
6530 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6532 // Use newTargetAndScratch to calculate stack space (including padding).
6533 masm.movePtr(argcreg, newTargetAndScratch);
6535 // Reserve space for copying the arguments.
6536 NativeObject::elementsSizeMustNotOverflow();
6537 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6538 masm.subFromStackPtr(newTargetAndScratch);
6541 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from |argvSrcBase + argvSrcOffset| to the stack at
// |argvDstOffset|, iterating from the highest index down to 1. On 32-bit
// targets each Value is copied as two pointer-sized words.
6542 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6543 Register argvIndex, Register copyreg,
6544 size_t argvSrcOffset,
6545 size_t argvDstOffset) {
6546 Label loop;
6547 masm.bind(&loop);
6549 // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
6550 // back, we have to subtract the size of the words that are copied.
6551 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6552 int32_t(argvSrcOffset) - sizeof(void*));
6553 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6554 int32_t(argvDstOffset) - sizeof(void*));
6555 masm.loadPtr(srcPtr, copyreg);
6556 masm.storePtr(copyreg, dstPtr);
6558 // Handle 32 bits architectures.
6559 if (sizeof(Value) == 2 * sizeof(void*)) {
6560 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6561 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6562 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6563 int32_t(argvDstOffset) - 2 * sizeof(void*));
6564 masm.loadPtr(srcPtrLow, copyreg);
6565 masm.storePtr(copyreg, dstPtrLow);
6568 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
// Recompute the stack pointer as FramePointer - frameSize(), discarding any
// dynamically-sized argument area pushed for the preceding call.
6571 void CodeGenerator::emitRestoreStackPointerFromFP() {
6572 // This is used to restore the stack pointer after a call with a dynamic
6573 // number of arguments.
6575 MOZ_ASSERT(masm.framePushed() == frameSize());
6577 int32_t offset = -int32_t(frameSize());
6578 masm.computeEffectiveAddress(Address(FramePointer, offset),
6579 masm.getStackPointer());
// Copy |argcreg| actual arguments from the current Ion frame (above its
// JitFrameLayout) into the already-reserved space at the stack top.
// |extraFormals| skips that many leading actuals (used for rest-parameter
// copies). Clobbers |scratch| and |copyreg|; preserves |argcreg|.
6582 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6583 Register copyreg, uint32_t extraFormals) {
6584 Label end;
6586 // Skip the copy of arguments if there are none.
6587 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6589 // clang-format off
6591 // We are making a copy of the arguments which are above the JitFrameLayout
6592 // of the current Ion frame.
6594 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6596 // clang-format on
6598 // Compute the source and destination offsets into the stack.
6600 // The |extraFormals| parameter is used when copying rest-parameters and
6601 // allows to skip the initial parameters before the actual rest-parameters.
6602 Register argvSrcBase = FramePointer;
6603 size_t argvSrcOffset =
6604 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6605 size_t argvDstOffset = 0;
6607 Register argvIndex = scratch;
6608 masm.move32(argcreg, argvIndex);
6610 // Copy arguments.
6611 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6612 argvDstOffset);
6614 // Join with all arguments copied.
6615 masm.bind(&end);
// Argument pusher for LApplyArgsGeneric: reserve stack space sized by the
// runtime argc, copy the caller's actuals, then push |this|.
6618 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply) {
6619 // Holds the function nargs.
6620 Register argcreg = ToRegister(apply->getArgc());
6621 Register copyreg = ToRegister(apply->getTempObject());
6622 Register scratch = ToRegister(apply->getTempForArgCopy());
6623 uint32_t extraFormals = apply->numExtraFormals();
6625 // Allocate space on the stack for arguments.
6626 emitAllocateSpaceForApply(argcreg, scratch);
6628 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6630 // Push |this|.
6631 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Argument pusher for LApplyArgsObj: spread an ArgumentsObject's elements
// onto the stack. |argsObj| aliases the argc register; after
// emitPushArrayAsArguments() that register holds the argument count.
6634 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply) {
6635 Register argsObj = ToRegister(apply->getArgsObj());
6636 Register tmpArgc = ToRegister(apply->getTempObject());
6637 Register scratch = ToRegister(apply->getTempForArgCopy());
6639 // argc and argsObj are mapped to the same calltemp register.
6640 MOZ_ASSERT(argsObj == ToRegister(apply->getArgc()));
6642 // Load argc into tmpArgc.
6643 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
6645 // Allocate space on the stack for arguments.
6646 emitAllocateSpaceForApply(tmpArgc, scratch);
6648 // Load arguments data.
6649 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6650 argsObj);
6651 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6653 // This is the end of the lifetime of argsObj.
6654 // After this call, the argsObj register holds the argument count instead.
6655 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6657 // Push |this|.
6658 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc|+|argvSrcOffset| into
// already-reserved stack space, then leave argc in |srcBaseAndArgc|.
// The argc value is preserved across the copy by stashing it on the stack.
6661 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6662 Register srcBaseAndArgc,
6663 Register scratch,
6664 size_t argvSrcOffset) {
6665 // Preconditions:
6666 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6667 // the stack to hold arguments.
6668 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6670 // Postconditions:
6671 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6672 // the allocated space.
6673 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6675 // |scratch| is used as a temp register within this function and clobbered.
6677 Label noCopy, epilogue;
6679 // Skip the copy of arguments if there are none.
6680 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6682 // Copy the values. This code is skipped entirely if there are no values.
6683 size_t argvDstOffset = 0;
6685 Register argvSrcBase = srcBaseAndArgc;
6687 // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
// The push moves SP, so the destination offset must account for the extra
// word now sitting on top of the reserved argument area.
6688 masm.push(tmpArgc);
6689 Register argvIndex = tmpArgc;
6690 argvDstOffset += sizeof(void*);
6692 // Copy
6693 emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
6694 argvDstOffset);
6696 // Restore.
6697 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6698 masm.jump(&epilogue);
6700 masm.bind(&noCopy);
6702 // Clear argc if we skipped the copy step.
6703 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6706 // Join with all arguments copied.
6707 // Note, "srcBase" has become "argc".
6708 masm.bind(&epilogue);
// Argument pusher for LApplyArrayGeneric: spread a dense array's elements.
// |elements| aliases the argc register and is consumed by
// emitPushArrayAsArguments(), which leaves argc in that register.
6711 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply) {
6712 Register elements = ToRegister(apply->getElements());
6713 Register tmpArgc = ToRegister(apply->getTempObject());
6714 Register scratch = ToRegister(apply->getTempForArgCopy());
6716 // argc and elements are mapped to the same calltemp register.
6717 MOZ_ASSERT(elements == ToRegister(apply->getArgc()));
6719 // Invariants guarded in the caller:
6720 // - the array is not too long
6721 // - the array length equals its initialized length
6723 // The array length is our argc for the purposes of allocating space.
6724 masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
6726 // Allocate space for the values.
6727 emitAllocateSpaceForApply(tmpArgc, scratch);
6729 // After this call "elements" has become "argc".
6730 size_t elementsOffset = 0;
6731 emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
6733 // Push |this|.
6734 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Argument pusher for LConstructArgsGeneric: like the ApplyArgs variant, but
// also pushes newTarget (which shares a register with |scratch| and is dead
// after the allocation call).
6737 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct) {
6738 // Holds the function nargs.
6739 Register argcreg = ToRegister(construct->getArgc());
6740 Register copyreg = ToRegister(construct->getTempObject());
6741 Register scratch = ToRegister(construct->getTempForArgCopy());
6742 uint32_t extraFormals = construct->numExtraFormals();
6744 // newTarget and scratch are mapped to the same calltemp register.
6745 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6747 // Allocate space for the values.
6748 // After this call "newTarget" has become "scratch".
6749 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6751 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6753 // Push |this|.
6754 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Argument pusher for LConstructArrayGeneric: spread a dense array and push
// newTarget. Register aliasing: elements==argc, newTarget==scratch.
6757 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct) {
6758 Register elements = ToRegister(construct->getElements());
6759 Register tmpArgc = ToRegister(construct->getTempObject());
6760 Register scratch = ToRegister(construct->getTempForArgCopy());
6762 // argc and elements are mapped to the same calltemp register.
6763 MOZ_ASSERT(elements == ToRegister(construct->getArgc()));
6765 // newTarget and scratch are mapped to the same calltemp register.
6766 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6768 // Invariants guarded in the caller:
6769 // - the array is not too long
6770 // - the array length equals its initialized length
6772 // The array length is our argc for the purposes of allocating space.
6773 masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
6775 // Allocate space for the values.
6776 // After this call "newTarget" has become "scratch".
6777 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6779 // After this call "elements" has become "argc".
6780 size_t elementsOffset = 0;
6781 emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
6783 // Push |this|.
6784 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared tail for all scripted apply/construct variants. After the
// variant-specific emitPushArguments(), guard that the callee is a callable
// JIT-compiled function, call it (via the arguments rectifier on argument
// underflow), and fall back to emitCallInvokeFunction() for uncompiled or
// native callees (label |invoke|).
6787 template <typename T>
6788 void CodeGenerator::emitApplyGeneric(T* apply) {
6789 // Holds the function object.
6790 Register calleereg = ToRegister(apply->getFunction());
6792 // Temporary register for modifying the function object.
6793 Register objreg = ToRegister(apply->getTempObject());
6794 Register scratch = ToRegister(apply->getTempForArgCopy());
6796 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6797 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6798 Register argcreg = ToRegister(apply->getArgc());
6800 // Copy the arguments of the current function.
6802 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also compute
6803 // argc. The argc register and the elements/argsObj register are the same;
6804 // argc must not be referenced before the call to emitPushArguments() and
6805 // elements/argsObj must not be referenced after it returns.
6807 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget;
6808 // newTarget must not be referenced after this point.
6810 // objreg is dead across this call.
6811 emitPushArguments(apply);
6813 masm.checkStackAlignment();
6815 bool constructing = apply->mir()->isConstructing();
6817 // If the function is native, the call is compiled through emitApplyNative.
6818 MOZ_ASSERT_IF(apply->hasSingleTarget(),
6819 !apply->getSingleTarget()->isNativeWithoutJitEntry());
6821 Label end, invoke;
6823 // Unless already known, guard that calleereg is actually a function object.
6824 if (!apply->hasSingleTarget()) {
6825 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6826 calleereg, &invoke);
6829 // Guard that calleereg is an interpreted function with a JSScript.
6830 masm.branchIfFunctionHasNoJitEntry(calleereg, &invoke);
6832 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6833 if (constructing) {
6834 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6835 Assembler::Zero, &invoke);
6836 } else {
6837 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6838 calleereg, objreg, &invoke);
6841 // Use the slow path if CreateThis was unable to create the |this| object.
// |this| is the most recently pushed Value, hence stack offset 0.
6842 if (constructing) {
6843 Address thisAddr(masm.getStackPointer(), 0);
6844 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6847 // Call with an Ion frame or a rectifier frame.
6849 if (apply->mir()->maybeCrossRealm()) {
6850 masm.switchToObjectRealm(calleereg, objreg);
6853 // Knowing that calleereg is a non-native function, load jitcode.
6854 masm.loadJitCodeRaw(calleereg, objreg);
6856 masm.PushCalleeToken(calleereg, constructing);
6857 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6859 Label underflow, rejoin;
6861 // Check whether the provided arguments satisfy target argc.
6862 if (!apply->hasSingleTarget()) {
6863 Register nformals = scratch;
6864 masm.loadFunctionArgCount(calleereg, nformals);
6865 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6866 } else {
6867 masm.branch32(Assembler::Below, argcreg,
6868 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6871 // Skip the construction of the rectifier frame because we have no
6872 // underflow.
6873 masm.jump(&rejoin);
6875 // Argument fixup needed. Get ready to call the argumentsRectifier.
6877 masm.bind(&underflow);
6879 // Hardcode the address of the argumentsRectifier code.
6880 TrampolinePtr argumentsRectifier =
6881 gen->jitRuntime()->getArgumentsRectifier();
6882 masm.movePtr(argumentsRectifier, objreg);
6885 masm.bind(&rejoin);
6887 // Finally call the function in objreg, as assigned by one of the paths
6888 // above.
6889 ensureOsiSpace();
6890 uint32_t callOffset = masm.callJit(objreg);
6891 markSafepointAt(callOffset, apply);
6893 if (apply->mir()->maybeCrossRealm()) {
6894 static_assert(!JSReturnOperand.aliases(ReturnReg),
6895 "ReturnReg available as scratch after scripted calls");
6896 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6899 // Discard JitFrameLayout fields still left on the stack.
6900 masm.freeStack(sizeof(JitFrameLayout) -
6901 JitFrameLayout::bytesPoppedAfterCall());
6902 masm.jump(&end);
6905 // Handle uncompiled or native functions.
6907 masm.bind(&invoke);
6908 emitCallInvokeFunction(apply);
6911 masm.bind(&end);
6913 // If the return value of the constructing function is Primitive, replace the
6914 // return value with the Object from CreateThis.
6915 if (constructing) {
6916 Label notPrimitive;
6917 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6918 &notPrimitive);
6919 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6921 #ifdef DEBUG
6922 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6923 &notPrimitive);
6924 masm.assumeUnreachable("CreateThis creates an object");
6925 #endif
6927 masm.bind(&notPrimitive);
6930 // Pop arguments and continue.
6931 emitRestoreStackPointerFromFP();
// Push one poison Value of padding when needed so that the total number of
// Values about to be pushed keeps the stack aligned to JitStackAlignment.
6934 template <typename T>
6935 void CodeGenerator::emitAlignStackForApplyNative(T* apply, Register argc) {
6936 static_assert(JitStackAlignment % ABIStackAlignment == 0,
6937 "aligning on JIT stack subsumes ABI alignment");
6939 // Align the arguments on the JitStackAlignment.
6940 if (JitStackValueAlignment > 1) {
6941 MOZ_ASSERT(JitStackValueAlignment == 2,
6942 "Stack padding adds exactly one Value");
6943 MOZ_ASSERT(frameSize() % JitStackValueAlignment == 0,
6944 "Stack padding assumes that the frameSize is correct");
6946 Assembler::Condition cond;
6947 if constexpr (T::isConstructing()) {
6948 // If the number of arguments is even, then we do not need any padding.
// (Constructing also pushes newTarget, so the pushed Values total
// argc + newTarget + |this| = argc + 2, which is even exactly when
// argc is even.)
6950 // Also see emitAllocateSpaceForConstructAndPushNewTarget().
6951 cond = Assembler::Zero;
6952 } else {
6953 // If the number of arguments is odd, then we do not need any padding.
// (The pushed Values total argc + |this| = argc + 1, which is even
// exactly when argc is odd.)
6955 // Also see emitAllocateSpaceForApply().
6956 cond = Assembler::NonZero;
6959 Label noPaddingNeeded;
6960 masm.branchTestPtr(cond, argc, Imm32(1), &noPaddingNeeded);
6961 masm.pushValue(MagicValue(JS_ARG_POISON));
6962 masm.bind(&noPaddingNeeded);
// Push arguments for a native (no-JitEntry) apply/construct target: align the
// stack, optionally push newTarget, copy the caller's actuals, then push
// |this| (or the is-constructing magic Value).
6966 template <typename T>
6967 void CodeGenerator::emitPushNativeArguments(T* apply) {
6968 Register argc = ToRegister(apply->getArgc());
6969 Register tmpArgc = ToRegister(apply->getTempObject());
6970 Register scratch = ToRegister(apply->getTempForArgCopy());
6971 uint32_t extraFormals = apply->numExtraFormals();
6973 // Align stack.
6974 emitAlignStackForApplyNative(apply, argc);
6976 // Push newTarget.
6977 if constexpr (T::isConstructing()) {
6978 masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
6981 // Push arguments.
6982 Label noCopy;
6983 masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
6985 // Use scratch register to calculate stack space.
6986 masm.movePtr(argc, scratch);
6988 // Reserve space for copying the arguments.
// scratch := argc * sizeof(Value); the overflow assertion guards the shift.
6989 NativeObject::elementsSizeMustNotOverflow();
6990 masm.lshiftPtr(Imm32(ValueShift), scratch);
6991 masm.subFromStackPtr(scratch);
6993 // Compute the source and destination offsets into the stack.
6994 Register argvSrcBase = FramePointer;
6995 size_t argvSrcOffset =
6996 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6997 size_t argvDstOffset = 0;
6999 Register argvIndex = tmpArgc;
7000 masm.move32(argc, argvIndex);
7002 // Copy arguments.
7003 emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
7004 argvDstOffset);
7006 masm.bind(&noCopy);
7008 // Push |this|.
7009 if constexpr (T::isConstructing()) {
7010 masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
7011 } else {
7012 masm.pushValue(ToValue(apply, T::ThisIndex));
// Push a dense array's elements as arguments for a native apply/construct
// target, pushing them one by one from the last element down. |argc| and
// |elements| alias; argc is only written after the last use of elements.
7016 template <typename T>
7017 void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
7018 Register argc = ToRegister(apply->getArgc());
7019 Register elements = ToRegister(apply->getElements());
7020 Register tmpArgc = ToRegister(apply->getTempObject());
7021 Register scratch = ToRegister(apply->getTempForArgCopy());
7023 // NB: argc and elements are mapped to the same register.
7024 MOZ_ASSERT(argc == elements);
7026 // Invariants guarded in the caller:
7027 // - the array is not too long
7028 // - the array length equals its initialized length
7030 // The array length is our argc.
7031 masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
7033 // Align stack.
7034 emitAlignStackForApplyNative(apply, tmpArgc);
7036 // Push newTarget.
7037 if constexpr (T::isConstructing()) {
7038 masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
7041 // Skip the copy of arguments if there are none.
7042 Label noCopy;
7043 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
7045 // |tmpArgc| is off-by-one, so adjust the offset accordingly.
7046 BaseObjectElementIndex srcPtr(elements, tmpArgc,
7047 -int32_t(sizeof(JS::Value)));
7049 Label loop;
7050 masm.bind(&loop);
7051 masm.pushValue(srcPtr, scratch);
7052 masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
7054 masm.bind(&noCopy);
7056 // Set argc in preparation for calling the native function.
// This reads through |elements| as the base address and then overwrites the
// shared register with the length; the read happens before the write.
7057 masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
7059 // Push |this|.
7060 if constexpr (T::isConstructing()) {
7061 masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
7062 } else {
7063 masm.pushValue(ToValue(apply, T::ThisIndex));
// Dispatch to the shared native argument pusher (non-constructing, argc form).
7067 void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
7068 emitPushNativeArguments(apply);
// Dispatch to the shared native argument pusher (non-constructing, array form).
7071 void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
7072 emitPushArrayAsNativeArguments(apply);
// Dispatch to the shared native argument pusher (constructing, argc form).
7075 void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
7076 emitPushNativeArguments(construct);
// Dispatch to the shared native argument pusher (constructing, array form).
7079 void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
7080 emitPushArrayAsNativeArguments(construct);
// Push an ArgumentsObject's arguments for a native apply target. |argc| and
// |argsObj| alias; argsObj is consumed as the source base and argc is only
// written after the copy completes.
7083 void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
7084 Register argc = ToRegister(apply->getArgc());
7085 Register argsObj = ToRegister(apply->getArgsObj());
7086 Register tmpArgc = ToRegister(apply->getTempObject());
7087 Register scratch = ToRegister(apply->getTempForArgCopy());
7088 Register scratch2 = ToRegister(apply->getTempExtra());
7090 // NB: argc and argsObj are mapped to the same register.
7091 MOZ_ASSERT(argc == argsObj);
7093 // Load argc into tmpArgc.
7094 masm.loadArgumentsObjectLength(argsObj, tmpArgc);
7096 // Align stack.
7097 emitAlignStackForApplyNative(apply, tmpArgc);
7099 // Push arguments.
7100 Label noCopy, epilogue;
7101 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
7103 // Use scratch register to calculate stack space.
7104 masm.movePtr(tmpArgc, scratch);
7106 // Reserve space for copying the arguments.
7107 NativeObject::elementsSizeMustNotOverflow();
7108 masm.lshiftPtr(Imm32(ValueShift), scratch);
7109 masm.subFromStackPtr(scratch);
7111 // Load arguments data.
7112 Register argvSrcBase = argsObj;
7113 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
7114 argvSrcBase);
7115 size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
7116 size_t argvDstOffset = 0;
7118 Register argvIndex = scratch2;
7119 masm.move32(tmpArgc, argvIndex);
7121 // Copy the values.
7122 emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
7123 argvDstOffset);
7125 masm.bind(&noCopy);
7127 // Set argc in preparation for calling the native function.
7128 masm.movePtr(tmpArgc, argc);
7130 // Push |this|.
7131 masm.pushValue(ToValue(apply, LApplyArgsObjNative::ThisIndex));
// Shared tail for apply/construct of a known native function: push the
// arguments, then build an exit frame and call the native via
// emitCallNative(). Uses the ignores-return-value variant of the native when
// the JSJitInfo provides one and the result is unused.
7134 template <typename T>
7135 void CodeGenerator::emitApplyNative(T* apply) {
7136 MOZ_ASSERT(T::isConstructing() == apply->mir()->isConstructing(),
7137 "isConstructing condition must be consistent");
7139 WrappedFunction* target = apply->mir()->getSingleTarget();
7140 MOZ_ASSERT(target->isNativeWithoutJitEntry());
7142 JSNative native = target->native();
7143 if (apply->mir()->ignoresReturnValue() && target->hasJitInfo()) {
7144 const JSJitInfo* jitInfo = target->jitInfo();
7145 if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
7146 native = jitInfo->ignoresReturnValueMethod;
7150 // Push arguments, including newTarget and |this|.
7151 emitPushArguments(apply);
7153 // Registers used for callWithABI() argument-passing.
7154 Register argContextReg = ToRegister(apply->getTempObject());
7155 Register argUintNReg = ToRegister(apply->getArgc());
7156 Register argVpReg = ToRegister(apply->getTempForArgCopy());
7157 Register tempReg = ToRegister(apply->getTempExtra());
7159 // No unused stack for variadic calls.
7160 uint32_t unusedStack = 0;
7162 // Pushed arguments don't change the pushed frames amount.
7163 MOZ_ASSERT(masm.framePushed() == frameSize());
7165 // Create the exit frame and call the native.
7166 emitCallNative(apply, native, argContextReg, argUintNReg, argVpReg, tempReg,
7167 unusedStack);
7169 // The exit frame is still on the stack.
7170 MOZ_ASSERT(masm.framePushed() == frameSize() + NativeExitFrameLayout::Size());
7172 // The next instruction is removing the exit frame, so there is no need for
7173 // leaveFakeExitFrame.
7175 // Pop arguments and continue.
7176 masm.setFramePushed(frameSize());
7177 emitRestoreStackPointerFromFP();
// Bail out if the runtime argument count exceeds JIT_ARGS_LENGTH_MAX.
7180 template <typename T>
7181 void CodeGenerator::emitApplyArgsGuard(T* apply) {
7182 LSnapshot* snapshot = apply->snapshot();
7183 Register argcreg = ToRegister(apply->getArgc());
7185 // Ensure that we have a reasonable number of arguments.
7186 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
// Bail out if the ArgumentsObject's length is unavailable (the load itself
// can branch to |bail|) or exceeds JIT_ARGS_LENGTH_MAX.
7189 template <typename T>
7190 void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
7191 Register argsObj = ToRegister(apply->getArgsObj());
7192 Register temp = ToRegister(apply->getTempObject());
7194 Label bail;
7195 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
7196 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
7197 bailoutFrom(&bail, apply->snapshot());
// Bail out if the spread array is too long or has an uninitialized tail
// (length != initializedLength), since the pushers read |length| elements.
7200 template <typename T>
7201 void CodeGenerator::emitApplyArrayGuard(T* apply) {
7202 LSnapshot* snapshot = apply->snapshot();
7203 Register elements = ToRegister(apply->getElements());
7204 Register tmp = ToRegister(apply->getTempObject());
7206 Address length(elements, ObjectElements::offsetOfLength());
7207 masm.load32(length, tmp);
7209 // Ensure that we have a reasonable number of arguments.
7210 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
7212 // Ensure that the array does not contain an uninitialized tail.
// tmp := length - initializedLength; bail unless the difference is zero.
7214 Address initializedLength(elements,
7215 ObjectElements::offsetOfInitializedLength());
7216 masm.sub32(initializedLength, tmp);
7217 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
// Guard the argc range, then emit the generic (scripted) apply path.
7220 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
7221 emitApplyArgsGuard(apply);
7222 emitApplyGeneric(apply);
// Guard the ArgumentsObject length, then emit the generic apply path.
7225 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
7226 emitApplyArgsObjGuard(apply);
7227 emitApplyGeneric(apply);
// Guard the spread array, then emit the generic apply path.
7230 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
7231 emitApplyArrayGuard(apply);
7232 emitApplyGeneric(apply);
// Guard the argc range, then emit the generic (scripted) construct path.
7235 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
7236 emitApplyArgsGuard(lir);
7237 emitApplyGeneric(lir);
// Guard the spread array, then emit the generic construct path.
7240 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
7241 emitApplyArrayGuard(lir);
7242 emitApplyGeneric(lir);
// Guard the argc range, then emit the native apply path.
7245 void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
7246 emitApplyArgsGuard(lir);
7247 emitApplyNative(lir);
// Guard the ArgumentsObject length, then emit the native apply path.
7250 void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
7251 emitApplyArgsObjGuard(lir);
7252 emitApplyNative(lir);
// Guard the spread array, then emit the native apply path.
7255 void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
7256 emitApplyArrayGuard(lir);
7257 emitApplyNative(lir);
// Guard the argc range, then emit the native construct path.
7260 void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
7261 emitApplyArgsGuard(lir);
7262 emitApplyNative(lir);
// Guard the spread array, then emit the native construct path.
7265 void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
7266 emitApplyArrayGuard(lir);
7267 emitApplyNative(lir);
// Unconditionally bail out of Ion code using this instruction's snapshot.
7270 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
// Debug trap for blocks MIR proved unreachable.
7272 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
7273 masm.assumeUnreachable("end-of-block assumed unreachable");
// Encode this instruction's snapshot into the snapshot stream; emits no code.
7276 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
7277 encode(lir->snapshot());
// Debug trap: a boxed-Value result that must never actually be produced.
7280 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
7281 masm.assumeUnreachable("must be unreachable");
// Debug trap: a typed result that must never actually be produced.
7284 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
7285 masm.assumeUnreachable("must be unreachable");
7288 // Out-of-line path to report over-recursed error and fail.
// Holds the guarded LInstruction so the OOL handler can save/restore its
// live registers around the CheckOverRecursed VM call.
7289 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
7290 LInstruction* lir_;
7292 public:
7293 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
7295 void accept(CodeGenerator* codegen) override {
7296 codegen->visitCheckOverRecursedFailure(this);
7299 LInstruction* lir() const { return lir_; }
// Emit the stack-overflow check: compare SP against the JIT stack limit and
// branch to an out-of-line failure path that throws.
7302 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
7303 // If we don't push anything on the stack, skip the check.
7304 if (omitOverRecursedCheck()) {
7305 return;
7308 // Ensure that this frame will not cross the stack limit.
7309 // This is a weak check, justified by Ion using the C stack: we must always
7310 // be some distance away from the actual limit, since if the limit is
7311 // crossed, an error must be thrown, which requires more frames.
7313 // It must always be possible to trespass past the stack limit.
7314 // Ion may legally place frames very close to the limit. Calling additional
7315 // C functions may then violate the limit without any checking.
7317 // Since Ion frames exist on the C stack, the stack limit may be
7318 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
7320 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
7321 addOutOfLineCode(ool, lir->mir());
7323 // Conditional forward (unlikely) branch to failure.
7324 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
7325 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
7326 ool->entry());
7327 masm.bind(ool->rejoin());
// Out-of-line handler: call the CheckOverRecursed VM function (which throws
// the over-recursion error) with all live registers saved around the call.
7330 void CodeGenerator::visitCheckOverRecursedFailure(
7331 CheckOverRecursedFailure* ool) {
7332 // The OOL path is hit if the recursion depth has been exceeded.
7333 // Throw an InternalError for over-recursion.
7335 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
7336 // to save all live registers to avoid crashes if CheckOverRecursed triggers
7337 // a GC.
7338 saveLive(ool->lir());
7340 using Fn = bool (*)(JSContext*);
7341 callVM<Fn, CheckOverRecursed>(ool->lir());
7343 restoreLive(ool->lir());
7344 masm.jump(ool->rejoin());
// When profiling is enabled, create an IonScriptCounts with one entry per MIR
// block (PC offset, optional inlining description, successor ids), store it
// in scriptCounts_, and return it. Returns nullptr when profiling is off,
// when compiling wasm (no JSScript), or on OOM.
7347 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
7348 // If scripts are being profiled, create a new IonScriptCounts for the
7349 // profiling data, which will be attached to the associated JSScript or
7350 // wasm module after code generation finishes.
7351 if (!gen->hasProfilingScripts()) {
7352 return nullptr;
7355 // This test inhibits IonScriptCount creation for wasm code which is
7356 // currently incompatible with wasm codegen for two reasons: (1) wasm code
7357 // must be serializable and script count codegen bakes in absolute
7358 // addresses, (2) wasm code does not have a JSScript with which to associate
7359 // code coverage data.
7360 JSScript* script = gen->outerInfo().script();
7361 if (!script) {
7362 return nullptr;
7365 auto counts = MakeUnique<IonScriptCounts>();
7366 if (!counts || !counts->init(graph.numBlocks())) {
7367 return nullptr;
7370 for (size_t i = 0; i < graph.numBlocks(); i++) {
7371 MBasicBlock* block = graph.getBlock(i)->mir();
7373 uint32_t offset = 0;
7374 char* description = nullptr;
7375 if (MResumePoint* resume = block->entryResumePoint()) {
7376 // Find a PC offset in the outermost script to use. If this
7377 // block is from an inlined script, find a location in the
7378 // outer script to associate information about the inlining
7379 // with.
7380 while (resume->caller()) {
7381 resume = resume->caller();
7383 offset = script->pcToOffset(resume->pc());
7385 if (block->entryResumePoint()->caller()) {
7386 // Get the filename and line number of the inner script.
7387 JSScript* innerScript = block->info().script();
// description allocation failure is tolerated: a null description is
// passed through to init() below.
7388 description = js_pod_calloc<char>(200);
7389 if (description) {
7390 snprintf(description, 200, "%s:%u", innerScript->filename(),
7391 innerScript->lineno());
7396 if (!counts->block(i).init(block->id(), offset, description,
// NOTE(review): |description| ownership appears to transfer to init();
// confirm it is freed on this failure path.
7397 block->numSuccessors())) {
7398 return nullptr;
7401 for (size_t j = 0; j < block->numSuccessors(); j++) {
7402 counts->block(i).setSuccessor(
7403 j, skipTrivialBlocks(block->getSuccessor(j))->id());
7407 scriptCounts_ = counts.release();
7408 return scriptCounts_;
7411 // Structure for managing the state tracked for a block by script counters.
// RAII helper: on init() it bumps the block's hit counter and attaches a
// printer to the MacroAssembler; the destructor detaches the printer and
// stores the collected assembly text on the IonBlockCounts entry.
7412 struct ScriptCountBlockState {
7413 IonBlockCounts& block;
7414 MacroAssembler& masm;
7416 Sprinter printer;
7418 public:
7419 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
7420 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
7422 bool init() {
7423 if (!printer.init()) {
7424 return false;
7427 // Bump the hit count for the block at the start. This code is not
7428 // included in either the text for the block or the instruction byte
7429 // counts.
7430 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
7432 // Collect human readable assembly for the code generated in the block.
7433 masm.setPrinter(&printer);
7435 return true;
7438 void visitInstruction(LInstruction* ins) {
7439 #ifdef JS_JITSPEW
7440 // Prefix stream of assembly instructions with their LIR instruction
7441 // name and any associated high level info.
7442 if (const char* extra = ins->getExtraName()) {
7443 printer.printf("[%s:%s]\n", ins->opName(), extra);
7444 } else {
7445 printer.printf("[%s]\n", ins->opName());
7447 #endif
7450 ~ScriptCountBlockState() {
7451 masm.setPrinter(nullptr);
7453 if (JS::UniqueChars str = printer.release()) {
7454 block.setCode(str.get());
// Branch to |invalidated| if this IonScript has been invalidated. The
// movWithPatch placeholder is recorded in ionScriptLabels_ and is presumably
// patched with the IonScript pointer at link time (the load below treats
// |temp| as an IonScript*) — the patching itself is not visible here.
7459 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7460 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7461 masm.propagateOOM(ionScriptLabels_.append(label));
7463 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7464 masm.branch32(Assembler::NotEqual,
7465 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7466 invalidated);
7469 #ifdef DEBUG
// Debug-only: verify that a GC-thing result (Object/String/Symbol/BigInt) in
// |input| is a valid pointer by calling the matching AssertValid*Ptr helper,
// unless the script has been invalidated.
7470 void CodeGenerator::emitAssertGCThingResult(Register input,
7471 const MDefinition* mir) {
7472 MIRType type = mir->type();
7473 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7474 type == MIRType::Symbol || type == MIRType::BigInt);
7476 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7477 regs.take(input);
7479 Register temp = regs.takeAny();
7480 masm.push(temp);
7482 // Don't check if the script has been invalidated. In that case invalid
7483 // types are expected (until we reach the OsiPoint and bailout).
7484 Label done;
7485 branchIfInvalidated(temp, &done);
7487 # ifndef JS_SIMULATOR
7488 // Check that we have a valid GC pointer.
7489 // Disable for wasm because we don't have a context on wasm compilation
7490 // threads and this needs a context.
7491 // Also disable for simulator builds because the C++ call is a lot slower
7492 // there than on actual hardware.
7493 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7494 saveVolatile();
7495 masm.setupUnalignedABICall(temp);
7496 masm.loadJSContext(temp);
7497 masm.passABIArg(temp);
7498 masm.passABIArg(input);
7500 switch (type) {
7501 case MIRType::Object: {
7502 using Fn = void (*)(JSContext* cx, JSObject* obj);
7503 masm.callWithABI<Fn, AssertValidObjectPtr>();
7504 break;
7506 case MIRType::String: {
7507 using Fn = void (*)(JSContext* cx, JSString* str);
7508 masm.callWithABI<Fn, AssertValidStringPtr>();
7509 break;
7511 case MIRType::Symbol: {
7512 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7513 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7514 break;
7516 case MIRType::BigInt: {
7517 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7518 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7519 break;
7521 default:
7522 MOZ_CRASH();
7525 restoreVolatile();
7527 # endif
7529 masm.bind(&done);
7530 masm.pop(temp);
// Debug-only: verify that a boxed Value result is valid by spilling it to the
// stack and calling AssertValidValue, unless the script has been invalidated.
7533 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7534 const MDefinition* mir) {
7535 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7536 regs.take(input);
7538 Register temp1 = regs.takeAny();
7539 Register temp2 = regs.takeAny();
7540 masm.push(temp1);
7541 masm.push(temp2);
7543 // Don't check if the script has been invalidated. In that case invalid
7544 // types are expected (until we reach the OsiPoint and bailout).
7545 Label done;
7546 branchIfInvalidated(temp1, &done);
7548 // Check that we have a valid GC pointer.
7549 if (JitOptions.fullDebugChecks) {
7550 saveVolatile();
// Pass the Value by address: push it and hand the helper a Value*.
7552 masm.pushValue(input);
7553 masm.moveStackPtrTo(temp1);
7555 using Fn = void (*)(JSContext* cx, Value* v);
7556 masm.setupUnalignedABICall(temp2);
7557 masm.loadJSContext(temp2);
7558 masm.passABIArg(temp2);
7559 masm.passABIArg(temp1);
7560 masm.callWithABI<Fn, AssertValidValue>();
7561 masm.popValue(input);
7562 restoreVolatile();
7565 masm.bind(&done);
7566 masm.pop(temp2);
7567 masm.pop(temp1);
// If |lir| defines a (non-bogus) register result, assert it is a valid
// GC-thing pointer for |mir|'s type.
7570 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7571 MDefinition* mir) {
7572 if (lir->numDefs() == 0) {
7573 return;
7576 MOZ_ASSERT(lir->numDefs() == 1);
7577 if (lir->getDef(0)->isBogusTemp()) {
7578 return;
7581 Register output = ToRegister(lir->getDef(0));
7582 emitAssertGCThingResult(output, mir);
7585 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7586 if (lir->numDefs() == 0) {
7587 return;
7590 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7591 if (!lir->getDef(0)->output()->isRegister()) {
7592 return;
7595 ValueOperand output = ToOutValue(lir);
7597 emitAssertResultV(output, mir);
7600 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7601 // In debug builds, check that LIR instructions return valid values.
7603 MDefinition* mir = ins->mirRaw();
7604 if (!mir) {
7605 return;
7608 switch (mir->type()) {
7609 case MIRType::Object:
7610 case MIRType::String:
7611 case MIRType::Symbol:
7612 case MIRType::BigInt:
7613 emitGCThingResultChecks(ins, mir);
7614 break;
7615 case MIRType::Value:
7616 emitValueResultChecks(ins, mir);
7617 break;
7618 default:
7619 break;
7623 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7624 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7625 return;
7627 if (!lir->snapshot()) {
7628 return;
7630 if (lir->isOsiPoint()) {
7631 return;
7634 masm.comment("emitDebugForceBailing");
7635 const void* bailAfterCounterAddr =
7636 gen->runtime->addressOfIonBailAfterCounter();
7638 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7640 Label done, notBail;
7641 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7642 Imm32(0), &done);
7644 Register temp = regs.takeAny();
7646 masm.push(temp);
7647 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7648 masm.sub32(Imm32(1), temp);
7649 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7651 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
7653 masm.pop(temp);
7654 bailout(lir->snapshot());
7656 masm.bind(&notBail);
7657 masm.pop(temp);
7659 masm.bind(&done);
7661 #endif
7663 bool CodeGenerator::generateBody() {
7664 JitSpewCont(JitSpew_Codegen, "\n");
7665 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7667 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7668 IonScriptCounts* counts = maybeCreateScriptCounts();
7670 const bool compilingWasm = gen->compilingWasm();
7672 for (size_t i = 0; i < graph.numBlocks(); i++) {
7673 current = graph.getBlock(i);
7675 // Don't emit any code for trivial blocks, containing just a goto. Such
7676 // blocks are created to split critical edges, and if we didn't end up
7677 // putting any instructions in them, we can skip them.
7678 if (current->isTrivial()) {
7679 continue;
7682 #ifdef JS_JITSPEW
7683 const char* filename = nullptr;
7684 size_t lineNumber = 0;
7685 JS::LimitedColumnNumberOneOrigin columnNumber;
7686 if (current->mir()->info().script()) {
7687 filename = current->mir()->info().script()->filename();
7688 if (current->mir()->pc()) {
7689 lineNumber = PCToLineNumber(current->mir()->info().script(),
7690 current->mir()->pc(), &columnNumber);
7693 JitSpew(JitSpew_Codegen, "--------------------------------");
7694 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7695 filename ? filename : "?", lineNumber,
7696 columnNumber.oneOriginValue(),
7697 current->mir()->isLoopHeader() ? " (loop header)" : "");
7698 #endif
7700 if (current->mir()->isLoopHeader() && compilingWasm) {
7701 masm.nopAlign(CodeAlignment);
7704 masm.bind(current->label());
7706 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7707 if (counts) {
7708 blockCounts.emplace(&counts->block(i), &masm);
7709 if (!blockCounts->init()) {
7710 return false;
7714 for (LInstructionIterator iter = current->begin(); iter != current->end();
7715 iter++) {
7716 if (!alloc().ensureBallast()) {
7717 return false;
7720 perfSpewer_.recordInstruction(masm, *iter);
7721 #ifdef JS_JITSPEW
7722 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7723 iter->opName());
7724 if (const char* extra = iter->getExtraName()) {
7725 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7727 JitSpewFin(JitSpew_Codegen);
7728 #endif
7730 if (counts) {
7731 blockCounts->visitInstruction(*iter);
7734 #ifdef CHECK_OSIPOINT_REGISTERS
7735 if (iter->safepoint() && !compilingWasm) {
7736 resetOsiPointRegs(iter->safepoint());
7738 #endif
7740 if (!compilingWasm) {
7741 if (MDefinition* mir = iter->mirRaw()) {
7742 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7743 return false;
7748 setElement(*iter); // needed to encode correct snapshot location.
7750 #ifdef DEBUG
7751 emitDebugForceBailing(*iter);
7752 #endif
7754 switch (iter->op()) {
7755 #ifndef JS_CODEGEN_NONE
7756 # define LIROP(op) \
7757 case LNode::Opcode::op: \
7758 visit##op(iter->to##op()); \
7759 break;
7760 LIR_OPCODE_LIST(LIROP)
7761 # undef LIROP
7762 #endif
7763 case LNode::Opcode::Invalid:
7764 default:
7765 MOZ_CRASH("Invalid LIR op");
7768 #ifdef DEBUG
7769 if (!counts) {
7770 emitDebugResultChecks(*iter);
7772 #endif
7774 if (masm.oom()) {
7775 return false;
7779 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7780 return true;
7783 // Out-of-line object allocation for LNewArray.
7784 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7785 LNewArray* lir_;
7787 public:
7788 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7790 void accept(CodeGenerator* codegen) override {
7791 codegen->visitOutOfLineNewArray(this);
7794 LNewArray* lir() const { return lir_; }
7797 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7798 Register objReg = ToRegister(lir->output());
7800 MOZ_ASSERT(!lir->isCall());
7801 saveLive(lir);
7803 JSObject* templateObject = lir->mir()->templateObject();
7805 if (templateObject) {
7806 pushArg(ImmGCPtr(templateObject->shape()));
7807 pushArg(Imm32(lir->mir()->length()));
7809 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7810 callVM<Fn, NewArrayWithShape>(lir);
7811 } else {
7812 pushArg(Imm32(GenericObject));
7813 pushArg(Imm32(lir->mir()->length()));
7815 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7816 callVM<Fn, NewArrayOperation>(lir);
7819 masm.storeCallPointerResult(objReg);
7821 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7822 restoreLive(lir);
7825 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7826 FloatRegister y = ToFloatRegister(lir->y());
7827 FloatRegister x = ToFloatRegister(lir->x());
7829 using Fn = double (*)(double x, double y);
7830 masm.setupAlignedABICall();
7831 masm.passABIArg(y, ABIType::Float64);
7832 masm.passABIArg(x, ABIType::Float64);
7833 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7835 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7838 void CodeGenerator::visitHypot(LHypot* lir) {
7839 uint32_t numArgs = lir->numArgs();
7840 masm.setupAlignedABICall();
7842 for (uint32_t i = 0; i < numArgs; ++i) {
7843 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
7846 switch (numArgs) {
7847 case 2: {
7848 using Fn = double (*)(double x, double y);
7849 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
7850 break;
7852 case 3: {
7853 using Fn = double (*)(double x, double y, double z);
7854 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
7855 break;
7857 case 4: {
7858 using Fn = double (*)(double x, double y, double z, double w);
7859 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
7860 break;
7862 default:
7863 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7865 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7868 void CodeGenerator::visitNewArray(LNewArray* lir) {
7869 Register objReg = ToRegister(lir->output());
7870 Register tempReg = ToRegister(lir->temp());
7871 DebugOnly<uint32_t> length = lir->mir()->length();
7873 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7875 if (lir->mir()->isVMCall()) {
7876 visitNewArrayCallVM(lir);
7877 return;
7880 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7881 addOutOfLineCode(ool, lir->mir());
7883 TemplateObject templateObject(lir->mir()->templateObject());
7884 #ifdef DEBUG
7885 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7886 ObjectElements::VALUES_PER_HEADER;
7887 MOZ_ASSERT(length <= numInlineElements,
7888 "Inline allocation only supports inline elements");
7889 #endif
7890 masm.createGCObject(objReg, tempReg, templateObject,
7891 lir->mir()->initialHeap(), ool->entry());
7893 masm.bind(ool->rejoin());
7896 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7897 visitNewArrayCallVM(ool->lir());
7898 masm.jump(ool->rejoin());
7901 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7902 Register lengthReg = ToRegister(lir->length());
7903 Register objReg = ToRegister(lir->output());
7904 Register tempReg = ToRegister(lir->temp0());
7906 JSObject* templateObject = lir->mir()->templateObject();
7907 gc::Heap initialHeap = lir->mir()->initialHeap();
7909 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7910 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7911 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7912 StoreRegisterTo(objReg));
7914 bool canInline = true;
7915 size_t inlineLength = 0;
7916 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7917 size_t numSlots =
7918 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7919 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7920 } else {
7921 canInline = false;
7924 if (canInline) {
7925 // Try to do the allocation inline if the template object is big enough
7926 // for the length in lengthReg. If the length is bigger we could still
7927 // use the template object and not allocate the elements, but it's more
7928 // efficient to do a single big allocation than (repeatedly) reallocating
7929 // the array later on when filling it.
7930 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7931 ool->entry());
7933 TemplateObject templateObj(templateObject);
7934 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7935 ool->entry());
7937 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7938 ObjectElements::offsetOfLength();
7939 masm.store32(lengthReg, Address(objReg, lengthOffset));
7940 } else {
7941 masm.jump(ool->entry());
7944 masm.bind(ool->rejoin());
7947 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7948 Register objReg = ToRegister(lir->output());
7949 Register tempReg = ToRegister(lir->temp0());
7951 OutOfLineCode* ool;
7952 switch (lir->mir()->type()) {
7953 case MNewIterator::ArrayIterator: {
7954 using Fn = ArrayIteratorObject* (*)(JSContext*);
7955 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7956 StoreRegisterTo(objReg));
7957 break;
7959 case MNewIterator::StringIterator: {
7960 using Fn = StringIteratorObject* (*)(JSContext*);
7961 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7962 StoreRegisterTo(objReg));
7963 break;
7965 case MNewIterator::RegExpStringIterator: {
7966 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7967 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7968 StoreRegisterTo(objReg));
7969 break;
7971 default:
7972 MOZ_CRASH("unexpected iterator type");
7975 TemplateObject templateObject(lir->mir()->templateObject());
7976 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7977 ool->entry());
7979 masm.bind(ool->rejoin());
7982 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7983 Register objReg = ToRegister(lir->output());
7984 Register tempReg = ToRegister(lir->temp0());
7985 Register lengthReg = ToRegister(lir->temp1());
7986 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7988 JSObject* templateObject = lir->mir()->templateObject();
7989 gc::Heap initialHeap = lir->mir()->initialHeap();
7991 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7993 size_t n = ttemplate->length();
7994 MOZ_ASSERT(n <= INT32_MAX,
7995 "Template objects are only created for int32 lengths");
7997 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7998 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7999 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
8000 StoreRegisterTo(objReg));
8002 TemplateObject templateObj(templateObject);
8003 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
8005 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
8006 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
8008 masm.bind(ool->rejoin());
8011 void CodeGenerator::visitNewTypedArrayDynamicLength(
8012 LNewTypedArrayDynamicLength* lir) {
8013 Register lengthReg = ToRegister(lir->length());
8014 Register objReg = ToRegister(lir->output());
8015 Register tempReg = ToRegister(lir->temp0());
8016 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
8018 JSObject* templateObject = lir->mir()->templateObject();
8019 gc::Heap initialHeap = lir->mir()->initialHeap();
8021 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
8023 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
8024 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
8025 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
8026 StoreRegisterTo(objReg));
8028 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
8029 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
8031 TemplateObject templateObj(templateObject);
8032 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
8034 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
8035 ttemplate,
8036 MacroAssembler::TypedArrayLength::Dynamic);
8038 masm.bind(ool->rejoin());
8041 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
8042 pushArg(ToRegister(lir->array()));
8043 pushArg(ImmGCPtr(lir->mir()->templateObject()));
8045 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
8046 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
8049 void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
8050 LNewTypedArrayFromArrayBuffer* lir) {
8051 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
8052 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
8053 pushArg(ToRegister(lir->arrayBuffer()));
8054 pushArg(ImmGCPtr(lir->mir()->templateObject()));
8056 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
8057 HandleValue, HandleValue);
8058 callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
8061 void CodeGenerator::visitBindFunction(LBindFunction* lir) {
8062 Register target = ToRegister(lir->target());
8063 Register temp1 = ToRegister(lir->temp0());
8064 Register temp2 = ToRegister(lir->temp1());
8066 // Try to allocate a new BoundFunctionObject we can pass to the VM function.
8067 // If this fails, we set temp1 to nullptr so we do the allocation in C++.
8068 TemplateObject templateObject(lir->mir()->templateObject());
8069 Label allocOk, allocFailed;
8070 masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
8071 &allocFailed);
8072 masm.jump(&allocOk);
8074 masm.bind(&allocFailed);
8075 masm.movePtr(ImmWord(0), temp1);
8077 masm.bind(&allocOk);
8079 // Set temp2 to the address of the first argument on the stack.
8080 // Note that the Value slots used for arguments are currently aligned for a
8081 // JIT call, even though that's not strictly necessary for calling into C++.
8082 uint32_t argc = lir->mir()->numStackArgs();
8083 if (JitStackValueAlignment > 1) {
8084 argc = AlignBytes(argc, JitStackValueAlignment);
8086 uint32_t unusedStack = UnusedStackBytesForCall(argc);
8087 masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
8088 temp2);
8090 pushArg(temp1);
8091 pushArg(Imm32(lir->mir()->numStackArgs()));
8092 pushArg(temp2);
8093 pushArg(target);
8095 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
8096 uint32_t, Handle<BoundFunctionObject*>);
8097 callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
8100 void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
8101 Register output = ToRegister(lir->output());
8102 Register temp = ToRegister(lir->temp0());
8104 JSObject* templateObj = lir->mir()->templateObj();
8106 using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
8107 OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
8108 lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
8110 TemplateObject templateObject(templateObj);
8111 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
8112 ool->entry());
8114 masm.bind(ool->rejoin());
8117 // Out-of-line object allocation for JSOp::NewObject.
8118 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
8119 LNewObject* lir_;
8121 public:
8122 explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
8124 void accept(CodeGenerator* codegen) override {
8125 codegen->visitOutOfLineNewObject(this);
8128 LNewObject* lir() const { return lir_; }
8131 void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
8132 Register objReg = ToRegister(lir->output());
8134 MOZ_ASSERT(!lir->isCall());
8135 saveLive(lir);
8137 JSObject* templateObject = lir->mir()->templateObject();
8139 // If we're making a new object with a class prototype (that is, an object
8140 // that derives its class from its prototype instead of being
8141 // PlainObject::class_'d) from self-hosted code, we need a different init
8142 // function.
8143 switch (lir->mir()->mode()) {
8144 case MNewObject::ObjectLiteral: {
8145 MOZ_ASSERT(!templateObject);
8146 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8147 pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
8149 using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
8150 callVM<Fn, NewObjectOperation>(lir);
8151 break;
8153 case MNewObject::ObjectCreate: {
8154 pushArg(ImmGCPtr(templateObject));
8156 using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
8157 callVM<Fn, ObjectCreateWithTemplate>(lir);
8158 break;
8162 masm.storeCallPointerResult(objReg);
8164 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
8165 restoreLive(lir);
8168 static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
8169 uint32_t nfixed) {
8170 // Look for StoreFixedSlot instructions following an object allocation
8171 // that write to this object before a GC is triggered or this object is
8172 // passed to a VM call. If all fixed slots will be initialized, the
8173 // allocation code doesn't need to set the slots to |undefined|.
8175 if (nfixed == 0) {
8176 return false;
8179 // Keep track of the fixed slots that are initialized. initializedSlots is
8180 // a bit mask with a bit for each slot.
8181 MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
8182 static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
8183 "Slot bits must fit in 32 bits");
8184 uint32_t initializedSlots = 0;
8185 uint32_t numInitialized = 0;
8187 MInstruction* allocMir = lir->mir();
8188 MBasicBlock* block = allocMir->block();
8190 // Skip the allocation instruction.
8191 MInstructionIterator iter = block->begin(allocMir);
8192 MOZ_ASSERT(*iter == allocMir);
8193 iter++;
8195 // Handle the leading shape guard, if present.
8196 for (; iter != block->end(); iter++) {
8197 if (iter->isConstant()) {
8198 // This instruction won't trigger a GC or read object slots.
8199 continue;
8201 if (iter->isGuardShape()) {
8202 auto* guard = iter->toGuardShape();
8203 if (guard->object() != allocMir || guard->shape() != shape) {
8204 return true;
8206 allocMir = guard;
8207 iter++;
8209 break;
8212 for (; iter != block->end(); iter++) {
8213 if (iter->isConstant() || iter->isPostWriteBarrier()) {
8214 // These instructions won't trigger a GC or read object slots.
8215 continue;
8218 if (iter->isStoreFixedSlot()) {
8219 MStoreFixedSlot* store = iter->toStoreFixedSlot();
8220 if (store->object() != allocMir) {
8221 return true;
8224 // We may not initialize this object slot on allocation, so the
8225 // pre-barrier could read uninitialized memory. Simply disable
8226 // the barrier for this store: the object was just initialized
8227 // so the barrier is not necessary.
8228 store->setNeedsBarrier(false);
8230 uint32_t slot = store->slot();
8231 MOZ_ASSERT(slot < nfixed);
8232 if ((initializedSlots & (1 << slot)) == 0) {
8233 numInitialized++;
8234 initializedSlots |= (1 << slot);
8236 if (numInitialized == nfixed) {
8237 // All fixed slots will be initialized.
8238 MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
8239 return false;
8242 continue;
8245 // Unhandled instruction, assume it bails or reads object slots.
8246 return true;
8249 MOZ_CRASH("Shouldn't get here");
8252 void CodeGenerator::visitNewObject(LNewObject* lir) {
8253 Register objReg = ToRegister(lir->output());
8254 Register tempReg = ToRegister(lir->temp());
8256 if (lir->mir()->isVMCall()) {
8257 visitNewObjectVMCall(lir);
8258 return;
8261 OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
8262 addOutOfLineCode(ool, lir->mir());
8264 TemplateObject templateObject(lir->mir()->templateObject());
8266 masm.createGCObject(objReg, tempReg, templateObject,
8267 lir->mir()->initialHeap(), ool->entry());
8269 masm.bind(ool->rejoin());
8272 void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
8273 visitNewObjectVMCall(ool->lir());
8274 masm.jump(ool->rejoin());
8277 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
8278 Register objReg = ToRegister(lir->output());
8279 Register temp0Reg = ToRegister(lir->temp0());
8280 Register temp1Reg = ToRegister(lir->temp1());
8281 Register shapeReg = ToRegister(lir->temp2());
8283 auto* mir = lir->mir();
8284 const Shape* shape = mir->shape();
8285 gc::Heap initialHeap = mir->initialHeap();
8286 gc::AllocKind allocKind = mir->allocKind();
8288 using Fn =
8289 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
8290 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
8291 lir,
8292 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
8293 Imm32(int32_t(initialHeap))),
8294 StoreRegisterTo(objReg));
8296 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
8298 masm.movePtr(ImmGCPtr(shape), shapeReg);
8299 masm.createPlainGCObject(
8300 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
8301 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
8302 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
8304 #ifdef DEBUG
8305 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
8306 // so ensure the newly created object has the correct shape. Should the guard
8307 // ever fail, we may end up with uninitialized fixed slots, which can confuse
8308 // the GC.
8309 Label ok;
8310 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
8311 &ok);
8312 masm.assumeUnreachable("Newly created object has the correct shape");
8313 masm.bind(&ok);
8314 #endif
8316 masm.bind(ool->rejoin());
8319 void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
8320 Register objReg = ToRegister(lir->output());
8321 Register temp0Reg = ToRegister(lir->temp0());
8322 Register shapeReg = ToRegister(lir->temp1());
8324 auto* mir = lir->mir();
8325 uint32_t arrayLength = mir->length();
8327 gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
8328 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
8329 allocKind = ForegroundToBackgroundAllocKind(allocKind);
8331 uint32_t slotCount = GetGCKindSlots(allocKind);
8332 MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
8333 uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
8335 const Shape* shape = mir->shape();
8337 NewObjectKind objectKind =
8338 mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
8340 using Fn =
8341 ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
8342 OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
8343 lir,
8344 ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
8345 StoreRegisterTo(objReg));
8347 masm.movePtr(ImmPtr(shape), shapeReg);
8348 masm.createArrayWithFixedElements(
8349 objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
8350 allocKind, mir->initialHeap(), ool->entry(),
8351 AllocSiteInput(gc::CatchAllAllocSite::Optimized));
8352 masm.bind(ool->rejoin());
8355 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
8356 Register objReg = ToRegister(lir->output());
8357 Register tempReg = ToRegister(lir->temp0());
8358 const CompileInfo& info = lir->mir()->block()->info();
8360 using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
8361 OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
8362 lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
8364 TemplateObject templateObject(lir->mir()->templateObj());
8366 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8367 ool->entry());
8369 masm.bind(ool->rejoin());
8372 void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
8373 Register objReg = ToRegister(lir->output());
8374 Register tempReg = ToRegister(lir->temp0());
8376 CallObject* templateObj = lir->mir()->templateObject();
8378 using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
8379 OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
8380 lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
8381 StoreRegisterTo(objReg));
8383 // Inline call object creation, using the OOL path only for tricky cases.
8384 TemplateObject templateObject(templateObj);
8385 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
8386 ool->entry());
8388 masm.bind(ool->rejoin());
8391 void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
8392 Register input = ToRegister(lir->input());
8393 Register output = ToRegister(lir->output());
8394 Register temp = ToRegister(lir->temp0());
8396 StringObject* templateObj = lir->mir()->templateObj();
8398 using Fn = JSObject* (*)(JSContext*, HandleString);
8399 OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
8400 StoreRegisterTo(output));
8402 TemplateObject templateObject(templateObj);
8403 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
8404 ool->entry());
8406 masm.loadStringLength(input, temp);
8408 masm.storeValue(JSVAL_TYPE_STRING, input,
8409 Address(output, StringObject::offsetOfPrimitiveValue()));
8410 masm.storeValue(JSVAL_TYPE_INT32, temp,
8411 Address(output, StringObject::offsetOfLength()));
8413 masm.bind(ool->rejoin());
8416 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
8417 Register obj = ToRegister(lir->object());
8418 Register value = ToRegister(lir->value());
8420 pushArg(value);
8421 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
8422 pushArg(obj);
8423 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8425 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
8426 HandleObject);
8427 callVM<Fn, InitElemGetterSetterOperation>(lir);
8430 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
8431 Register objReg = ToRegister(lir->object());
8433 pushArg(ToValue(lir, LMutateProto::ValueIndex));
8434 pushArg(objReg);
8436 using Fn =
8437 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
8438 callVM<Fn, MutatePrototype>(lir);
8441 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
8442 Register obj = ToRegister(lir->object());
8443 Register value = ToRegister(lir->value());
8445 pushArg(value);
8446 pushArg(ImmGCPtr(lir->mir()->name()));
8447 pushArg(obj);
8448 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
8450 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
8451 Handle<PropertyName*>, HandleObject);
8452 callVM<Fn, InitPropGetterSetterOperation>(lir);
8455 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8456 const LAllocation* callee = lir->callee();
8457 const LAllocation* newTarget = lir->newTarget();
8459 if (newTarget->isConstant()) {
8460 pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8461 } else {
8462 pushArg(ToRegister(newTarget));
8465 if (callee->isConstant()) {
8466 pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8467 } else {
8468 pushArg(ToRegister(callee));
8471 using Fn = bool (*)(JSContext* cx, HandleObject callee,
8472 HandleObject newTarget, MutableHandleValue rval);
8473 callVM<Fn, jit::CreateThisFromIon>(lir);
8476 void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
8477 // This should be getting constructed in the first block only, and not any OSR
8478 // entry blocks.
8479 MOZ_ASSERT(lir->mir()->block()->id() == 0);
8481 Register callObj = ToRegister(lir->callObject());
8482 Register temp0 = ToRegister(lir->temp0());
8483 Label done;
8485 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8486 Register objTemp = ToRegister(lir->temp1());
8487 Register cxTemp = ToRegister(lir->temp2());
8489 masm.Push(callObj);
8491 // Try to allocate an arguments object. This will leave the reserved
8492 // slots uninitialized, so it's important we don't GC until we
8493 // initialize these slots in ArgumentsObject::finishForIonPure.
8494 Label failure;
8495 TemplateObject templateObject(templateObj);
8496 masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
8497 &failure,
8498 /* initContents = */ false);
8500 masm.moveStackPtrTo(temp0);
8501 masm.addPtr(Imm32(masm.framePushed()), temp0);
8503 using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
8504 JSObject* scopeChain, ArgumentsObject* obj);
8505 masm.setupAlignedABICall();
8506 masm.loadJSContext(cxTemp);
8507 masm.passABIArg(cxTemp);
8508 masm.passABIArg(temp0);
8509 masm.passABIArg(callObj);
8510 masm.passABIArg(objTemp);
8512 masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
8513 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8515 // Discard saved callObj on the stack.
8516 masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
8517 masm.jump(&done);
8519 masm.bind(&failure);
8520 masm.Pop(callObj);
8523 masm.moveStackPtrTo(temp0);
8524 masm.addPtr(Imm32(frameSize()), temp0);
8526 pushArg(callObj);
8527 pushArg(temp0);
8529 using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
8530 callVM<Fn, ArgumentsObject::createForIon>(lir);
8532 masm.bind(&done);
// Create an ArgumentsObject for an inlined frame. The actual argument Values
// are pushed onto the machine stack in reverse order so they form a
// contiguous array; a fast path then allocates the object and fills it via a
// pure ABI call (ArgumentsObject::finishInlineForIonPure). If allocation or
// finishing fails, we fall back to the VM call
// ArgumentsObject::createForInlinedIon.
8535 void CodeGenerator::visitCreateInlinedArgumentsObject(
8536 LCreateInlinedArgumentsObject* lir) {
8537 Register callObj = ToRegister(lir->getCallObject());
8538 Register callee = ToRegister(lir->getCallee());
8539 Register argsAddress = ToRegister(lir->temp1());
8540 Register argsObj = ToRegister(lir->temp2());
8542 // TODO: Do we have to worry about alignment here?
8544 // Create a contiguous array of values for ArgumentsObject::create
8545 // by pushing the arguments onto the stack in reverse order.
8546 uint32_t argc = lir->mir()->numActuals();
8547 for (uint32_t i = 0; i < argc; i++) {
8548 uint32_t argNum = argc - i - 1;
8549 uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
8550 ConstantOrRegister arg =
8551 toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
8552 masm.Push(arg);
// argsAddress now points at the base of the pushed Values array.
8554 masm.moveStackPtrTo(argsAddress);
8556 Label done;
8557 if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
// Save callObj/callee so the fallback VM path can still use them after
// the fast path has clobbered registers.
8558 LiveRegisterSet liveRegs;
8559 liveRegs.add(callObj);
8560 liveRegs.add(callee);
8562 masm.PushRegsInMask(liveRegs);
8564 // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
8565 // a call instruction.
8566 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
8567 allRegs.take(callObj);
8568 allRegs.take(callee);
8569 allRegs.take(argsObj);
8570 allRegs.take(argsAddress);
8572 Register temp3 = allRegs.takeAny();
8573 Register temp4 = allRegs.takeAny();
8575 // Try to allocate an arguments object. This will leave the reserved slots
8576 // uninitialized, so it's important we don't GC until we initialize these
8577 // slots in ArgumentsObject::finishForIonPure.
8578 Label failure;
8579 TemplateObject templateObject(templateObj);
8580 masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
8581 &failure,
8582 /* initContents = */ false);
8584 Register numActuals = temp3;
8585 masm.move32(Imm32(argc), numActuals);
8587 using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
8588 uint32_t, ArgumentsObject*);
8589 masm.setupAlignedABICall();
8590 masm.loadJSContext(temp4);
8591 masm.passABIArg(temp4);
8592 masm.passABIArg(callObj);
8593 masm.passABIArg(callee);
8594 masm.passABIArg(argsAddress);
8595 masm.passABIArg(numActuals);
8596 masm.passABIArg(argsObj);
8598 masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
// A null return means the pure path failed; take the VM fallback.
8599 masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8601 // Discard saved callObj, callee, and values array on the stack.
8602 masm.addToStackPtr(
8603 Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
8604 argc * sizeof(Value)));
8605 masm.jump(&done);
8607 masm.bind(&failure);
8608 masm.PopRegsInMask(liveRegs);
8610 // Reload argsAddress because it may have been overridden.
8611 masm.moveStackPtrTo(argsAddress);
// Slow path: call into the VM to create the arguments object.
8614 pushArg(Imm32(argc));
8615 pushArg(callObj);
8616 pushArg(callee);
8617 pushArg(argsAddress);
8619 using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
8620 HandleObject, uint32_t);
8621 callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
8623 // Discard the array of values.
8624 masm.freeStack(argc * sizeof(Value));
8626 masm.bind(&done);
// Shared emitter for LGetInlinedArgument / LGetInlinedArgumentHole: selects
// the actual argument at |index| (already bounds-checked by the caller) from
// the inlined-call operands and moves it into |output|. Generates a compare
// chain over the first n-1 indices and falls through to the last one.
8629 template <class GetInlinedArgument>
8630 void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
8631 Register index,
8632 ValueOperand output) {
8633 uint32_t numActuals = lir->mir()->numActuals();
8634 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
8636 // The index has already been bounds-checked, so the code we
8637 // generate here should be unreachable. We can end up in this
8638 // situation in self-hosted code using GetArgument(), or in a
8639 // monomorphically inlined function if we've inlined some CacheIR
8640 // that was created for a different caller.
8641 if (numActuals == 0) {
8642 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8643 return;
8646 // Check the first n-1 possible indices.
8647 Label done;
8648 for (uint32_t i = 0; i < numActuals - 1; i++) {
8649 Label skip;
8650 ConstantOrRegister arg = toConstantOrRegister(
8651 lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
8652 masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
8653 masm.moveValue(arg, output);
8655 masm.jump(&done);
8656 masm.bind(&skip);
8659 #ifdef DEBUG
// In DEBUG builds, verify the fall-through really is the last index.
8660 Label skip;
8661 masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
8662 masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8663 masm.bind(&skip);
8664 #endif
8666 // The index has already been bounds-checked, so load the last argument.
8667 uint32_t lastIdx = numActuals - 1;
8668 ConstantOrRegister arg =
8669 toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
8670 lir->mir()->getArg(lastIdx)->type());
8671 masm.moveValue(arg, output);
8672 masm.bind(&done);
// Load an inlined-frame argument by dynamic index (index is known in-bounds).
8675 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8676 Register index = ToRegister(lir->getIndex());
8677 ValueOperand output = ToOutValue(lir);
8679 emitGetInlinedArgument(lir, index, output);
// Like visitGetInlinedArgument, but out-of-bounds (non-negative) indices
// produce |undefined| instead of being unreachable; negative indices bail out.
8682 void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
8683 Register index = ToRegister(lir->getIndex());
8684 ValueOperand output = ToOutValue(lir);
8686 uint32_t numActuals = lir->mir()->numActuals();
8688 if (numActuals == 0) {
// No actuals: any non-negative index reads the hole (undefined).
8689 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8690 masm.moveValue(UndefinedValue(), output);
8691 return;
8694 Label outOfBounds, done;
8695 masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
8696 &outOfBounds);
8698 emitGetInlinedArgument(lir, index, output);
8699 masm.jump(&done);
8701 masm.bind(&outOfBounds);
8702 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8703 masm.moveValue(UndefinedValue(), output);
8705 masm.bind(&done);
// Read argument |argno| (a compile-time constant) out of an arguments
// object's ArgumentsData. DEBUG builds assert the slot is not the magic
// "forwarded/deleted" sentinel.
8708 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8709 Register temp = ToRegister(lir->temp0());
8710 Register argsObj = ToRegister(lir->argsObject());
8711 ValueOperand out = ToOutValue(lir);
8713 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8714 temp);
8715 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8716 lir->mir()->argno() * sizeof(Value));
8717 masm.loadValue(argAddr, out);
8718 #ifdef DEBUG
8719 Label success;
8720 masm.branchTestMagic(Assembler::NotEqual, out, &success);
8721 masm.assumeUnreachable(
8722 "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8723 masm.bind(&success);
8724 #endif
// Store a Value into argument slot |argno| of an arguments object's
// ArgumentsData, emitting the GC pre-barrier on the old value first.
8727 void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
8728 Register temp = ToRegister(lir->getTemp(0));
8729 Register argsObj = ToRegister(lir->argsObject());
8730 ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
8732 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8733 temp);
8734 Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8735 lir->mir()->argno() * sizeof(Value));
// Pre-barrier must run before overwriting the slot.
8736 emitPreBarrier(argAddr);
8737 #ifdef DEBUG
8738 Label success;
8739 masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
8740 masm.assumeUnreachable(
8741 "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8742 masm.bind(&success);
8743 #endif
8744 masm.storeValue(value, argAddr);
// Load an arguments-object element by dynamic index; bails out on any case
// the inline masm path cannot handle (see loadArgumentsObjectElement).
8747 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8748 Register temp = ToRegister(lir->temp0());
8749 Register argsObj = ToRegister(lir->argsObject());
8750 Register index = ToRegister(lir->index());
8751 ValueOperand out = ToOutValue(lir);
8753 Label bail;
8754 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8755 bailoutFrom(&bail, lir->snapshot());
// Hole-tolerant variant of LLoadArgumentsObjectArg (out-of-bounds reads do
// not bail for that reason alone; see loadArgumentsObjectElementHole).
8758 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8759 LLoadArgumentsObjectArgHole* lir) {
8760 Register temp = ToRegister(lir->temp0());
8761 Register argsObj = ToRegister(lir->argsObject());
8762 Register index = ToRegister(lir->index());
8763 ValueOperand out = ToOutValue(lir);
8765 Label bail;
8766 masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8767 bailoutFrom(&bail, lir->snapshot());
// |index in argumentsObject| test: sets |out| to whether the element exists.
8770 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8771 Register temp = ToRegister(lir->temp0());
8772 Register argsObj = ToRegister(lir->argsObject());
8773 Register index = ToRegister(lir->index());
8774 Register out = ToRegister(lir->output());
8776 Label bail;
8777 masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8778 bailoutFrom(&bail, lir->snapshot());
// Load arguments.length as an int32; bails if the length slot can't be used
// directly (see loadArgumentsObjectLength for the failure conditions).
8781 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8782 Register argsObj = ToRegister(lir->argsObject());
8783 Register out = ToRegister(lir->output());
8785 Label bail;
8786 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8787 bailoutFrom(&bail, lir->snapshot());
// Materialize an ArrayObject from an arguments object via a VM call.
8790 void CodeGenerator::visitArrayFromArgumentsObject(
8791 LArrayFromArgumentsObject* lir) {
8792 pushArg(ToRegister(lir->argsObject()));
8794 using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8795 callVM<Fn, js::ArrayFromArgumentsObject>(lir);
// Guard: bail out if any of the given ArgumentsObject flags are set.
8798 void CodeGenerator::visitGuardArgumentsObjectFlags(
8799 LGuardArgumentsObjectFlags* lir) {
8800 Register argsObj = ToRegister(lir->argsObject());
8801 Register temp = ToRegister(lir->temp0());
8803 Label bail;
8804 masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8805 Assembler::NonZero, &bail);
8806 bailoutFrom(&bail, lir->snapshot());
// Extract the bound-argument count from a BoundFunctionObject's flags slot
// (count is stored in the upper bits, shifted by NumBoundArgsShift).
8809 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8810 Register obj = ToRegister(lir->object());
8811 Register output = ToRegister(lir->output());
8813 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8814 output);
8815 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
// Guard: bail out unless the bound function's IsConstructorFlag is set.
8818 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8819 LGuardBoundFunctionIsConstructor* lir) {
8820 Register obj = ToRegister(lir->object());
8822 Label bail;
8823 Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8824 masm.branchTest32(Assembler::Zero, flagsSlot,
8825 Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8826 bailoutFrom(&bail, lir->snapshot());
// Implement the [[Construct]] return-value rule: if the constructor returned
// an object, use it; otherwise use the freshly-created |this| object.
8829 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8830 ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8831 Register obj = ToRegister(lir->object());
8832 Register output = ToRegister(lir->output());
8834 Label valueIsObject, end;
8836 masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8838 // Value is not an object. Return that other object.
8839 masm.movePtr(obj, output);
8840 masm.jump(&end);
8842 // Value is an object. Return unbox(Value).
8843 masm.bind(&valueIsObject);
8844 Register payload = masm.extractObject(value, output);
8845 if (payload != output) {
8846 masm.movePtr(payload, output);
8849 masm.bind(&end);
// Out-of-line code for LBoxNonStrictThis: taken when the |this| value is not
// already an object (see visitOutOfLineBoxNonStrictThis for the slow path).
8852 class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
8853 LBoxNonStrictThis* ins_;
8855 public:
8856 explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
8857 void accept(CodeGenerator* codegen) override {
8858 codegen->visitOutOfLineBoxNonStrictThis(this);
8860 LBoxNonStrictThis* ins() const { return ins_; }
// Fast path for boxing non-strict |this|: if the value is an object, unbox it
// inline; otherwise jump to the out-of-line path.
8863 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8864 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8865 Register output = ToRegister(lir->output());
8867 auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8868 addOutOfLineCode(ool, lir->mir());
8870 masm.fallibleUnboxObject(value, output, ool->entry());
8871 masm.bind(ool->rejoin());
// Slow path for LBoxNonStrictThis: null/undefined map to the global |this|;
// any other primitive is boxed via the BoxNonStrictThis VM call.
8874 void CodeGenerator::visitOutOfLineBoxNonStrictThis(
8875 OutOfLineBoxNonStrictThis* ool) {
8876 LBoxNonStrictThis* lir = ool->ins();
8878 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8879 Register output = ToRegister(lir->output());
8881 Label notNullOrUndefined;
8883 Label isNullOrUndefined;
8884 ScratchTagScope tag(masm, value);
8885 masm.splitTagForTest(value, tag);
8886 masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
8887 masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
8888 masm.bind(&isNullOrUndefined);
8889 masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
8890 masm.jump(ool->rejoin());
8893 masm.bind(&notNullOrUndefined);
// Other primitives need a VM call; preserve live registers across it.
8895 saveLive(lir);
8897 pushArg(value);
8898 using Fn = JSObject* (*)(JSContext*, HandleValue);
8899 callVM<Fn, BoxNonStrictThis>(lir);
8901 StoreRegisterTo(output).generate(this);
8902 restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
8904 masm.jump(ool->rejoin());
// Compute the implicit |this| for a name lookup via the VM.
8907 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8908 pushArg(ImmGCPtr(lir->mir()->name()));
8909 pushArg(ToRegister(lir->env()));
8911 using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8912 MutableHandleValue);
8913 callVM<Fn, ImplicitThisOperation>(lir);
// Load a dense array's length from its elements header as int32.
8916 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8917 Register elements = ToRegister(lir->elements());
8918 Register output = ToRegister(lir->output());
8920 Address length(elements, ObjectElements::offsetOfLength());
8921 masm.load32(length, output);
8923 // Bail out if the length doesn't fit in int32.
8924 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
// Store |index + 1| into |length|. For a register index this temporarily
// bumps the register, stores it, then restores it, avoiding a scratch reg.
8927 static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
8928 const Address& length) {
8929 if (index->isConstant()) {
8930 masm.store32(Imm32(ToInt32(index) + 1), length);
8931 } else {
8932 Register newLength = ToRegister(index);
8933 masm.add32(Imm32(1), newLength);
8934 masm.store32(newLength, length);
// Restore the index register to its original value.
8935 masm.sub32(Imm32(1), newLength);
// Set a dense array's length to index + 1 (used after a growing store).
8939 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8940 Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8941 SetLengthFromIndex(masm, lir->index(), length);
// Read fun.length inline; bail out for self-hosted-lazy functions or when
// the length property may have been resolved (and thus shadowed).
8944 void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
8945 Register function = ToRegister(lir->function());
8946 Register output = ToRegister(lir->output());
8948 Label bail;
8950 // Get the JSFunction flags.
8951 masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
8952 output);
8954 // Functions with a SelfHostedLazyScript must be compiled with the slow-path
8955 // before the function length is known. If the length was previously resolved,
8956 // the length property may be shadowed.
8957 masm.branchTest32(
8958 Assembler::NonZero, output,
8959 Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
8960 &bail);
8962 masm.loadFunctionLength(function, output, output, &bail);
8964 bailoutFrom(&bail, lir->snapshot());
// Read fun.name inline (empty atom when there is no name); bails out on the
// cases loadFunctionName cannot handle.
8967 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8968 Register function = ToRegister(lir->function());
8969 Register output = ToRegister(lir->output());
8971 Label bail;
8973 const JSAtomState& names = gen->runtime->names();
8974 masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8976 bailoutFrom(&bail, lir->snapshot());
// Compute a pointer to the current front entry of an OrderedHashTable range:
// front = ht.data + i * sizeof(Data). Specialized per table type because
// sizeof(Data) differs (24 bytes for ValueMap entries, 16 for ValueSet).
8979 template <class OrderedHashTable>
8980 static void RangeFront(MacroAssembler&, Register, Register, Register);
8982 template <>
8983 void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
8984 Register front) {
8985 masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
8986 masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
8988 MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
8989 "offsetof(Data, element) is 0");
8990 static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
// i * 24 == (i * 3) << 3; clobbers |i|.
8991 masm.mulBy3(i, i);
8992 masm.lshiftPtr(Imm32(3), i);
8993 masm.addPtr(i, front);
// ValueSet specialization of RangeFront: entries are 16 bytes, so the scaled
// index is simply i << 4.
8996 template <>
8997 void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
8998 Register front) {
8999 masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
9000 masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
9002 MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
9003 "offsetof(Data, element) is 0");
9004 static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
9005 masm.lshiftPtr(Imm32(4), i);
9006 masm.addPtr(i, front);
// Advance an OrderedHashTable range past its current front entry: bump the
// visit count, then scan forward (skipping empty/removed entries, which hold
// a magic key) until a live entry or the end of the data array is reached,
// and store the new index back into the range.
9009 template <class OrderedHashTable>
9010 static void RangePopFront(MacroAssembler& masm, Register range, Register front,
9011 Register dataLength, Register temp) {
9012 Register i = temp;
9014 masm.add32(Imm32(1),
9015 Address(range, OrderedHashTable::Range::offsetOfCount()));
9017 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
9019 Label done, seek;
9020 masm.bind(&seek);
9021 masm.add32(Imm32(1), i);
9022 masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
9024 // We can add sizeof(Data) to |front| to select the next element, because
9025 // |front| and |range.ht.data[i]| point to the same location.
9026 MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
9027 "offsetof(Data, element) is 0");
9028 masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
// Skip entries whose key is the hash-table "empty" magic sentinel.
9030 masm.branchTestMagic(Assembler::Equal,
9031 Address(front, OrderedHashTable::offsetOfEntryKey()),
9032 JS_HASH_KEY_EMPTY, &seek);
9034 masm.bind(&done);
9035 masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
// Unlink an OrderedHashTable range from the table's range list and free its
// malloc'ed storage — unless the iterator object is nursery-allocated, in
// which case the nursery sweep is responsible for freeing the range.
9038 template <class OrderedHashTable>
9039 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
9040 Register range, Register temp0,
9041 Register temp1) {
9042 Register next = temp0;
9043 Register prevp = temp1;
9045 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
9046 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
9047 masm.storePtr(next, Address(prevp, 0));
9049 Label hasNoNext;
9050 masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
9052 masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
9054 masm.bind(&hasNoNext);
9056 Label nurseryAllocated;
9057 masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
9058 &nurseryAllocated);
9060 masm.callFreeStub(range);
9062 masm.bind(&nurseryAllocated);
// Copy the current map entry's key and value into the result array's first
// two fixed elements, with the required GC pre-barriers on the overwritten
// slots and a post-write barrier if either stored value is a nursery cell.
9065 template <>
9066 void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
9067 Register temp,
9068 Register front) {
9069 size_t elementsOffset = NativeObject::offsetOfFixedElements();
9071 Address keyAddress(front, ValueMap::Entry::offsetOfKey());
9072 Address valueAddress(front, ValueMap::Entry::offsetOfValue());
9073 Address keyElemAddress(result, elementsOffset);
9074 Address valueElemAddress(result, elementsOffset + sizeof(Value));
9075 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
9076 masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
9077 masm.storeValue(keyAddress, keyElemAddress, temp);
9078 masm.storeValue(valueAddress, valueElemAddress, temp);
// Post-barrier |result| if the key or the value is a nursery cell.
9080 Label emitBarrier, skipBarrier;
9081 masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
9082 &emitBarrier);
9083 masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
9084 &skipBarrier);
9086 masm.bind(&emitBarrier);
9087 saveVolatile(temp);
9088 emitPostWriteBarrier(result);
9089 restoreVolatile(temp);
9091 masm.bind(&skipBarrier);
// Copy the current set entry's key into the result array's first fixed
// element, with pre-barrier and conditional nursery post-write barrier.
9094 template <>
9095 void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
9096 Register temp,
9097 Register front) {
9098 size_t elementsOffset = NativeObject::offsetOfFixedElements();
9100 Address keyAddress(front, ValueSet::offsetOfEntryKey());
9101 Address keyElemAddress(result, elementsOffset);
9102 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
9103 masm.storeValue(keyAddress, keyElemAddress, temp);
9105 Label skipBarrier;
9106 masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
9107 &skipBarrier);
9109 saveVolatile(temp);
9110 emitPostWriteBarrier(result);
9111 restoreVolatile(temp);
9113 masm.bind(&skipBarrier);
// Shared body for Map/Set iterator stepping. Loads the iterator's Range; if
// the range is null or exhausted, tears it down (RangeDestruct), clears the
// slot, and sets |output| to 1 ("done"). Otherwise it copies the front entry
// into |result| and advances the range, setting |output| to 0.
9116 template <class IteratorObject, class OrderedHashTable>
9117 void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
9118 Register iter = ToRegister(lir->iter());
9119 Register result = ToRegister(lir->result());
9120 Register temp = ToRegister(lir->temp0());
9121 Register dataLength = ToRegister(lir->temp1());
9122 Register range = ToRegister(lir->temp2());
9123 Register output = ToRegister(lir->output());
9125 #ifdef DEBUG
9126 // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
9127 // only called with the correct iterator class. Assert here all self-
9128 // hosted callers of GetNextEntryForIterator perform this class check.
9129 // No Spectre mitigations are needed because this is DEBUG-only code.
9130 Label success;
9131 masm.branchTestObjClassNoSpectreMitigations(
9132 Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
9133 masm.assumeUnreachable("Iterator object should have the correct class.");
9134 masm.bind(&success);
9135 #endif
9137 masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
9138 IteratorObject::RangeSlot)),
9139 range);
9141 Label iterAlreadyDone, iterDone, done;
9142 masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
9144 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
9145 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
9146 dataLength);
9147 masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
9148 dataLength);
9149 masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
// Spill |iter| so its register can serve as the |front| pointer below.
9151 masm.Push(iter);
9153 Register front = iter;
9154 RangeFront<OrderedHashTable>(masm, range, temp, front);
9156 emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
9158 RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
9160 masm.Pop(iter);
// 0 == an entry was produced (not done).
9161 masm.move32(Imm32(0), output);
9163 masm.jump(&done);
9165 masm.bind(&iterDone);
9167 RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
// Clear the range slot so subsequent calls take the already-done path.
9169 masm.storeValue(PrivateValue(nullptr),
9170 Address(iter, NativeObject::getFixedSlotOffset(
9171 IteratorObject::RangeSlot)));
9173 masm.bind(&iterAlreadyDone);
// 1 == iteration is finished.
9175 masm.move32(Imm32(1), output);
9177 masm.bind(&done);
// Dispatch to the Map or Set instantiation of emitGetNextEntryForIterator.
9180 void CodeGenerator::visitGetNextEntryForIterator(
9181 LGetNextEntryForIterator* lir) {
9182 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
9183 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
9184 } else {
9185 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
9186 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
9190 // The point of these is to inform Ion of where these values already are; they
9191 // don't normally generate (much) code.
9192 void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
9193 void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
9194 void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Prepare a wasm stack-result area: zero out every anyref result slot so the
// GC never sees uninitialized references. The zero register is materialized
// lazily, only if at least one ref result exists.
9196 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
9197 LAllocation* output = lir->getDef(0)->output();
9198 MOZ_ASSERT(output->isStackArea());
9199 bool tempInit = false;
9200 for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
9201 // Zero out ref stack results.
9202 if (iter.isWasmAnyRef()) {
9203 Register temp = ToRegister(lir->temp0());
9204 if (!tempInit) {
9205 masm.xorPtr(temp, temp);
9206 tempInit = true;
9208 masm.storePtr(temp, ToAddress(iter.alloc()));
// On 64-bit targets, widen an int32 register result so its upper bits are in
// the canonical state callers expect.
9213 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
9214 #ifdef JS_64BIT
9215 if (MWasmRegisterResult* mir = lir->mir()) {
9216 if (mir->type() == MIRType::Int32) {
9217 masm.widenInt32(ToRegister(lir->output()));
9220 #endif
// Emit a wasm call (or, with ENABLE_WASM_TAIL_CALLS, a return call) for every
// callee kind: direct function, import, asm.js table, wasm table, builtin,
// builtin instance method, and funcref. Also handles try-note bookkeeping for
// catchable calls, safepoint/stackmap recording, and post-call reloading of
// the instance/pinned registers and realm where the callee doesn't preserve
// them. Return-call paths exit early: nothing after the call executes.
9223 void CodeGenerator::visitWasmCall(LWasmCall* lir) {
9224 const MWasmCallBase* callBase = lir->callBase();
9225 bool isReturnCall = lir->isReturnCall();
9227 // If this call is in Wasm try code block, initialise a wasm::TryNote for this
9228 // call.
9229 bool inTry = callBase->inTry();
9230 if (inTry) {
9231 size_t tryNoteIndex = callBase->tryNoteIndex();
9232 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9233 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9234 tryNote.setTryBodyBegin(masm.currentOffset());
9237 MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
9239 static_assert(
9240 WasmStackAlignment >= ABIStackAlignment &&
9241 WasmStackAlignment % ABIStackAlignment == 0,
9242 "The wasm stack alignment should subsume the ABI-required alignment");
9244 #ifdef DEBUG
// Runtime check (DEBUG only) that the stack pointer really is wasm-aligned.
9245 Label ok;
9246 masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
9247 masm.breakpoint();
9248 masm.bind(&ok);
9249 #endif
9251 // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
9252 // instance and pinned regs. The only case where where we don't have to
9253 // reload the instance and pinned regs is when the callee preserves them.
9254 bool reloadRegs = true;
9255 bool switchRealm = true;
9257 const wasm::CallSiteDesc& desc = callBase->desc();
9258 const wasm::CalleeDesc& callee = callBase->callee();
9259 CodeOffset retOffset;
9260 CodeOffset secondRetOffset;
9261 switch (callee.which()) {
9262 case wasm::CalleeDesc::Func:
9263 #ifdef ENABLE_WASM_TAIL_CALLS
9264 if (isReturnCall) {
9265 ReturnCallAdjustmentInfo retCallInfo(
9266 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9267 masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
9268 // The rest of the method is unnecessary for a return call.
9269 return;
9271 #endif
9272 MOZ_ASSERT(!isReturnCall);
9273 retOffset = masm.call(desc, callee.funcIndex());
// Same-module direct calls preserve instance/pinned regs and realm.
9274 reloadRegs = false;
9275 switchRealm = false;
9276 break;
9277 case wasm::CalleeDesc::Import:
9278 #ifdef ENABLE_WASM_TAIL_CALLS
9279 if (isReturnCall) {
9280 ReturnCallAdjustmentInfo retCallInfo(
9281 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9282 masm.wasmReturnCallImport(desc, callee, retCallInfo);
9283 // The rest of the method is unnecessary for a return call.
9284 return;
9286 #endif
9287 MOZ_ASSERT(!isReturnCall);
9288 retOffset = masm.wasmCallImport(desc, callee);
9289 break;
9290 case wasm::CalleeDesc::AsmJSTable:
9291 retOffset = masm.asmCallIndirect(desc, callee);
9292 break;
9293 case wasm::CalleeDesc::WasmTable: {
// Indirect call through a table: may need OOL traps for the bounds
// check and (on targets without a heap register) the null check.
9294 Label* boundsCheckFailed = nullptr;
9295 if (lir->needsBoundsCheck()) {
9296 OutOfLineAbortingWasmTrap* ool =
9297 new (alloc()) OutOfLineAbortingWasmTrap(
9298 wasm::BytecodeOffset(desc.lineOrBytecode()),
9299 wasm::Trap::OutOfBounds);
9300 if (lir->isCatchable()) {
9301 addOutOfLineCode(ool, lir->mirCatchable());
9302 } else if (isReturnCall) {
9303 #ifdef ENABLE_WASM_TAIL_CALLS
9304 addOutOfLineCode(ool, lir->mirReturnCall());
9305 #else
9306 MOZ_CRASH("Return calls are disabled.");
9307 #endif
9308 } else {
9309 addOutOfLineCode(ool, lir->mirUncatchable());
9311 boundsCheckFailed = ool->entry();
9313 Label* nullCheckFailed = nullptr;
9314 #ifndef WASM_HAS_HEAPREG
9316 OutOfLineAbortingWasmTrap* ool =
9317 new (alloc()) OutOfLineAbortingWasmTrap(
9318 wasm::BytecodeOffset(desc.lineOrBytecode()),
9319 wasm::Trap::IndirectCallToNull);
9320 if (lir->isCatchable()) {
9321 addOutOfLineCode(ool, lir->mirCatchable());
9322 } else if (isReturnCall) {
9323 # ifdef ENABLE_WASM_TAIL_CALLS
9324 addOutOfLineCode(ool, lir->mirReturnCall());
9325 # else
9326 MOZ_CRASH("Return calls are disabled.");
9327 # endif
9328 } else {
9329 addOutOfLineCode(ool, lir->mirUncatchable());
9331 nullCheckFailed = ool->entry();
9333 #endif
9334 #ifdef ENABLE_WASM_TAIL_CALLS
9335 if (isReturnCall) {
9336 ReturnCallAdjustmentInfo retCallInfo(
9337 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9338 masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
9339 nullCheckFailed, mozilla::Nothing(),
9340 retCallInfo);
9341 // The rest of the method is unnecessary for a return call.
9342 return;
9344 #endif
9345 MOZ_ASSERT(!isReturnCall);
9346 masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
9347 lir->tableSize(), &retOffset, &secondRetOffset);
9348 // Register reloading and realm switching are handled dynamically inside
9349 // wasmCallIndirect. There are two return offsets, one for each call
9350 // instruction (fast path and slow path).
9351 reloadRegs = false;
9352 switchRealm = false;
9353 break;
9355 case wasm::CalleeDesc::Builtin:
9356 retOffset = masm.call(desc, callee.builtin());
9357 reloadRegs = false;
9358 switchRealm = false;
9359 break;
9360 case wasm::CalleeDesc::BuiltinInstanceMethod:
9361 retOffset = masm.wasmCallBuiltinInstanceMethod(
9362 desc, callBase->instanceArg(), callee.builtin(),
9363 callBase->builtinMethodFailureMode());
9364 switchRealm = false;
9365 break;
9366 case wasm::CalleeDesc::FuncRef:
9367 #ifdef ENABLE_WASM_TAIL_CALLS
9368 if (isReturnCall) {
9369 ReturnCallAdjustmentInfo retCallInfo(
9370 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
9371 masm.wasmReturnCallRef(desc, callee, retCallInfo);
9372 // The rest of the method is unnecessary for a return call.
9373 return;
9375 #endif
9376 MOZ_ASSERT(!isReturnCall);
9377 // Register reloading and realm switching are handled dynamically inside
9378 // wasmCallRef. There are two return offsets, one for each call
9379 // instruction (fast path and slow path).
9380 masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
9381 reloadRegs = false;
9382 switchRealm = false;
9383 break;
9386 // Note the assembler offset for the associated LSafePoint.
9387 MOZ_ASSERT(!isReturnCall);
9388 markSafepointAt(retOffset.offset(), lir);
9390 // Now that all the outbound in-memory args are on the stack, note the
9391 // required lower boundary point of the associated StackMap.
9392 uint32_t framePushedAtStackMapBase =
9393 masm.framePushed() -
9394 wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
9395 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
9396 MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
9397 WasmSafepointKind::LirCall);
9399 // Note the assembler offset and framePushed for use by the adjunct
9400 // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
9401 if (callee.which() == wasm::CalleeDesc::WasmTable) {
9402 lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
9403 framePushedAtStackMapBase);
9406 if (reloadRegs) {
9407 masm.loadPtr(
9408 Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
9409 InstanceReg);
9410 masm.loadWasmPinnedRegsFromInstance();
9411 if (switchRealm) {
9412 masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
9414 } else {
9415 MOZ_ASSERT(!switchRealm);
9418 #ifdef ENABLE_WASM_TAIL_CALLS
9419 switch (callee.which()) {
9420 case wasm::CalleeDesc::Func:
9421 case wasm::CalleeDesc::Import:
9422 case wasm::CalleeDesc::WasmTable:
9423 case wasm::CalleeDesc::FuncRef:
9424 // Stack allocation could change during Wasm (return) calls,
9425 // recover pre-call state.
9426 masm.freeStackTo(masm.framePushed());
9427 break;
9428 default:
9429 break;
9431 #endif // ENABLE_WASM_TAIL_CALLS
9433 if (inTry) {
9434 // Set the end of the try note range
9435 size_t tryNoteIndex = callBase->tryNoteIndex();
9436 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9437 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
9439 // Don't set the end of the try note if we've OOM'ed, as the above
9440 // instructions may not have been emitted, which will trigger an assert
9441 // about zero-length try-notes. This is okay as this compilation will be
9442 // thrown away.
9443 if (!masm.oom()) {
9444 tryNote.setTryBodyEnd(masm.currentOffset());
9447 // This instruction or the adjunct safepoint must be the last instruction
9448 // in the block. No other instructions may be inserted.
9449 LBlock* block = lir->block();
9450 MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
9451 (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
9452 *(++block->rbegin()) == lir));
9454 // Jump to the fallthrough block
9455 jumpToBlock(lir->mirCatchable()->getSuccessor(
9456 MWasmCallCatchable::FallthroughBranchIndex));
9460 #ifdef ENABLE_WASM_JSPI
// Helper for JS Promise Integration: calls the UpdateSuspenderState builtin
// with (instance, suspender, kind), preserving InstanceReg across the call.
// |temp| is clobbered to hold the enum value.
9461 void CodeGenerator::callWasmUpdateSuspenderState(
9462 wasm::UpdateSuspenderStateAction kind, Register suspender, Register temp) {
9463 masm.Push(InstanceReg);
9464 int32_t framePushedAfterInstance = masm.framePushed();
9466 masm.move32(Imm32(uint32_t(kind)), temp);
9468 masm.setupWasmABICall();
9469 masm.passABIArg(InstanceReg);
9470 masm.passABIArg(suspender);
9471 masm.passABIArg(temp);
9472 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9473 masm.callWithABI(wasm::BytecodeOffset(0),
9474 wasm::SymbolicAddress::UpdateSuspenderState,
9475 mozilla::Some(instanceOffset));
9477 masm.Pop(InstanceReg);
// Set up the outgoing frame for a stack-switch trampoline call: reserve
// aligned stack space for two pointer arguments, place |suspender| and
// |data| per the wasm ABI (register or stack slot), and stash InstanceReg at
// its expected before-call stack offset.
9480 void CodeGenerator::prepareWasmStackSwitchTrampolineCall(Register suspender,
9481 Register data) {
9482 // Reserve stack space for the wasm call.
9483 unsigned argDecrement;
// First pass: size the argument area. (The extractor dropped the scoping
// braces here; the second WasmABIArgGenerator below is a separate scope.)
9485 WasmABIArgGenerator abi;
9486 ABIArg arg;
9487 arg = abi.next(MIRType::Pointer);
9488 arg = abi.next(MIRType::Pointer);
9489 argDecrement = StackDecrementForCall(WasmStackAlignment, 0,
9490 abi.stackBytesConsumedSoFar());
9492 masm.reserveStack(argDecrement);
9494 // Pass the suspender and data params through the wasm function ABI registers.
9495 WasmABIArgGenerator abi;
9496 ABIArg arg;
9497 arg = abi.next(MIRType::Pointer);
9498 if (arg.kind() == ABIArg::GPR) {
9499 masm.movePtr(suspender, arg.gpr());
9500 } else {
9501 MOZ_ASSERT(arg.kind() == ABIArg::Stack);
9502 masm.storePtr(suspender,
9503 Address(masm.getStackPointer(), arg.offsetFromArgBase()));
9505 arg = abi.next(MIRType::Pointer);
9506 if (arg.kind() == ABIArg::GPR) {
9507 masm.movePtr(data, arg.gpr());
9508 } else {
9509 MOZ_ASSERT(arg.kind() == ABIArg::Stack);
9510 masm.storePtr(data,
9511 Address(masm.getStackPointer(), arg.offsetFromArgBase()));
9514 masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
9515 WasmCallerInstanceOffsetBeforeCall));
// Codegen for switching execution from the main stack onto the suspendable
// stack (JSPI). Saves the main SP/FP into the SuspenderObjectData, loads the
// suspendable SP, then tail-jumps into the callee wasm function through its
// unchecked entry, with a manufactured return address (`returnCallsite`)
// that lands back here when the suspendable computation returns to main.
9519 void CodeGenerator::visitWasmStackSwitchToSuspendable(
9520 LWasmStackSwitchToSuspendable* lir) {
9521 #ifdef ENABLE_WASM_JSPI
9522 const Register SuspenderReg = lir->suspender()->toRegister().gpr();
9523 const Register FnReg = lir->fn()->toRegister().gpr();
9524 const Register DataReg = lir->data()->toRegister().gpr();
9525 const Register SuspenderDataReg = ABINonArgReg3;
// Pick a platform scratch register that does not alias the ABI argument
// registers used by the trampoline.
9527 # ifdef JS_CODEGEN_ARM64
9528 vixl::UseScratchRegisterScope temps(&masm);
9529 const Register ScratchReg1 = temps.AcquireX().asUnsized();
9530 # elif defined(JS_CODEGEN_X86)
9531 const Register ScratchReg1 = ABINonArgReg3;
9532 # elif defined(JS_CODEGEN_X64)
9533 const Register ScratchReg1 = ScratchReg;
9534 # elif defined(JS_CODEGEN_ARM)
9535 const Register ScratchReg1 = ABINonArgReturnVolatileReg;
9536 # else
9537 # error "NYI: scratch register"
9538 # endif
// The suspender-state update call clobbers volatile registers, so preserve
// the three LIR operands around it.
9540 masm.Push(SuspenderReg);
9541 masm.Push(FnReg);
9542 masm.Push(DataReg);
9544 callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Enter,
9545 SuspenderReg, ScratchReg1);
9546 masm.Pop(DataReg);
9547 masm.Pop(FnReg);
9548 masm.Pop(SuspenderReg);
// Keep the suspender on the stack across the switch so the safepoint below
// can root it (addWasmAnyRefSlot at framePushedAtSuspender).
9550 masm.Push(SuspenderReg);
9551 int32_t framePushedAtSuspender = masm.framePushed();
9552 masm.Push(InstanceReg);
9554 wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
9555 CodeLabel returnCallsite;
9557 // Aligning stack before trampoline call.
9558 uint32_t reserve = ComputeByteAlignment(
9559 masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
9560 masm.reserveStack(reserve);
9562 masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
9563 wasm::SuspenderObjectDataSlot)),
9564 SuspenderDataReg);
9566 // Switch stacks to suspendable, keep original FP to maintain
9567 // frames chain between main and suspendable stack segments.
9568 masm.storeStackPtr(
9569 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
9570 masm.storePtr(
9571 FramePointer,
9572 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()));
9574 masm.loadStackPtr(Address(
9575 SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
9577 masm.assertStackAlignment(WasmStackAlignment);
9579 // The FramePointer is not changed for SwitchToSuspendable.
9580 uint32_t framePushed = masm.framePushed();
9582 // On different stack, reset framePushed. FramePointer is not valid here.
9583 masm.setFramePushed(0);
9585 prepareWasmStackSwitchTrampolineCall(SuspenderReg, DataReg);
9587 // Get wasm instance pointer for callee.
9588 size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
9589 FunctionExtended::WASM_INSTANCE_SLOT);
9590 masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);
9592 masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
9593 WasmCalleeInstanceOffsetBeforeCall));
9594 masm.loadWasmPinnedRegsFromInstance();
9596 masm.assertStackAlignment(WasmStackAlignment);
9598 const Register ReturnAddressReg = ScratchReg1;
9600 // DataReg is not needed anymore, using it as a scratch register.
9601 const Register ScratchReg2 = DataReg;
9603 // Save future of suspendable stack exit frame pointer.
9604 masm.computeEffectiveAddress(
9605 Address(masm.getStackPointer(), -int32_t(sizeof(wasm::Frame))),
9606 ScratchReg2);
9607 masm.storePtr(
9608 ScratchReg2,
9609 Address(SuspenderDataReg,
9610 wasm::SuspenderObjectData::offsetOfSuspendableExitFP()));
9612 masm.mov(&returnCallsite, ReturnAddressReg);
9614 // Call wasm function fast.
9615 # ifdef JS_USE_LINK_REGISTER
9616 masm.mov(ReturnAddressReg, lr);
9617 # else
9618 masm.Push(ReturnAddressReg);
9619 # endif
9620 // Get funcUncheckedCallEntry() from the function's
9621 // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
9622 size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
9623 FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
9624 masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg2);
9625 masm.jump(ScratchReg2);
9627 // About to use valid FramePointer -- restore framePushed.
9628 masm.setFramePushed(framePushed);
9630 // For IsPlausibleStackMapKey check for the following callsite.
9631 masm.wasmTrapInstruction();
9633 // Callsite for return from main stack.
9634 masm.bind(&returnCallsite);
9635 masm.append(desc, *returnCallsite.target());
9636 masm.addCodeLabel(returnCallsite);
9638 masm.assertStackAlignment(WasmStackAlignment);
// Register a StackSwitch-kind safepoint at the return callsite and root the
// pushed suspender for the GC stack map.
9640 markSafepointAt(returnCallsite.target()->offset(), lir);
9641 lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
9642 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
9643 // Rooting SuspenderReg.
9644 masm.propagateOOM(
9645 lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
9647 masm.freeStackTo(framePushed);
9649 masm.freeStack(reserve);
9650 masm.Pop(InstanceReg);
9651 masm.Pop(SuspenderReg);
9653 masm.switchToWasmInstanceRealm(ScratchReg1, ScratchReg2);
9655 callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
9656 SuspenderReg, ScratchReg1);
9657 #else
9658 MOZ_CRASH("NYI");
9659 #endif // ENABLE_WASM_JSPI
// Codegen for suspending: switch execution from the suspendable stack back
// to the main stack. Mirrors visitWasmStackSwitchToSuspendable, but here
// both SP and FP are restored from the saved main-stack values, and the
// resume point (`returnCallsite`) is stashed in the suspender data's
// suspended-return-address field so ContinueOnSuspendable can jump back.
9662 void CodeGenerator::visitWasmStackSwitchToMain(LWasmStackSwitchToMain* lir) {
9663 #ifdef ENABLE_WASM_JSPI
9664 const Register SuspenderReg = lir->suspender()->toRegister().gpr();
9665 const Register FnReg = lir->fn()->toRegister().gpr();
9666 const Register DataReg = lir->data()->toRegister().gpr();
9667 const Register SuspenderDataReg = ABINonArgReg3;
// Per-platform scratch register selection; on x86 ScratchReg1 deliberately
// aliases SuspenderDataReg (handled specially below).
9669 # ifdef JS_CODEGEN_ARM64
9670 vixl::UseScratchRegisterScope temps(&masm);
9671 const Register ScratchReg1 = temps.AcquireX().asUnsized();
9672 # elif defined(JS_CODEGEN_X86)
9673 const Register ScratchReg1 = ABINonArgReg3;
9674 # elif defined(JS_CODEGEN_X64)
9675 const Register ScratchReg1 = ScratchReg;
9676 # elif defined(JS_CODEGEN_ARM)
9677 const Register ScratchReg1 = ABINonArgReturnVolatileReg;
9678 # else
9679 # error "NYI: scratch register"
9680 # endif
// Preserve LIR operands around the Suspend state-update call.
9682 masm.Push(SuspenderReg);
9683 masm.Push(FnReg);
9684 masm.Push(DataReg);
9686 callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Suspend,
9687 SuspenderReg, ScratchReg1);
9689 masm.Pop(DataReg);
9690 masm.Pop(FnReg);
9691 masm.Pop(SuspenderReg);
// Keep the suspender live on the stack so the safepoint can root it.
9693 masm.Push(SuspenderReg);
9694 int32_t framePushedAtSuspender = masm.framePushed();
9695 masm.Push(InstanceReg);
9697 wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
9698 CodeLabel returnCallsite;
9700 // Aligning stack before trampoline call.
9701 uint32_t reserve = ComputeByteAlignment(
9702 masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
9703 masm.reserveStack(reserve);
9705 masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
9706 wasm::SuspenderObjectDataSlot)),
9707 SuspenderDataReg);
9709 // Switch stacks to main.
// Save the suspendable segment's SP/FP, then restore the main segment's.
9710 masm.storeStackPtr(Address(
9711 SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
9712 masm.storePtr(FramePointer,
9713 Address(SuspenderDataReg,
9714 wasm::SuspenderObjectData::offsetOfSuspendableFP()));
9716 masm.loadStackPtr(
9717 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
9718 masm.loadPtr(
9719 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()),
9720 FramePointer);
9722 // Set main_ra field to returnCallsite.
9723 # ifdef JS_CODEGEN_X86
9724 // SuspenderDataReg is also ScratchReg1, use DataReg as a scratch register.
9725 MOZ_ASSERT(ScratchReg1 == SuspenderDataReg);
9726 masm.push(DataReg);
9727 masm.mov(&returnCallsite, DataReg);
9728 masm.storePtr(
9729 DataReg,
9730 Address(SuspenderDataReg,
9731 wasm::SuspenderObjectData::offsetOfSuspendedReturnAddress()));
9732 masm.pop(DataReg);
9733 # else
9734 MOZ_ASSERT(ScratchReg1 != SuspenderDataReg);
9735 masm.mov(&returnCallsite, ScratchReg1);
9736 masm.storePtr(
9737 ScratchReg1,
9738 Address(SuspenderDataReg,
9739 wasm::SuspenderObjectData::offsetOfSuspendedReturnAddress()));
9740 # endif
9742 masm.assertStackAlignment(WasmStackAlignment);
9744 // The FramePointer is pointing to the same
9745 // place as before switch happened.
9746 uint32_t framePushed = masm.framePushed();
9748 // On different stack, reset framePushed. FramePointer is not valid here.
9749 masm.setFramePushed(0);
9751 prepareWasmStackSwitchTrampolineCall(SuspenderReg, DataReg);
9753 // Get wasm instance pointer for callee.
9754 size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
9755 FunctionExtended::WASM_INSTANCE_SLOT);
9756 masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);
9758 masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
9759 WasmCalleeInstanceOffsetBeforeCall));
9760 masm.loadWasmPinnedRegsFromInstance();
9762 masm.assertStackAlignment(WasmStackAlignment);
9764 const Register ReturnAddressReg = ScratchReg1;
9765 // DataReg is not needed anymore, using it as a scratch register.
9766 const Register ScratchReg2 = DataReg;
9768 // Load InstanceReg from suspendable stack exit frame.
9769 masm.loadPtr(Address(SuspenderDataReg,
9770 wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
9771 ScratchReg2);
9772 masm.loadPtr(
9773 Address(ScratchReg2, wasm::FrameWithInstances::callerInstanceOffset()),
9774 ScratchReg2);
9775 masm.storePtr(ScratchReg2, Address(masm.getStackPointer(),
9776 WasmCallerInstanceOffsetBeforeCall));
9778 // Load RA from suspendable stack exit frame.
9779 masm.loadPtr(Address(SuspenderDataReg,
9780 wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
9781 ScratchReg1);
9782 masm.loadPtr(Address(ScratchReg1, wasm::Frame::returnAddressOffset()),
9783 ReturnAddressReg);
9785 // Call wasm function fast.
9786 # ifdef JS_USE_LINK_REGISTER
9787 masm.mov(ReturnAddressReg, lr);
9788 # else
9789 masm.Push(ReturnAddressReg);
9790 # endif
9791 // Get funcUncheckedCallEntry() from the function's
9792 // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
9793 size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
9794 FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
9795 masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg2);
9796 masm.jump(ScratchReg2);
9798 // About to use valid FramePointer -- restore framePushed.
9799 masm.setFramePushed(framePushed);
9801 // For IsPlausibleStackMapKey check for the following callsite.
9802 masm.wasmTrapInstruction();
9804 // Callsite for return from suspendable stack.
9805 masm.bind(&returnCallsite);
9806 masm.append(desc, *returnCallsite.target());
9807 masm.addCodeLabel(returnCallsite);
9809 masm.assertStackAlignment(WasmStackAlignment);
// StackSwitch safepoint at the resume point; root the pushed suspender.
9811 markSafepointAt(returnCallsite.target()->offset(), lir);
9812 lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
9813 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
9814 // Rooting SuspenderReg.
9815 masm.propagateOOM(
9816 lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
9818 masm.freeStackTo(framePushed);
9820 masm.freeStack(reserve);
9821 masm.Pop(InstanceReg);
9822 masm.Pop(SuspenderReg);
9824 masm.switchToWasmInstanceRealm(ScratchReg1, ScratchReg2);
9826 callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Resume,
9827 SuspenderReg, ScratchReg1);
9828 #else
9829 MOZ_CRASH("NYI");
9830 #endif // ENABLE_WASM_JSPI
// Codegen for resuming a previously-suspended computation: switch back onto
// the suspendable stack and jump to the return address saved by
// visitWasmStackSwitchToMain. Unlike the other two switch visitors there is
// no callee function here; control "returns" into the suspended frame via
// the stored suspended-return-address.
9833 void CodeGenerator::visitWasmStackContinueOnSuspendable(
9834 LWasmStackContinueOnSuspendable* lir) {
9835 #ifdef ENABLE_WASM_JSPI
9836 const Register SuspenderReg = lir->suspender()->toRegister().gpr();
9837 const Register SuspenderDataReg = ABINonArgReg3;
9839 # ifdef JS_CODEGEN_ARM64
9840 vixl::UseScratchRegisterScope temps(&masm);
9841 const Register ScratchReg1 = temps.AcquireX().asUnsized();
9842 # elif defined(JS_CODEGEN_X86)
9843 const Register ScratchReg1 = ABINonArgReg2;
9844 # elif defined(JS_CODEGEN_X64)
9845 const Register ScratchReg1 = ScratchReg;
9846 # elif defined(JS_CODEGEN_ARM)
9847 const Register ScratchReg1 = ABINonArgReturnVolatileReg;
9848 # else
9849 # error "NYI: scratch register"
9850 # endif
9851 const Register ScratchReg2 = ABINonArgReg1;
// Keep the suspender live on the stack so the safepoint can root it.
9853 masm.Push(SuspenderReg);
9854 int32_t framePushedAtSuspender = masm.framePushed();
9855 masm.Push(InstanceReg);
9857 wasm::CallSiteDesc desc(wasm::CallSiteDesc::Kind::StackSwitch);
9858 CodeLabel returnCallsite;
9860 // Aligning stack before trampoline call.
9861 uint32_t reserve = ComputeByteAlignment(
9862 masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
9863 masm.reserveStack(reserve);
9865 masm.loadPrivate(Address(SuspenderReg, NativeObject::getFixedSlotOffset(
9866 wasm::SuspenderObjectDataSlot)),
9867 SuspenderDataReg);
// Save the current (main) SP/FP so the suspendable side can switch back.
9868 masm.storeStackPtr(
9869 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainSP()));
9870 masm.storePtr(
9871 FramePointer,
9872 Address(SuspenderDataReg, wasm::SuspenderObjectData::offsetOfMainFP()));
9874 // Adjust exit frame FP.
// Re-link the suspendable exit frame to the current main-stack frame so the
// frame chain stays walkable after the resume.
9875 masm.loadPtr(Address(SuspenderDataReg,
9876 wasm::SuspenderObjectData::offsetOfSuspendableExitFP()),
9877 ScratchReg1);
9878 masm.storePtr(FramePointer,
9879 Address(ScratchReg1, wasm::Frame::callerFPOffset()));
9881 // Adjust exit frame RA.
9882 masm.mov(&returnCallsite, ScratchReg2);
9884 masm.storePtr(ScratchReg2,
9885 Address(ScratchReg1, wasm::Frame::returnAddressOffset()));
9886 // Adjust exit frame caller instance slot.
9887 masm.storePtr(
9888 InstanceReg,
9889 Address(ScratchReg1, wasm::FrameWithInstances::callerInstanceOffset()));
9891 // Switch stacks to suspendable.
9892 masm.loadStackPtr(Address(
9893 SuspenderDataReg, wasm::SuspenderObjectData::offsetOfSuspendableSP()));
9894 masm.loadPtr(Address(SuspenderDataReg,
9895 wasm::SuspenderObjectData::offsetOfSuspendableFP()),
9896 FramePointer);
9898 masm.assertStackAlignment(WasmStackAlignment);
9900 // The FramePointer is pointing to the same
9901 // place as before switch happened.
9902 uint32_t framePushed = masm.framePushed();
9904 // On different stack, reset framePushed. FramePointer is not valid here.
9905 masm.setFramePushed(0);
9907 // Restore shadow stack area and instance slots.
9908 WasmABIArgGenerator abi;
9909 unsigned reserveBeforeCall = abi.stackBytesConsumedSoFar();
9910 MOZ_ASSERT(masm.framePushed() == 0);
9911 unsigned argDecrement =
9912 StackDecrementForCall(WasmStackAlignment, 0, reserveBeforeCall);
9913 masm.reserveStack(argDecrement);
9915 masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
9916 WasmCallerInstanceOffsetBeforeCall));
9917 masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
9918 WasmCalleeInstanceOffsetBeforeCall));
9920 masm.assertStackAlignment(WasmStackAlignment);
9922 const Register ReturnAddressReg = ScratchReg1;
9924 // Pretend we just returned from the function.
9925 masm.loadPtr(
9926 Address(SuspenderDataReg,
9927 wasm::SuspenderObjectData::offsetOfSuspendedReturnAddress()),
9928 ReturnAddressReg);
9929 masm.jump(ReturnAddressReg);
9931 // About to use valid FramePointer -- restore framePushed.
9932 masm.setFramePushed(framePushed);
9934 // For IsPlausibleStackMapKey check for the following callsite.
9935 masm.wasmTrapInstruction();
9937 // Callsite for return from suspendable stack.
9938 masm.bind(&returnCallsite);
9939 masm.append(desc, *returnCallsite.target());
9940 masm.addCodeLabel(returnCallsite);
9942 masm.assertStackAlignment(WasmStackAlignment);
// StackSwitch safepoint at the return-to-main callsite; root the suspender.
9944 markSafepointAt(returnCallsite.target()->offset(), lir);
9945 lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
9946 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
9947 // Rooting SuspenderReg.
9948 masm.propagateOOM(
9949 lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
9951 masm.freeStackTo(framePushed);
9953 masm.freeStack(reserve);
9954 masm.Pop(InstanceReg);
9955 masm.Pop(SuspenderReg);
9957 // Using SuspenderDataReg and ABINonArgReg2 as temps.
9958 masm.switchToWasmInstanceRealm(SuspenderDataReg, ABINonArgReg2);
9960 callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
9961 SuspenderReg, ScratchReg1);
9962 #else
9963 MOZ_CRASH("NYI");
9964 #endif // ENABLE_WASM_JSPI
// Marks the start of the landing-pad block for a catchable wasm call: after
// verifying via release asserts that no instruction or block could have been
// inserted between the call and this pad, records this block's entry offset
// (and current frame depth) as the try note's landing pad.
9967 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
9968 LBlock* block = lir->block();
9969 MWasmCallLandingPrePad* mir = lir->mir();
9970 MBasicBlock* mirBlock = mir->block();
9971 MBasicBlock* callMirBlock = mir->callBlock();
9973 // This block must be the pre-pad successor of the call block. No blocks may
9974 // be inserted between us, such as for critical edge splitting.
9975 MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
9976 MWasmCallCatchable::PrePadBranchIndex));
9978 // This instruction or a move group must be the first instruction in the
9979 // block. No other instructions may be inserted.
9980 MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
9981 *(++block->begin()) == lir));
9983 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9984 wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
9985 // Set the entry point for the call try note to be the beginning of this
9986 // block. The above assertions (and assertions in visitWasmCall) guarantee
9987 // that we are not skipping over instructions that should be executed.
9988 tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
// Registers the extra safepoint that an indirect wasm call needs at a
// secondary code offset, reusing the frame depth recorded on the LIR node.
9991 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9992 LWasmCallIndirectAdjunctSafepoint* lir) {
9993 markSafepointAt(lir->safepointLocation().offset(), lir);
9994 lir->safepoint()->setFramePushedAtStackMapBase(
9995 lir->framePushedAtStackMapBase());
// If `ins` carries a trap annotation, records a NullPointerDereference trap
// site for the machine instruction at `fco`, so a hardware fault at that PC
// is turned into a wasm null-dereference trap. No-op when there is no
// maybeTrap() on the instruction.
9998 template <typename InstructionWithMaybeTrapSite>
9999 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
10000 InstructionWithMaybeTrapSite* ins,
10001 FaultingCodeOffset fco,
10002 wasm::TrapMachineInsn tmi) {
10003 if (!ins->maybeTrap()) {
10004 return;
10006 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
10007 masm.append(wasm::Trap::NullPointerDereference,
10008 wasm::TrapSite(tmi, fco, trapOffset));
// Shared helper for LWasmLoadSlot/LWasmLoadElement: emits the load matching
// `type` (with optional integer widening for sub-word Int32 loads) from
// `addr` into `dst`, and registers a null-check trap site for the faulting
// instruction. Simd128 is handled by the callers, not here.
10011 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
10012 void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
10013 MIRType type, MWideningOp wideningOp,
10014 AddressOrBaseIndex addr,
10015 AnyRegister dst) {
10016 FaultingCodeOffset fco;
10017 switch (type) {
10018 case MIRType::Int32:
// Sub-word loads widen to 32 bits with the requested signedness.
10019 switch (wideningOp) {
10020 case MWideningOp::None:
10021 fco = masm.load32(addr, dst.gpr());
10022 EmitSignalNullCheckTrapSite(masm, ins, fco,
10023 wasm::TrapMachineInsn::Load32);
10024 break;
10025 case MWideningOp::FromU16:
10026 fco = masm.load16ZeroExtend(addr, dst.gpr());
10027 EmitSignalNullCheckTrapSite(masm, ins, fco,
10028 wasm::TrapMachineInsn::Load16);
10029 break;
10030 case MWideningOp::FromS16:
10031 fco = masm.load16SignExtend(addr, dst.gpr());
10032 EmitSignalNullCheckTrapSite(masm, ins, fco,
10033 wasm::TrapMachineInsn::Load16);
10034 break;
10035 case MWideningOp::FromU8:
10036 fco = masm.load8ZeroExtend(addr, dst.gpr());
10037 EmitSignalNullCheckTrapSite(masm, ins, fco,
10038 wasm::TrapMachineInsn::Load8);
10039 break;
10040 case MWideningOp::FromS8:
10041 fco = masm.load8SignExtend(addr, dst.gpr());
10042 EmitSignalNullCheckTrapSite(masm, ins, fco,
10043 wasm::TrapMachineInsn::Load8);
10044 break;
10045 default:
10046 MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
10048 break;
10049 case MIRType::Float32:
10050 MOZ_ASSERT(wideningOp == MWideningOp::None);
10051 fco = masm.loadFloat32(addr, dst.fpu());
10052 EmitSignalNullCheckTrapSite(masm, ins, fco,
10053 wasm::TrapMachineInsn::Load32);
10054 break;
10055 case MIRType::Double:
10056 MOZ_ASSERT(wideningOp == MWideningOp::None);
10057 fco = masm.loadDouble(addr, dst.fpu());
10058 EmitSignalNullCheckTrapSite(masm, ins, fco,
10059 wasm::TrapMachineInsn::Load64);
10060 break;
// Pointer-sized loads: the trap machine insn depends on the word size.
10061 case MIRType::Pointer:
10062 case MIRType::WasmAnyRef:
10063 case MIRType::WasmArrayData:
10064 MOZ_ASSERT(wideningOp == MWideningOp::None);
10065 fco = masm.loadPtr(addr, dst.gpr());
10066 EmitSignalNullCheckTrapSite(masm, ins, fco,
10067 wasm::TrapMachineInsnForLoadWord());
10068 break;
10069 default:
10070 MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
// Shared helper for LWasmStoreSlot/LWasmStoreElement: emits the store
// matching `type` (with optional narrowing for Int32) from `src` to `addr`,
// registering a null-check trap site. WasmAnyRef stores must go through the
// barriered ref path (LWasmStoreElementRef) and are rejected here.
10074 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndex>
10075 void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
10076 MIRType type, MNarrowingOp narrowingOp,
10077 AnyRegister src,
10078 AddressOrBaseIndex addr) {
10079 FaultingCodeOffset fco;
10080 switch (type) {
10081 case MIRType::Int32:
// Int32 values may be narrowed to 16 or 8 bits on store.
10082 switch (narrowingOp) {
10083 case MNarrowingOp::None:
10084 fco = masm.store32(src.gpr(), addr);
10085 EmitSignalNullCheckTrapSite(masm, ins, fco,
10086 wasm::TrapMachineInsn::Store32);
10087 break;
10088 case MNarrowingOp::To16:
10089 fco = masm.store16(src.gpr(), addr);
10090 EmitSignalNullCheckTrapSite(masm, ins, fco,
10091 wasm::TrapMachineInsn::Store16);
10092 break;
10093 case MNarrowingOp::To8:
10094 fco = masm.store8(src.gpr(), addr);
10095 EmitSignalNullCheckTrapSite(masm, ins, fco,
10096 wasm::TrapMachineInsn::Store8);
10097 break;
10098 default:
10099 MOZ_CRASH();
10101 break;
10102 case MIRType::Float32:
10103 fco = masm.storeFloat32(src.fpu(), addr);
10104 EmitSignalNullCheckTrapSite(masm, ins, fco,
10105 wasm::TrapMachineInsn::Store32);
10106 break;
10107 case MIRType::Double:
10108 fco = masm.storeDouble(src.fpu(), addr);
10109 EmitSignalNullCheckTrapSite(masm, ins, fco,
10110 wasm::TrapMachineInsn::Store64);
10111 break;
10112 case MIRType::Pointer:
10113 // This could be correct, but it would be a new usage, so check carefully.
10114 MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
10115 case MIRType::WasmAnyRef:
10116 MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
10117 default:
10118 MOZ_CRASH("unexpected type in ::emitWasmValueStore");
// Loads a typed value from a fixed offset within a container object
// (container + offset addressing). Simd128 is special-cased here because it
// uses an unaligned vector load; all other types defer to emitWasmValueLoad.
10122 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
10123 MIRType type = ins->type();
10124 MWideningOp wideningOp = ins->wideningOp();
10125 Register container = ToRegister(ins->containerRef());
10126 Address addr(container, ins->offset());
10127 AnyRegister dst = ToAnyRegister(ins->output());
10129 #ifdef ENABLE_WASM_SIMD
10130 if (type == MIRType::Simd128) {
10131 MOZ_ASSERT(wideningOp == MWideningOp::None);
10132 FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
10133 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
10134 return;
10136 #endif
10137 emitWasmValueLoad(ins, type, wideningOp, addr, dst);
// Loads a typed value from base[index << scale]. For Simd128 the 16-byte
// element offset is computed manually (index << 4 into a temp) because the
// BaseIndex scale cannot express a 16-byte stride; other types defer to
// emitWasmValueLoad with the LIR-provided scale.
10140 void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
10141 MIRType type = ins->type();
10142 MWideningOp wideningOp = ins->wideningOp();
10143 Scale scale = ins->scale();
10144 Register base = ToRegister(ins->base());
10145 Register index = ToRegister(ins->index());
10146 AnyRegister dst = ToAnyRegister(ins->output());
10148 #ifdef ENABLE_WASM_SIMD
10149 if (type == MIRType::Simd128) {
10150 MOZ_ASSERT(wideningOp == MWideningOp::None);
10151 FaultingCodeOffset fco;
10152 Register temp = ToRegister(ins->temp0());
10153 masm.movePtr(index, temp);
10154 masm.lshiftPtr(Imm32(4), temp);
10155 fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
10156 dst.fpu());
10157 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
10158 return;
10160 #endif
10161 emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
// Stores a typed value to a fixed offset within a container object.
// Narrowing is only meaningful for Int32 (asserted); Simd128 uses an
// unaligned vector store, everything else defers to emitWasmValueStore.
10164 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
10165 MIRType type = ins->type();
10166 MNarrowingOp narrowingOp = ins->narrowingOp();
10167 Register container = ToRegister(ins->containerRef());
10168 Address addr(container, ins->offset());
10169 AnyRegister src = ToAnyRegister(ins->value());
10170 if (type != MIRType::Int32) {
10171 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
10174 #ifdef ENABLE_WASM_SIMD
10175 if (type == MIRType::Simd128) {
10176 FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
10177 EmitSignalNullCheckTrapSite(masm, ins, fco,
10178 wasm::TrapMachineInsn::Store128);
10179 return;
10181 #endif
10182 emitWasmValueStore(ins, type, narrowingOp, src, addr);
// Stores a typed value to base[index << scale]. Mirrors visitWasmLoadElement:
// Simd128 computes its 16-byte offset manually via a temp, other types defer
// to emitWasmValueStore; narrowing is only valid for Int32 (asserted).
10185 void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
10186 MIRType type = ins->type();
10187 MNarrowingOp narrowingOp = ins->narrowingOp();
10188 Scale scale = ins->scale();
10189 Register base = ToRegister(ins->base());
10190 Register index = ToRegister(ins->index());
10191 AnyRegister src = ToAnyRegister(ins->value());
10192 if (type != MIRType::Int32) {
10193 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
10196 #ifdef ENABLE_WASM_SIMD
10197 if (type == MIRType::Simd128) {
10198 Register temp = ToRegister(ins->temp0());
10199 masm.movePtr(index, temp);
10200 masm.lshiftPtr(Imm32(4), temp);
10201 FaultingCodeOffset fco = masm.storeUnalignedSimd128(
10202 src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
10203 EmitSignalNullCheckTrapSite(masm, ins, fco,
10204 wasm::TrapMachineInsn::Store128);
10205 return;
10207 #endif
10208 emitWasmValueStore(ins, type, narrowingOp, src,
10209 BaseIndex(base, index, scale));
// Loads one pointer-sized entry from a wasm table's elements array:
// output = elements[index] (pointer-scaled indexing).
10212 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
10213 Register elements = ToRegister(ins->elements());
10214 Register index = ToRegister(ins->index());
10215 Register output = ToRegister(ins->output());
10216 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
// Computes output = base + constant offset (a derived interior pointer).
10219 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
10220 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
10221 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
// Computes output = base + (index << scale) as a single effective-address
// computation (a derived interior pointer with dynamic index).
10224 void CodeGenerator::visitWasmDerivedIndexPointer(
10225 LWasmDerivedIndexPointer* ins) {
10226 Register base = ToRegister(ins->base());
10227 Register index = ToRegister(ins->index());
10228 Register output = ToRegister(ins->output());
10229 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Stores a GC reference at valueBase+offset with a GC pre-barrier: if the
// LIR requests a Normal pre-barrier, the old value is run through the wasm
// pre-barrier (guarded to skip when unnecessary) before being overwritten.
// The faulting store gets a null-check trap site; the post-write barrier is
// emitted by a separate LIR instruction (see visitWasmPostWriteBarrier*).
10232 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
10233 Register instance = ToRegister(ins->instance());
10234 Register valueBase = ToRegister(ins->valueBase());
10235 size_t offset = ins->offset();
10236 Register value = ToRegister(ins->value());
10237 Register temp = ToRegister(ins->temp0());
10239 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
10240 Label skipPreBarrier;
10241 wasm::EmitWasmPreBarrierGuard(
10242 masm, instance, temp, Address(valueBase, offset), &skipPreBarrier,
10243 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
10244 wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
10245 offset);
10246 masm.bind(&skipPreBarrier);
10249 FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
10250 EmitSignalNullCheckTrapSite(masm, ins, fco,
10251 wasm::TrapMachineInsnForStoreWord());
10252 // The postbarrier is handled separately.
// Indexed variant of visitWasmStoreRef: stores a GC reference at
// base[index] (pointer-scaled) with an optional pre-barrier on the old
// value. The post-write barrier is emitted by a separate LIR instruction.
10255 void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
10256 Register instance = ToRegister(ins->instance());
10257 Register base = ToRegister(ins->base());
10258 Register index = ToRegister(ins->index());
10259 Register value = ToRegister(ins->value());
10260 Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
10261 Register temp1 = ToTempRegisterOrInvalid(ins->temp1());
10263 BaseIndex addr(base, index, ScalePointer);
10265 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
10266 Label skipPreBarrier;
10267 wasm::EmitWasmPreBarrierGuard(
10268 masm, instance, temp0, addr, &skipPreBarrier,
10269 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
10270 wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
10271 masm.bind(&skipPreBarrier);
10274 FaultingCodeOffset fco = masm.storePtr(value, addr);
10275 EmitSignalNullCheckTrapSite(masm, ins, fco,
10276 wasm::TrapMachineInsnForStoreWord());
10277 // The postbarrier is handled separately.
10280 // Out-of-line path to update the store buffer for wasm references.
// Captures the LIR node plus the (base, constant offset) of the written
// slot and a temp register; the visitor folds base+offset and calls the
// Instance::postBarrier builtin.
10281 class OutOfLineWasmCallPostWriteBarrierImmediate
10282 : public OutOfLineCodeBase<CodeGenerator> {
10283 LInstruction* lir_;
10284 Register valueBase_;
10285 Register temp_;
10286 uint32_t valueOffset_;
10288 public:
10289 OutOfLineWasmCallPostWriteBarrierImmediate(LInstruction* lir,
10290 Register valueBase, Register temp,
10291 uint32_t valueOffset)
10292 : lir_(lir),
10293 valueBase_(valueBase),
10294 temp_(temp),
10295 valueOffset_(valueOffset) {}
10297 void accept(CodeGenerator* codegen) override {
10298 codegen->visitOutOfLineWasmCallPostWriteBarrierImmediate(this);
// Accessors for the captured state, used by the OOL visitor.
10301 LInstruction* lir() const { return lir_; }
10302 Register valueBase() const { return valueBase_; }
10303 Register temp() const { return temp_; }
10304 uint32_t valueOffset() const { return valueOffset_; }
// Out-of-line slow path: saves live volatile registers, computes the address
// of the written slot (base + constant offset), and calls the PostBarrier
// builtin with (instance, slot address). InstanceReg is preserved by
// push/pop; control rejoins the inline path afterwards.
10307 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierImmediate(
10308 OutOfLineWasmCallPostWriteBarrierImmediate* ool) {
10309 saveLiveVolatile(ool->lir());
10310 masm.Push(InstanceReg);
10311 int32_t framePushedAfterInstance = masm.framePushed();
10313 // Fold the value offset into the value base
10314 Register valueAddr = ool->valueBase();
10315 Register temp = ool->temp();
10316 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
10318 // Call Instance::postBarrier
10319 masm.setupWasmABICall();
10320 masm.passABIArg(InstanceReg);
10321 masm.passABIArg(temp);
10322 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
10323 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
10324 mozilla::Some(instanceOffset), ABIType::General);
10326 masm.Pop(InstanceReg);
10327 restoreLiveVolatile(ool->lir());
10329 masm.jump(ool->rejoin());
// Inline part of the post-write barrier for a ref stored at a constant
// offset: emits the generational-GC guard and jumps to the out-of-line
// store-buffer update only when the guard requires it.
10332 void CodeGenerator::visitWasmPostWriteBarrierImmediate(
10333 LWasmPostWriteBarrierImmediate* lir) {
10334 Register object = ToRegister(lir->object());
10335 Register value = ToRegister(lir->value());
10336 Register valueBase = ToRegister(lir->valueBase());
10337 Register temp = ToRegister(lir->temp0());
10338 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
10339 auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierImmediate(
10340 lir, valueBase, temp, lir->valueOffset());
10341 addOutOfLineCode(ool, lir->mir());
// Guard falls through to ool->rejoin() when no barrier is needed; otherwise
// take the OOL path.
10343 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
10344 ool->rejoin());
10345 masm.jump(ool->entry());
10346 masm.bind(ool->rejoin());
10349 // Out-of-line path to update the store buffer for wasm references.
// Indexed counterpart of OutOfLineWasmCallPostWriteBarrierImmediate:
// captures (base, index register, element size) of the written slot. The
// element size must be a power of two up to 16 (asserted), since the
// visitor scales the index by it.
10350 class OutOfLineWasmCallPostWriteBarrierIndex
10351 : public OutOfLineCodeBase<CodeGenerator> {
10352 LInstruction* lir_;
10353 Register valueBase_;
10354 Register index_;
10355 Register temp_;
10356 uint32_t elemSize_;
10358 public:
10359 OutOfLineWasmCallPostWriteBarrierIndex(LInstruction* lir, Register valueBase,
10360 Register index, Register temp,
10361 uint32_t elemSize)
10362 : lir_(lir),
10363 valueBase_(valueBase),
10364 index_(index),
10365 temp_(temp),
10366 elemSize_(elemSize) {
10367 MOZ_ASSERT(elemSize == 1 || elemSize == 2 || elemSize == 4 ||
10368 elemSize == 8 || elemSize == 16);
10371 void accept(CodeGenerator* codegen) override {
10372 codegen->visitOutOfLineWasmCallPostWriteBarrierIndex(this);
// Accessors for the captured state, used by the OOL visitor.
10375 LInstruction* lir() const { return lir_; }
10376 Register valueBase() const { return valueBase_; }
10377 Register index() const { return index_; }
10378 Register temp() const { return temp_; }
10379 uint32_t elemSize() const { return elemSize_; }
// Out-of-line slow path for the indexed post-write barrier: computes the
// written slot's address as base + index * elemSize, then calls the
// PostBarrier builtin. elemSize == 16 cannot be expressed as a BaseIndex
// scale, so it is handled with an explicit shift-by-4 and add.
10383 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrierIndex(
10384 OutOfLineWasmCallPostWriteBarrierIndex* ool) {
10385 saveLiveVolatile(ool->lir());
10386 masm.Push(InstanceReg);
10387 int32_t framePushedAfterInstance = masm.framePushed();
10388 // Fold the value offset into the value base
10389 Register temp = ool->temp();
10390 if (ool->elemSize() == 16) {
10391 masm.movePtr(ool->index(), temp);
10392 masm.lshiftPtr(Imm32(4), temp);
10393 masm.addPtr(ool->valueBase(), temp);
10394 } else {
10395 masm.computeEffectiveAddress(BaseIndex(ool->valueBase(), ool->index(),
10396 ScaleFromElemWidth(ool->elemSize())),
10397 temp);
10400 // Call Instance::postBarrier
10401 masm.setupWasmABICall();
10402 masm.passABIArg(InstanceReg);
10403 masm.passABIArg(temp);
10404 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
10405 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
10406 mozilla::Some(instanceOffset), ABIType::General);
10408 masm.Pop(InstanceReg);
10409 restoreLiveVolatile(ool->lir());
10411 masm.jump(ool->rejoin());
// Emit the post-write barrier for a wasm store addressed by a dynamic
// element index; mirrors visitWasmPostWriteBarrierImmediate but defers the
// address computation to the indexed out-of-line path.
void CodeGenerator::visitWasmPostWriteBarrierIndex(
    LWasmPostWriteBarrierIndex* lir) {
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  // Wasm calls expect the instance pointer to live in InstanceReg.
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto* ool = new (alloc()) OutOfLineWasmCallPostWriteBarrierIndex(
      lir, valueBase, index, temp, lir->elemSize());
  addOutOfLineCode(ool, lir->mir());

  // Branches to ool->rejoin() when no barrier is needed.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Load a 64-bit value from a fixed offset inside a wasm object, registering
// the access as a potential null-pointer (signal-handler) trap site.
void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Load a 64-bit element at |base + index * 8|, registering the access as a
// potential null-pointer trap site.
void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Store a 64-bit value to a fixed offset inside a wasm object, registering
// the access as a potential null-pointer trap site.
void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
// Store a 64-bit element at |base + index * 8|, registering the access as a
// potential null-pointer trap site.
void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
// Clamp a 64-bit table index to a 32-bit value. Table64 indices only exist
// in memory64 builds; in other builds reaching this codegen is a bug.
void CodeGenerator::visitWasmClampTable64Index(LWasmClampTable64Index* lir) {
#ifdef ENABLE_WASM_MEMORY64
  Register64 index = ToRegister64(lir->index());
  Register out = ToRegister(lir->output());
  masm.wasmClampTable64Index(index, out);
#else
  MOZ_CRASH("table64 indexes should not be valid without memory64");
#endif
}
10518 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
10519 Register obj = ToRegister(lir->object());
10520 Register out = ToRegister(lir->output());
10521 masm.loadArrayBufferByteLengthIntPtr(obj, out);
10524 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
10525 Register obj = ToRegister(lir->object());
10526 Register out = ToRegister(lir->output());
10527 masm.loadArrayBufferViewLengthIntPtr(obj, out);
10530 void CodeGenerator::visitArrayBufferViewByteOffset(
10531 LArrayBufferViewByteOffset* lir) {
10532 Register obj = ToRegister(lir->object());
10533 Register out = ToRegister(lir->output());
10534 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
10537 void CodeGenerator::visitArrayBufferViewElements(
10538 LArrayBufferViewElements* lir) {
10539 Register obj = ToRegister(lir->object());
10540 Register out = ToRegister(lir->output());
10541 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
10544 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
10545 Register obj = ToRegister(lir->object());
10546 Register out = ToRegister(lir->output());
10548 masm.typedArrayElementSize(obj, out);
10551 void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
10552 LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
10553 Register obj = ToRegister(lir->object());
10554 Register out = ToRegister(lir->output());
10555 Register temp = ToRegister(lir->temp0());
10557 masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
10560 void CodeGenerator::visitResizableTypedArrayLength(
10561 LResizableTypedArrayLength* lir) {
10562 Register obj = ToRegister(lir->object());
10563 Register out = ToRegister(lir->output());
10564 Register temp = ToRegister(lir->temp0());
10566 masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
10567 temp);
10570 void CodeGenerator::visitResizableDataViewByteLength(
10571 LResizableDataViewByteLength* lir) {
10572 Register obj = ToRegister(lir->object());
10573 Register out = ToRegister(lir->output());
10574 Register temp = ToRegister(lir->temp0());
10576 masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
10577 temp);
10580 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
10581 LGrowableSharedArrayBufferByteLength* lir) {
10582 Register obj = ToRegister(lir->object());
10583 Register out = ToRegister(lir->output());
10585 // Explicit |byteLength| accesses are seq-consistent atomic loads.
10586 auto sync = Synchronization::Load();
10588 masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
10591 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
10592 LGuardResizableArrayBufferViewInBounds* lir) {
10593 Register obj = ToRegister(lir->object());
10594 Register temp = ToRegister(lir->temp0());
10596 Label bail;
10597 masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
10598 bailoutFrom(&bail, lir->snapshot());
10601 void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
10602 LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
10603 Register obj = ToRegister(lir->object());
10604 Register temp = ToRegister(lir->temp0());
10606 Label done, bail;
10607 masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
10608 masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
10609 masm.bind(&done);
10610 bailoutFrom(&bail, lir->snapshot());
10613 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
10614 LGuardHasAttachedArrayBuffer* lir) {
10615 Register obj = ToRegister(lir->object());
10616 Register temp = ToRegister(lir->temp0());
10618 Label bail;
10619 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
10620 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LGuardNumberToIntPtrIndex when out-of-bounds indices
// are supported: instead of bailing out, the OOL code substitutes a sentinel
// out-of-bounds index (see visitOutOfLineGuardNumberToIntPtrIndex).
class OutOfLineGuardNumberToIntPtrIndex
    : public OutOfLineCodeBase<CodeGenerator> {
  LGuardNumberToIntPtrIndex* lir_;

 public:
  explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
      : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
  }

  LGuardNumberToIntPtrIndex* lir() const { return lir_; }
};
// Convert a double to an intptr-sized index. When the MIR node does not
// support out-of-bounds indices, an unrepresentable value bails out;
// otherwise the out-of-line path replaces it with a sentinel OOB index.
void CodeGenerator::visitGuardNumberToIntPtrIndex(
    LGuardNumberToIntPtrIndex* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  if (!lir->mir()->supportOOB()) {
    Label bail;
    masm.convertDoubleToPtr(input, output, &bail, false);
    bailoutFrom(&bail, lir->snapshot());
    return;
  }

  auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.convertDoubleToPtr(input, output, ool->entry(), false);
  masm.bind(ool->rejoin());
}
10656 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
10657 OutOfLineGuardNumberToIntPtrIndex* ool) {
10658 // Substitute the invalid index with an arbitrary out-of-bounds index.
10659 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
10660 masm.jump(ool->rejoin());
10663 void CodeGenerator::visitStringLength(LStringLength* lir) {
10664 Register input = ToRegister(lir->string());
10665 Register output = ToRegister(lir->output());
10667 masm.loadStringLength(input, output);
// Compute the int32 min or max of two operands. Lowering ties the first
// operand to the output, so we conditionally overwrite it with the second.
void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
  Register first = ToRegister(ins->first());
  Register output = ToRegister(ins->output());

  // Lowering reuses the first input register as the output.
  MOZ_ASSERT(first == output);

  // For max, keep |first| when it is strictly greater than |second|; for
  // min, when it is strictly less.
  Assembler::Condition cond =
      ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;

  if (ins->second()->isConstant()) {
    // Keep |first| if the condition holds, otherwise load the constant.
    Label done;
    masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
    masm.move32(Imm32(ToInt32(ins->second())), output);
    masm.bind(&done);
  } else {
    // Branchless form: conditionally replace |output| with |second|
    // (see MacroAssembler::cmp32Move32 for operand semantics).
    Register second = ToRegister(ins->second());
    masm.cmp32Move32(cond, second, first, second, output);
  }
}
10690 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
10691 Register array = ToRegister(ins->array());
10692 Register output = ToRegister(ins->output());
10693 Register temp1 = ToRegister(ins->temp1());
10694 Register temp2 = ToRegister(ins->temp2());
10695 Register temp3 = ToRegister(ins->temp3());
10696 bool isMax = ins->isMax();
10698 Label bail;
10699 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
10700 bailoutFrom(&bail, ins->snapshot());
10703 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
10704 Register array = ToRegister(ins->array());
10705 FloatRegister output = ToFloatRegister(ins->output());
10706 Register temp1 = ToRegister(ins->temp1());
10707 Register temp2 = ToRegister(ins->temp2());
10708 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
10709 bool isMax = ins->isMax();
10711 Label bail;
10712 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
10713 bailoutFrom(&bail, ins->snapshot());
// For Abs*, lowering will have tied input to output on platforms where that is
// sensible, and otherwise left them untied.

// |Math.abs| for int32. In the fallible case we must bail out for INT32_MIN,
// whose negation does not fit in int32.
void CodeGenerator::visitAbsI(LAbsI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  if (ins->mir()->fallible()) {
    Label positive;
    if (input != output) {
      masm.move32(input, output);
    }
    // Non-negative values are already their own absolute value.
    masm.branchTest32(Assembler::NotSigned, output, output, &positive);
    Label bail;
    // Negating INT32_MIN overflows; bail out in that case.
    masm.branchNeg32(Assembler::Overflow, output, &bail);
    bailoutFrom(&bail, ins->snapshot());
    masm.bind(&positive);
  } else {
    masm.abs32(input, output);
  }
}
10738 void CodeGenerator::visitAbsD(LAbsD* ins) {
10739 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
10742 void CodeGenerator::visitAbsF(LAbsF* ins) {
10743 masm.absFloat32(ToFloatRegister(ins->input()),
10744 ToFloatRegister(ins->output()));
10747 void CodeGenerator::visitPowII(LPowII* ins) {
10748 Register value = ToRegister(ins->value());
10749 Register power = ToRegister(ins->power());
10750 Register output = ToRegister(ins->output());
10751 Register temp0 = ToRegister(ins->temp0());
10752 Register temp1 = ToRegister(ins->temp1());
10754 Label bailout;
10755 masm.pow32(value, power, output, temp0, temp1, &bailout);
10756 bailoutFrom(&bailout, ins->snapshot());
// |base ** exponent| with a double base and int32 exponent, via a call to
// js::powi.
void CodeGenerator::visitPowI(LPowI* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  Register power = ToRegister(ins->power());

  using Fn = double (*)(double x, int32_t y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power);

  masm.callWithABI<Fn, js::powi>(ABIType::Float64);
  // The result is produced in the double return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// |base ** exponent| with double operands, via a call to ecmaPow.
void CodeGenerator::visitPowD(LPowD* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  FloatRegister power = ToFloatRegister(ins->power());

  using Fn = double (*)(double x, double y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power, ABIType::Float64);
  masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);

  // The result is produced in the double return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// |base ** power| where |base| is a constant power of two: compute
// 2^(n*power) via repeated left shifts, bailing out when the result would
// not fit in int32.
void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
  Register power = ToRegister(ins->power());
  Register output = ToRegister(ins->output());

  uint32_t base = ins->base();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(base));

  uint32_t n = mozilla::FloorLog2(base);
  MOZ_ASSERT(n != 0);

  // Hacker's Delight, 2nd edition, theorem D2.
  auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };

  // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
  // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
  //
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  // (AboveOrEqual is an unsigned comparison, so negative |power| also fails.)
  bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
               ins->snapshot());

  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
  // |power| and perform a single shift, but due to the lack of necessary
  // MacroAssembler functionality, like multiplying a register with an
  // immediate, we restrict the number of generated shift instructions when
  // lowering this operation.
  masm.move32(Imm32(1), output);
  do {
    masm.lshift32(power, output);
    n--;
  } while (n > 0);
}
10818 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
10819 FloatRegister input = ToFloatRegister(ins->input());
10820 FloatRegister output = ToFloatRegister(ins->output());
10821 masm.sqrtDouble(input, output);
10824 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
10825 FloatRegister input = ToFloatRegister(ins->input());
10826 FloatRegister output = ToFloatRegister(ins->output());
10827 masm.sqrtFloat32(input, output);
10830 void CodeGenerator::visitSignI(LSignI* ins) {
10831 Register input = ToRegister(ins->input());
10832 Register output = ToRegister(ins->output());
10833 masm.signInt32(input, output);
10836 void CodeGenerator::visitSignD(LSignD* ins) {
10837 FloatRegister input = ToFloatRegister(ins->input());
10838 FloatRegister output = ToFloatRegister(ins->output());
10839 masm.signDouble(input, output);
10842 void CodeGenerator::visitSignDI(LSignDI* ins) {
10843 FloatRegister input = ToFloatRegister(ins->input());
10844 FloatRegister temp = ToFloatRegister(ins->temp0());
10845 Register output = ToRegister(ins->output());
10847 Label bail;
10848 masm.signDoubleToInt32(input, output, temp, &bail);
10849 bailoutFrom(&bail, ins->snapshot());
// Call the C++ implementation of a unary double math function; the concrete
// function pointer is selected from the MIR node's UnaryMathFunction.
void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  // The ABI call produces its result in the double return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  UnaryMathFunction fun = ins->mir()->function();
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  masm.setupAlignedABICall();

  masm.passABIArg(input, ABIType::Float64);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   ABIType::Float64);
}
// Call the C++ implementation of a unary float32 math function, selected by
// a switch over the MIR node's UnaryMathFunction.
void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  // The ABI call produces its result in the float32 return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);

  masm.setupAlignedABICall();
  masm.passABIArg(input, ABIType::Float32);

  using Fn = float (*)(float x);
  Fn funptr = nullptr;
  CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
  switch (ins->mir()->function()) {
    case UnaryMathFunction::Floor:
      // floorf is an external libm symbol, so skip the unsafe-call check.
      funptr = floorf;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    case UnaryMathFunction::Round:
      funptr = math_roundf_impl;
      break;
    case UnaryMathFunction::Trunc:
      funptr = math_truncf_impl;
      break;
    case UnaryMathFunction::Ceil:
      // ceilf is an external libm symbol, so skip the unsafe-call check.
      funptr = ceilf;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    default:
      MOZ_CRASH("Unknown or unsupported float32 math function");
  }

  masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
}
// JS |%| on doubles (non-wasm): call NumberMod in C++.
void CodeGenerator::visitModD(LModD* ins) {
  // Wasm code must use visitWasmBuiltinModD, which preserves InstanceReg.
  MOZ_ASSERT(!gen->compilingWasm());

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  // The ABI call produces its result in the double return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  using Fn = double (*)(double a, double b);
  masm.setupAlignedABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);
  masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
}
// Inline |lhs % divisor| for a constant power-of-two divisor, avoiding a
// call into C++.
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  uint32_t divisor = ins->divisor();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));

  FloatRegister output = ToFloatRegister(ins->output());

  // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
  //
  // This doesn't work if |d| isn't a power of two, because we may lose too much
  // precision. For example |Number.MAX_VALUE % 3 == 2|, but
  // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
  Label done;
  {
    ScratchDoubleScope scratch(masm);

    // Subnormals can lead to performance degradation, which can make calling
    // |fmod| faster than this inline implementation. Work around this issue by
    // directly returning the input for any value in the interval ]-1, +1[.
    Label notSubnormal;
    masm.loadConstantDouble(1.0, scratch);
    masm.loadConstantDouble(-1.0, output);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
                      &notSubnormal);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
                      &notSubnormal);

    masm.moveDouble(lhs, output);
    masm.jump(&done);

    masm.bind(&notSubnormal);

    if (divisor == 1) {
      // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
      // the multiplication by one in this case.
      masm.moveDouble(lhs, output);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
      masm.subDouble(scratch, output);
    } else {
      masm.loadConstantDouble(1.0 / double(divisor), scratch);
      masm.loadConstantDouble(double(divisor), output);

      // scratch = divisor * trunc(lhs / divisor)
      masm.mulDouble(lhs, scratch);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
      masm.mulDouble(output, scratch);

      masm.moveDouble(lhs, output);
      masm.subDouble(scratch, output);
    }
  }

  // The sign of the result matches the sign of the dividend.
  masm.copySignDouble(output, lhs, output);
  masm.bind(&done);
}
// Wasm double modulo: call the ModD builtin, spilling and reloading
// InstanceReg around the call.
void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  // The ABI call produces its result in the double return register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  masm.setupWasmABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);

  // Offset of the spilled instance pointer relative to the current frame.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
                   mozilla::Some(instanceOffset), ABIType::Float64);

  masm.Pop(InstanceReg);
}
// BigInt addition. Fast path for operands whose value fits in a single
// pointer-sized digit; everything else falls back to BigInt::add in the VM.
void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n + x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x + 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Pointer-sized addition; overflow means the result needs more than one
  // digit, so defer to the VM.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt subtraction. Fast path for single-digit operands, with VM fallback
// to BigInt::sub.
void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x - 0n == x
  // (Note: 0n - x == -x still needs the general path, so only the rhs is
  // special-cased here.)
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Pointer-sized subtraction; overflow defers to the VM.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt multiplication. Fast path for single-digit operands, with VM
// fallback to BigInt::mul.
void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n * x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x * 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // Pointer-sized multiplication; overflow defers to the VM.
  masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// BigInt division. Fast path for single-digit operands, with VM fallback to
// BigInt::div (which also reports the division-by-zero error).
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x / 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n / x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation which might trigger a minor GC to free up nursery space. This
  // requires us to apply the same optimization here, otherwise we'd end up with
  // always entering the OOL call, because the nursery is never evicted.
  Label notOne;
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notOne);

  // Smallest value representable in a signed digit (INT32_MIN or INT64_MIN).
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
  masm.bind(&notOverflow);

  emitBigIntDiv(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt modulo. Fast path for single-digit operands, with VM fallback to
// BigInt::mod (which also reports the division-by-zero error).
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n % x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
  masm.loadBigIntAbsolute(rhs, temp2, ool->entry());

  // Similar to the case for BigInt division, we must apply the same allocation
  // optimizations as performed in |BigInt::mod()|.
  // |abs(lhs) < abs(rhs)| implies |lhs % rhs == lhs|, so no allocation.
  Label notBelow;
  masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notBelow);

  // Convert both digits to signed pointer-sized values.
  masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
  masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());

  // Smallest value representable in a signed digit (INT32_MIN or INT64_MIN).
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  // The remainder in that case is 0, so substitute a zero dividend.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
  masm.movePtr(ImmWord(0), temp1);
  masm.bind(&notOverflow);

  emitBigIntMod(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// BigInt exponentiation. Inline square-and-multiply loop for operands whose
// intermediate results fit in a pointer-sized digit; VM fallback is
// BigInt::pow (which also reports the negative-exponent error).
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  // Register aliases for the inline pow loop below.
  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);

  masm.loadFirstBigIntDigitOrZero(rhs, exponent);

  Label lhsNonNegative;
  masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
  // lhs is -1n: for an even exponent fall through to |done| with p == 1;
  // for an odd exponent return lhs (-1n) itself.
  masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
  masm.bind(&lhsNonNegative);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  {
    // m = base
    // n = exponent

    Label start, loop;
    masm.jump(&start);
    masm.bind(&loop);

    // m *= m
    // Any overflow defers to the VM call.
    masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

    masm.bind(&start);

    // if ((n & 1) != 0) p *= m
    Label even;
    masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
    masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
    masm.bind(&even);

    // n >>= 1
    // if (n == 0) return p
    masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
  }

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise AND. The fast path handles operands whose magnitude fits
// in a single pointer-sized digit; everything else goes to the BigInt::bitAnd
// VM call (which is also the bailout target for GC allocation failure).
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n & x == 0n
  // Return the existing zero BigInt instead of allocating a new one.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x & 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.andPtr(temp2, temp1);

  // Create and return the result. |temp1| holds the single result digit.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise OR. Mirrors visitBigIntBitAnd: single-digit operands are
// combined inline, anything larger calls BigInt::bitOr in the VM.
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
                                           StoreRegisterTo(output));

  // 0n | x == x
  // BigInts are immutable, so the other operand can be returned as-is.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x | 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.orPtr(temp2, temp1);

  // Create and return the result. |temp1| holds the single result digit.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise XOR. Mirrors visitBigIntBitAnd/BitOr: single-digit
// operands are combined inline, anything larger calls BigInt::bitXor in the VM.
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n ^ x == x
  // BigInts are immutable, so the other operand can be returned as-is.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x ^ 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.xorPtr(temp2, temp1);

  // Create and return the result. |temp1| holds the single result digit.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt left shift (|lhs << rhs|). Inlines the single-digit case of
// BigInt::lsh; negative shift counts are handled as right shifts. Any case
// that could grow past one digit falls back to the BigInt::lsh VM call.
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n << x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x << 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.

  // |temp2| holds the absolute value of the shift amount.
  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register. |temp1| holds the absolute value of |lhs|.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);

  // x << DigitBits with x != 0n always exceeds pointer-sized storage.
  masm.branchIfBigIntIsNonNegative(rhs, ool->entry());

  // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
  // (|create| applies the sign of |lhs|, so the magnitude 1 below yields -1n.)
  masm.move32(Imm32(0), temp1);
  masm.branchIfBigIntIsNonNegative(lhs, &create);
  masm.move32(Imm32(1), temp1);
  masm.jump(&create);

  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);

  // Save the unshifted digit so the rounding check below can inspect the
  // bits that get shifted out.
  masm.movePtr(temp1, temp3);

  // |x << -y| is computed as |x >> y|.
  masm.rshiftPtr(temp2, temp1);

  // For negative numbers, round down if any bit was shifted out.
  masm.branchIfBigIntIsNonNegative(lhs, &create);

  // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
  masm.movePtr(ImmWord(-1), output);
  masm.lshiftPtr(temp2, output);
  masm.notPtr(output);

  // Add plus one when |(lhs.digit(0) & mask) != 0|.
  masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
  masm.addPtr(ImmWord(1), temp1);
  masm.jump(&create);

  masm.bind(&nonNegative);

  // Preserve the shift amount; |temp2| is clobbered by the overflow check.
  masm.movePtr(temp2, temp3);

  // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
  masm.negPtr(temp2);
  masm.addPtr(Imm32(BigInt::DigitBits), temp2);
  masm.movePtr(temp1, output);
  masm.rshiftPtr(temp2, output);

  // Call into the VM when any bit will be shifted out.
  masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

  masm.movePtr(temp3, temp2);
  masm.lshiftPtr(temp2, temp1);

  masm.bind(&create);

  // Create and return the result. |temp1| holds the result's magnitude.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit BigInt right shift (|lhs >> rhs|). Inlines the single-digit case of
// BigInt::rsh; negative shift counts are handled as left shifts. Any case
// that could grow past one digit falls back to the BigInt::rsh VM call.
// Structurally the mirror image of visitBigIntLsh above.
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n >> x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x >> 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.

  // |temp2| holds the absolute value of the shift amount.
  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register. |temp1| holds the absolute value of |lhs|.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);

  // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
  masm.branchIfBigIntIsNegative(rhs, ool->entry());

  // x >> DigitBits is either 0n or -1n.
  // (|create| applies the sign of |lhs|, so the magnitude 1 below yields -1n.)
  masm.move32(Imm32(0), temp1);
  masm.branchIfBigIntIsNonNegative(lhs, &create);
  masm.move32(Imm32(1), temp1);
  masm.jump(&create);

  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);

  // Preserve the shift amount; |temp2| is clobbered by the overflow check.
  masm.movePtr(temp2, temp3);

  // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
  masm.negPtr(temp2);
  masm.addPtr(Imm32(BigInt::DigitBits), temp2);
  masm.movePtr(temp1, output);
  masm.rshiftPtr(temp2, output);

  // Call into the VM when any bit will be shifted out.
  masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

  // |x >> -y| is computed as |x << y|.
  masm.movePtr(temp3, temp2);
  masm.lshiftPtr(temp2, temp1);
  masm.jump(&create);

  masm.bind(&nonNegative);

  // Save the unshifted digit so the rounding check below can inspect the
  // bits that get shifted out.
  masm.movePtr(temp1, temp3);

  masm.rshiftPtr(temp2, temp1);

  // For negative numbers, round down if any bit was shifted out.
  masm.branchIfBigIntIsNonNegative(lhs, &create);

  // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
  masm.movePtr(ImmWord(-1), output);
  masm.lshiftPtr(temp2, output);
  masm.notPtr(output);

  // Add plus one when |(lhs.digit(0) & mask) != 0|.
  masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
  masm.addPtr(ImmWord(1), temp1);

  masm.bind(&create);

  // Create and return the result. |temp1| holds the result's magnitude.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit BigInt increment (|input + 1n|). Handles signed single-digit values
// inline; overflow or multi-digit inputs call BigInt::inc in the VM.
void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // Signed add; overflow means the result needs more than one digit.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result. |temp1| holds the signed result digit.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt decrement (|input - 1n|). Handles signed single-digit values
// inline; overflow or multi-digit inputs call BigInt::dec in the VM.
void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // Signed subtract; overflow means the result needs more than one digit.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result. |temp1| holds the signed result digit.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt unary negation. Copies the input (when its digits fit inline)
// and flips the sign bit; heap-digit inputs call BigInt::neg in the VM.
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n
  // Return the existing zero BigInt; zero must never carry the sign bit.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise NOT (|~input| == -input - 1). Works on the absolute
// value and sets the sign afterwards; multi-digit or overflowing inputs call
// BigInt::bitNot in the VM.
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  // |temp1| holds the absolute value of the input.
  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);

  // ~(-x) == ~(~(x-1)) == x-1
  masm.subPtr(Imm32(1), temp1);
  masm.jump(&done);

  masm.bind(&nonNegative);

  // ~x == -x-1 == -(x+1)
  // Unsigned add: a carry means |x+1| doesn't fit in one digit.
  masm.movePtr(ImmWord(1), temp2);
  masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());

  masm.bind(&done);

  // Create and return the result. |temp1| holds the result's magnitude.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive.
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit |Number.prototype.toString(base)| for an int32 input. The base is
// either a compile-time constant or a register; both variants fall back to
// js::Int32ToStringWithBase in the VM when the inline conversion fails.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    // The register variant needs the volatile live set because the inline
    // path itself can make calls.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Emit |parseInt(string, radix)|. Strings with a cached index value are
// converted inline; everything else calls js::NumberParseInt in the VM.
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  // The fast path below is only valid when the radix doesn't change the
  // interpretation of an index-valued string.
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);

  masm.bind(&vmCall);

  pushArg(radix);
  pushArg(string);

  using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
  callVM<Fn, js::NumberParseInt>(lir);

  masm.bind(&done);
}
// Emit |parseInt(double)| for inputs known to be doubles. Bails out (to a
// snapshot, not a VM call) for NaN, non-int32 values, and small values where
// parseInt's string round-trip would produce a different result.
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // NaN compares unordered with itself.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);

  // Accept both +0 and -0 and return 0.
  masm.loadConstantDouble(0.0, temp);
  masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

  // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
  masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
  masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);

  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
11844 void CodeGenerator::visitFloor(LFloor* lir) {
11845 FloatRegister input = ToFloatRegister(lir->input());
11846 Register output = ToRegister(lir->output());
11848 Label bail;
11849 masm.floorDoubleToInt32(input, output, &bail);
11850 bailoutFrom(&bail, lir->snapshot());
11853 void CodeGenerator::visitFloorF(LFloorF* lir) {
11854 FloatRegister input = ToFloatRegister(lir->input());
11855 Register output = ToRegister(lir->output());
11857 Label bail;
11858 masm.floorFloat32ToInt32(input, output, &bail);
11859 bailoutFrom(&bail, lir->snapshot());
11862 void CodeGenerator::visitCeil(LCeil* lir) {
11863 FloatRegister input = ToFloatRegister(lir->input());
11864 Register output = ToRegister(lir->output());
11866 Label bail;
11867 masm.ceilDoubleToInt32(input, output, &bail);
11868 bailoutFrom(&bail, lir->snapshot());
11871 void CodeGenerator::visitCeilF(LCeilF* lir) {
11872 FloatRegister input = ToFloatRegister(lir->input());
11873 Register output = ToRegister(lir->output());
11875 Label bail;
11876 masm.ceilFloat32ToInt32(input, output, &bail);
11877 bailoutFrom(&bail, lir->snapshot());
11880 void CodeGenerator::visitRound(LRound* lir) {
11881 FloatRegister input = ToFloatRegister(lir->input());
11882 FloatRegister temp = ToFloatRegister(lir->temp0());
11883 Register output = ToRegister(lir->output());
11885 Label bail;
11886 masm.roundDoubleToInt32(input, output, temp, &bail);
11887 bailoutFrom(&bail, lir->snapshot());
11890 void CodeGenerator::visitRoundF(LRoundF* lir) {
11891 FloatRegister input = ToFloatRegister(lir->input());
11892 FloatRegister temp = ToFloatRegister(lir->temp0());
11893 Register output = ToRegister(lir->output());
11895 Label bail;
11896 masm.roundFloat32ToInt32(input, output, temp, &bail);
11897 bailoutFrom(&bail, lir->snapshot());
11900 void CodeGenerator::visitTrunc(LTrunc* lir) {
11901 FloatRegister input = ToFloatRegister(lir->input());
11902 Register output = ToRegister(lir->output());
11904 Label bail;
11905 masm.truncDoubleToInt32(input, output, &bail);
11906 bailoutFrom(&bail, lir->snapshot());
11909 void CodeGenerator::visitTruncF(LTruncF* lir) {
11910 FloatRegister input = ToFloatRegister(lir->input());
11911 Register output = ToRegister(lir->output());
11913 Label bail;
11914 masm.truncFloat32ToInt32(input, output, &bail);
11915 bailoutFrom(&bail, lir->snapshot());
// Emit a string comparison for any JSOp comparison operator. The inline path
// (masm.compareStrings) handles the cheap cases; the OOL VM call selected
// below handles the general case. Relational ops >= / > are implemented by
// swapping the operands of <= / <.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  // Inline fast path; jumps to |ool->entry()| for the general case.
  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Emit an equality comparison of a string against a known non-empty constant
// linear string. Fast paths: pointer identity, atom identity (after a fast
// atomize attempt), encoding and length mismatches, then an inline
// character-by-character compare; ropes and other hard cases go to the VM.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;

    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      // (Checked at compile time on the constant's actual characters.)
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::NotEqual,
                  Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &setNotEqualResult);

    if (str->isAtom()) {
      Label forwardedPtrEqual;
      masm.tryFastAtomize(input, output, output, &compareChars);

      // We now have two atoms. Just check pointer equality.
      masm.branchPtr(Assembler::Equal, output, ImmGCPtr(str),
                     &forwardedPtrEqual);

      masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
      masm.jump(ool->rejoin());

      masm.bind(&forwardedPtrEqual);
      masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
      masm.jump(ool->rejoin());
    } else {
      masm.jump(&compareChars);
    }

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters. |output| doubles as a scratch here;
  // it is overwritten with the boolean result below.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Emit a relational comparison of a string against a known single-character
// constant string. Compares the input's first character (unwinding rope left
// children to reach it), then breaks ties on length — a fully inline path
// with no VM fallback.
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty. An empty string is always less than a
  // one-character string, which the length comparison below produces.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to any
    // character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  // Characters are unsigned, hence |isSigned = false|.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
// Emit a BigInt-BigInt comparison for any JSOp comparison operator, fully
// inline. masm.equalBigInts walks sign, digit length, and digits; the three
// "not same" exits are resolved here into the final boolean, either directly
// (equality ops) or via sign/length/digit ordering (relational ops).
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  // For equality ops any difference yields the same answer, so all three
  // exits collapse onto |notSame|; relational ops need separate handling.
  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Fall-through: the operands are equal.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // Both operands have the same sign here, so testing |left| suffices.
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
12228 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
12229 JSOp op = lir->mir()->jsop();
12230 Register left = ToRegister(lir->left());
12231 Register right = ToRegister(lir->right());
12232 Register temp0 = ToRegister(lir->temp0());
12233 Register temp1 = ToRegister(lir->temp1());
12234 Register output = ToRegister(lir->output());
12236 Label ifTrue, ifFalse;
12237 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
12239 Label done;
12240 masm.bind(&ifFalse);
12241 masm.move32(Imm32(0), output);
12242 masm.jump(&done);
12243 masm.bind(&ifTrue);
12244 masm.move32(Imm32(1), output);
12245 masm.bind(&done);
12248 void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
12249 JSOp op = lir->mir()->jsop();
12250 Register left = ToRegister(lir->left());
12251 FloatRegister right = ToFloatRegister(lir->right());
12252 Register output = ToRegister(lir->output());
12254 masm.setupAlignedABICall();
12256 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
12257 // - |left <= right| is implemented as |right >= left|.
12258 // - |left > right| is implemented as |right < left|.
12259 if (op == JSOp::Le || op == JSOp::Gt) {
12260 masm.passABIArg(right, ABIType::Float64);
12261 masm.passABIArg(left);
12262 } else {
12263 masm.passABIArg(left);
12264 masm.passABIArg(right, ABIType::Float64);
12267 using FnBigIntNumber = bool (*)(BigInt*, double);
12268 using FnNumberBigInt = bool (*)(double, BigInt*);
12269 switch (op) {
12270 case JSOp::Eq: {
12271 masm.callWithABI<FnBigIntNumber,
12272 jit::BigIntNumberEqual<EqualityKind::Equal>>();
12273 break;
12275 case JSOp::Ne: {
12276 masm.callWithABI<FnBigIntNumber,
12277 jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
12278 break;
12280 case JSOp::Lt: {
12281 masm.callWithABI<FnBigIntNumber,
12282 jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
12283 break;
12285 case JSOp::Gt: {
12286 masm.callWithABI<FnNumberBigInt,
12287 jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
12288 break;
12290 case JSOp::Le: {
12291 masm.callWithABI<
12292 FnNumberBigInt,
12293 jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
12294 break;
12296 case JSOp::Ge: {
12297 masm.callWithABI<
12298 FnBigIntNumber,
12299 jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
12300 break;
12302 default:
12303 MOZ_CRASH("unhandled op");
12306 masm.storeCallBoolResult(output);
12309 void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
12310 JSOp op = lir->mir()->jsop();
12311 Register left = ToRegister(lir->left());
12312 Register right = ToRegister(lir->right());
12314 // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
12315 // - |left <= right| is implemented as |right >= left|.
12316 // - |left > right| is implemented as |right < left|.
12317 if (op == JSOp::Le || op == JSOp::Gt) {
12318 pushArg(left);
12319 pushArg(right);
12320 } else {
12321 pushArg(right);
12322 pushArg(left);
12325 using FnBigIntString =
12326 bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
12327 using FnStringBigInt =
12328 bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
12330 switch (op) {
12331 case JSOp::Eq: {
12332 constexpr auto Equal = EqualityKind::Equal;
12333 callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
12334 break;
12336 case JSOp::Ne: {
12337 constexpr auto NotEqual = EqualityKind::NotEqual;
12338 callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
12339 break;
12341 case JSOp::Lt: {
12342 constexpr auto LessThan = ComparisonKind::LessThan;
12343 callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
12344 break;
12346 case JSOp::Gt: {
12347 constexpr auto LessThan = ComparisonKind::LessThan;
12348 callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
12349 break;
12351 case JSOp::Le: {
12352 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
12353 callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
12354 break;
12356 case JSOp::Ge: {
12357 constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
12358 callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
12359 break;
12361 default:
12362 MOZ_CRASH("Unexpected compare op");
12366 void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
12367 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
12368 lir->mir()->compareType() == MCompare::Compare_Null);
12370 JSOp op = lir->mir()->jsop();
12371 MOZ_ASSERT(IsLooseEqualityOp(op));
12373 const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
12374 Register output = ToRegister(lir->output());
12376 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
12377 if (!intact) {
12378 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
12379 addOutOfLineCode(ool, lir->mir());
12381 Label* nullOrLikeUndefined = ool->label1();
12382 Label* notNullOrLikeUndefined = ool->label2();
12385 ScratchTagScope tag(masm, value);
12386 masm.splitTagForTest(value, tag);
12388 masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
12389 masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
12391 // Check whether it's a truthy object or a falsy object that emulates
12392 // undefined.
12393 masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
12396 Register objreg =
12397 masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
12398 branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
12399 notNullOrLikeUndefined, output, ool);
12400 // fall through
12402 Label done;
12404 // It's not null or undefined, and if it's an object it doesn't
12405 // emulate undefined, so it's not like undefined.
12406 masm.move32(Imm32(op == JSOp::Ne), output);
12407 masm.jump(&done);
12409 masm.bind(nullOrLikeUndefined);
12410 masm.move32(Imm32(op == JSOp::Eq), output);
12412 // Both branches meet here.
12413 masm.bind(&done);
12414 } else {
12415 Label nullOrUndefined, notNullOrLikeUndefined;
12416 #if defined(DEBUG) || defined(FUZZING)
12417 Register objreg = Register::Invalid();
12418 #endif
12420 ScratchTagScope tag(masm, value);
12421 masm.splitTagForTest(value, tag);
12423 masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
12424 masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
12426 #if defined(DEBUG) || defined(FUZZING)
12427 // Check whether it's a truthy object or a falsy object that emulates
12428 // undefined.
12429 masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
12430 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
12431 #endif
12434 #if defined(DEBUG) || defined(FUZZING)
12435 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
12436 masm.bind(&notNullOrLikeUndefined);
12437 #endif
12439 Label done;
12441 // It's not null or undefined, and if it's an object it doesn't
12442 // emulate undefined.
12443 masm.move32(Imm32(op == JSOp::Ne), output);
12444 masm.jump(&done);
12446 masm.bind(&nullOrUndefined);
12447 masm.move32(Imm32(op == JSOp::Eq), output);
12449 // Both branches meet here.
12450 masm.bind(&done);
12454 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
12455 LIsNullOrLikeUndefinedAndBranchV* lir) {
12456 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
12457 lir->cmpMir()->compareType() == MCompare::Compare_Null);
12459 JSOp op = lir->cmpMir()->jsop();
12460 MOZ_ASSERT(IsLooseEqualityOp(op));
12462 const ValueOperand value =
12463 ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
12465 MBasicBlock* ifTrue = lir->ifTrue();
12466 MBasicBlock* ifFalse = lir->ifFalse();
12468 if (op == JSOp::Ne) {
12469 // Swap branches.
12470 std::swap(ifTrue, ifFalse);
12473 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
12475 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
12476 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
12479 ScratchTagScope tag(masm, value);
12480 masm.splitTagForTest(value, tag);
12482 masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
12483 masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
12485 masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
12488 bool extractObject = !intact;
12489 #if defined(DEBUG) || defined(FUZZING)
12490 // always extract objreg if we're in debug and
12491 // assertObjectDoesNotEmulateUndefined;
12492 extractObject = true;
12493 #endif
12495 Register objreg = Register::Invalid();
12496 Register scratch = ToRegister(lir->temp());
12497 if (extractObject) {
12498 objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
12500 if (!intact) {
12501 // Objects that emulate undefined are loosely equal to null/undefined.
12502 OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
12503 addOutOfLineCode(ool, lir->cmpMir());
12504 testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
12505 ool);
12506 } else {
12507 assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
12508 // Bug 1874905. This would be nice to optimize out at the MIR level.
12509 masm.jump(ifFalseLabel);
12513 void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
12514 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
12515 lir->mir()->compareType() == MCompare::Compare_Null);
12516 MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
12518 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
12519 JSOp op = lir->mir()->jsop();
12520 Register output = ToRegister(lir->output());
12521 Register objreg = ToRegister(lir->input());
12522 if (!intact) {
12523 MOZ_ASSERT(IsLooseEqualityOp(op),
12524 "Strict equality should have been folded");
12526 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
12527 addOutOfLineCode(ool, lir->mir());
12529 Label* emulatesUndefined = ool->label1();
12530 Label* doesntEmulateUndefined = ool->label2();
12532 branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
12533 doesntEmulateUndefined, output, ool);
12535 Label done;
12537 masm.move32(Imm32(op == JSOp::Ne), output);
12538 masm.jump(&done);
12540 masm.bind(emulatesUndefined);
12541 masm.move32(Imm32(op == JSOp::Eq), output);
12542 masm.bind(&done);
12543 } else {
12544 assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
12545 masm.move32(Imm32(op == JSOp::Ne), output);
12549 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
12550 LIsNullOrLikeUndefinedAndBranchT* lir) {
12551 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
12552 lir->cmpMir()->compareType() == MCompare::Compare_Null);
12553 MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
12555 bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
12557 JSOp op = lir->cmpMir()->jsop();
12558 MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
12560 MBasicBlock* ifTrue = lir->ifTrue();
12561 MBasicBlock* ifFalse = lir->ifFalse();
12563 if (op == JSOp::Ne) {
12564 // Swap branches.
12565 std::swap(ifTrue, ifFalse);
12568 Register input = ToRegister(lir->getOperand(0));
12569 Register scratch = ToRegister(lir->temp());
12570 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
12571 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
12573 if (intact) {
12574 // Bug 1874905. Ideally branches like this would be optimized out.
12575 assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
12576 masm.jump(ifFalseLabel);
12577 } else {
12578 auto* ool = new (alloc()) OutOfLineTestObject();
12579 addOutOfLineCode(ool, lir->cmpMir());
12581 // Objects that emulate undefined are loosely equal to null/undefined.
12582 testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
12586 void CodeGenerator::visitIsNull(LIsNull* lir) {
12587 MCompare::CompareType compareType = lir->mir()->compareType();
12588 MOZ_ASSERT(compareType == MCompare::Compare_Null);
12590 JSOp op = lir->mir()->jsop();
12591 MOZ_ASSERT(IsStrictEqualityOp(op));
12593 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
12594 Register output = ToRegister(lir->output());
12596 Assembler::Condition cond = JSOpToCondition(compareType, op);
12597 masm.testNullSet(cond, value, output);
12600 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
12601 MCompare::CompareType compareType = lir->mir()->compareType();
12602 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
12604 JSOp op = lir->mir()->jsop();
12605 MOZ_ASSERT(IsStrictEqualityOp(op));
12607 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
12608 Register output = ToRegister(lir->output());
12610 Assembler::Condition cond = JSOpToCondition(compareType, op);
12611 masm.testUndefinedSet(cond, value, output);
12614 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
12615 MCompare::CompareType compareType = lir->cmpMir()->compareType();
12616 MOZ_ASSERT(compareType == MCompare::Compare_Null);
12618 JSOp op = lir->cmpMir()->jsop();
12619 MOZ_ASSERT(IsStrictEqualityOp(op));
12621 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
12623 Assembler::Condition cond = JSOpToCondition(compareType, op);
12624 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
12627 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
12628 MCompare::CompareType compareType = lir->cmpMir()->compareType();
12629 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
12631 JSOp op = lir->cmpMir()->jsop();
12632 MOZ_ASSERT(IsStrictEqualityOp(op));
12634 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
12636 Assembler::Condition cond = JSOpToCondition(compareType, op);
12637 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
12640 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
12641 FloatRegister left = ToFloatRegister(lir->left());
12642 FloatRegister right = ToFloatRegister(lir->right());
12643 FloatRegister temp = ToFloatRegister(lir->temp0());
12644 Register output = ToRegister(lir->output());
12646 masm.sameValueDouble(left, right, temp, output);
12649 void CodeGenerator::visitSameValue(LSameValue* lir) {
12650 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
12651 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
12652 Register output = ToRegister(lir->output());
12654 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
12655 OutOfLineCode* ool =
12656 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
12658 // First check to see if the values have identical bits.
12659 // This is correct for SameValue because SameValue(NaN,NaN) is true,
12660 // and SameValue(0,-0) is false.
12661 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
12662 ool->entry());
12663 masm.move32(Imm32(1), output);
12665 // If this fails, call SameValue.
12666 masm.bind(ool->rejoin());
12669 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
12670 Register output) {
12671 using Fn =
12672 JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
12673 OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
12674 lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
12675 StoreRegisterTo(output));
12677 const JitZone* jitZone = gen->realm->zone()->jitZone();
12678 JitCode* stringConcatStub =
12679 jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
12680 masm.call(stringConcatStub);
12681 masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
12683 masm.bind(ool->rejoin());
12686 void CodeGenerator::visitConcat(LConcat* lir) {
12687 Register lhs = ToRegister(lir->lhs());
12688 Register rhs = ToRegister(lir->rhs());
12690 Register output = ToRegister(lir->output());
12692 MOZ_ASSERT(lhs == CallTempReg0);
12693 MOZ_ASSERT(rhs == CallTempReg1);
12694 MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
12695 MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
12696 MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
12697 MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
12698 MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
12699 MOZ_ASSERT(output == CallTempReg5);
12701 emitConcat(lir, lhs, rhs, output);
12704 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
12705 Register len, Register byteOpScratch,
12706 CharEncoding fromEncoding, CharEncoding toEncoding,
12707 size_t maximumLength = SIZE_MAX) {
12708 // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
12709 // (checked below in debug builds), and when done |to| must point to the
12710 // next available char.
12712 #ifdef DEBUG
12713 Label ok;
12714 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
12715 masm.assumeUnreachable("Length should be greater than 0.");
12716 masm.bind(&ok);
12718 if (maximumLength != SIZE_MAX) {
12719 MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");
12721 Label ok;
12722 masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
12723 masm.assumeUnreachable("Length should not exceed maximum length.");
12724 masm.bind(&ok);
12726 #endif
12728 MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
12729 fromEncoding == CharEncoding::Latin1);
12731 size_t fromWidth =
12732 fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
12733 size_t toWidth =
12734 toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
12736 // Try to copy multiple characters at once when both encoding are equal.
12737 if (fromEncoding == toEncoding) {
12738 constexpr size_t ptrWidth = sizeof(uintptr_t);
12740 // Copy |width| bytes and then adjust |from| and |to|.
12741 auto copyCharacters = [&](size_t width) {
12742 static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");
12744 switch (width) {
12745 case 1:
12746 masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
12747 masm.store8(byteOpScratch, Address(to, 0));
12748 break;
12749 case 2:
12750 masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
12751 masm.store16(byteOpScratch, Address(to, 0));
12752 break;
12753 case 4:
12754 masm.load32(Address(from, 0), byteOpScratch);
12755 masm.store32(byteOpScratch, Address(to, 0));
12756 break;
12757 case 8:
12758 MOZ_ASSERT(width == ptrWidth);
12759 masm.loadPtr(Address(from, 0), byteOpScratch);
12760 masm.storePtr(byteOpScratch, Address(to, 0));
12761 break;
12764 masm.addPtr(Imm32(width), from);
12765 masm.addPtr(Imm32(width), to);
12768 // First align |len| to pointer width.
12769 Label done;
12770 for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
12771 // Number of characters which fit into |width| bytes.
12772 size_t charsPerWidth = width / fromWidth;
12774 if (charsPerWidth < maximumLength) {
12775 Label next;
12776 masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);
12778 copyCharacters(width);
12780 masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
12781 masm.bind(&next);
12782 } else if (charsPerWidth == maximumLength) {
12783 copyCharacters(width);
12784 masm.sub32(Imm32(charsPerWidth), len);
12788 size_t maxInlineLength;
12789 if (fromEncoding == CharEncoding::Latin1) {
12790 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
12791 } else {
12792 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12795 // Number of characters which fit into a single register.
12796 size_t charsPerPtr = ptrWidth / fromWidth;
12798 // Unroll small loops.
12799 constexpr size_t unrollLoopLimit = 3;
12800 size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;
12802 #ifdef JS_64BIT
12803 static constexpr size_t latin1MaxInlineByteLength =
12804 JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
12805 static constexpr size_t twoByteMaxInlineByteLength =
12806 JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);
12808 // |unrollLoopLimit| should be large enough to allow loop unrolling on
12809 // 64-bit targets.
12810 static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
12811 "Latin-1 loops are unrolled on 64-bit");
12812 static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
12813 "Two-byte loops are unrolled on 64-bit");
12814 #endif
12816 if (loopCount <= unrollLoopLimit) {
12817 Label labels[unrollLoopLimit];
12819 // Check up front how many characters can be copied.
12820 for (size_t i = 1; i < loopCount; i++) {
12821 masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
12822 &labels[i]);
12825 // Generate the unrolled loop body.
12826 for (size_t i = loopCount; i > 0; i--) {
12827 copyCharacters(ptrWidth);
12828 masm.sub32(Imm32(charsPerPtr), len);
12830 // Jump target for the previous length check.
12831 if (i != 1) {
12832 masm.bind(&labels[i - 1]);
12835 } else {
12836 Label start;
12837 masm.bind(&start);
12838 copyCharacters(ptrWidth);
12839 masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
12842 masm.bind(&done);
12843 } else {
12844 Label start;
12845 masm.bind(&start);
12846 masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
12847 masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
12848 masm.addPtr(Imm32(fromWidth), from);
12849 masm.addPtr(Imm32(toWidth), to);
12850 masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
12854 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
12855 Register len, Register byteOpScratch,
12856 CharEncoding encoding, size_t maximumLength) {
12857 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
12858 maximumLength);
12861 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
12862 Register destChars, Register temp1,
12863 Register temp2) {
12864 // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
12865 // have to inflate.
12867 Label isLatin1, done;
12868 masm.loadStringLength(input, temp1);
12869 masm.branchLatin1String(input, &isLatin1);
12871 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
12872 masm.movePtr(temp2, input);
12873 CopyStringChars(masm, destChars, input, temp1, temp2,
12874 CharEncoding::TwoByte);
12875 masm.jump(&done);
12877 masm.bind(&isLatin1);
12879 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
12880 masm.movePtr(temp2, input);
12881 CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
12882 CharEncoding::TwoByte);
12884 masm.bind(&done);
12887 static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
12888 Register length, Register temp,
12889 gc::Heap initialStringHeap,
12890 Label* failure,
12891 CharEncoding encoding) {
12892 #ifdef DEBUG
12893 size_t maxInlineLength;
12894 if (encoding == CharEncoding::Latin1) {
12895 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
12896 } else {
12897 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
12900 Label ok;
12901 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
12902 masm.assumeUnreachable("string length too large to be allocated as inline");
12903 masm.bind(&ok);
12904 #endif
12906 size_t maxThinInlineLength;
12907 if (encoding == CharEncoding::Latin1) {
12908 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
12909 } else {
12910 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
12913 Label isFat, allocDone;
12914 masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
12916 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12917 if (encoding == CharEncoding::Latin1) {
12918 flags |= JSString::LATIN1_CHARS_BIT;
12920 masm.newGCString(output, temp, initialStringHeap, failure);
12921 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12922 masm.jump(&allocDone);
12924 masm.bind(&isFat);
12926 uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
12927 if (encoding == CharEncoding::Latin1) {
12928 flags |= JSString::LATIN1_CHARS_BIT;
12930 masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
12931 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12933 masm.bind(&allocDone);
12935 // Store length.
12936 masm.store32(length, Address(output, JSString::offsetOfLength()));
12939 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
12940 Register output, Register temp1, Register temp2,
12941 Register temp3, gc::Heap initialStringHeap,
12942 Label* failure, CharEncoding encoding) {
12943 JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
12944 (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
12946 // State: result length in temp2.
12948 // Ensure both strings are linear.
12949 masm.branchIfRope(lhs, failure);
12950 masm.branchIfRope(rhs, failure);
12952 // Allocate a JSThinInlineString or JSFatInlineString.
12953 AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
12954 failure, encoding);
12956 // Load chars pointer in temp2.
12957 masm.loadInlineStringCharsForStore(output, temp2);
12959 auto copyChars = [&](Register src) {
12960 if (encoding == CharEncoding::TwoByte) {
12961 CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
12962 } else {
12963 masm.loadStringLength(src, temp3);
12964 masm.loadStringChars(src, temp1, CharEncoding::Latin1);
12965 masm.movePtr(temp1, src);
12966 CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
12970 // Copy lhs chars. Note that this advances temp2 to point to the next
12971 // char. This also clobbers the lhs register.
12972 copyChars(lhs);
12974 // Copy rhs chars. Clobbers the rhs register.
12975 copyChars(rhs);
12978 void CodeGenerator::visitSubstr(LSubstr* lir) {
12979 Register string = ToRegister(lir->string());
12980 Register begin = ToRegister(lir->begin());
12981 Register length = ToRegister(lir->length());
12982 Register output = ToRegister(lir->output());
12983 Register temp0 = ToRegister(lir->temp0());
12984 Register temp2 = ToRegister(lir->temp2());
12986 // On x86 there are not enough registers. In that case reuse the string
12987 // register as temporary.
12988 Register temp1 =
12989 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
12991 size_t maximumLength = SIZE_MAX;
12993 Range* range = lir->mir()->length()->range();
12994 if (range && range->hasInt32UpperBound()) {
12995 MOZ_ASSERT(range->upper() >= 0);
12996 maximumLength = size_t(range->upper());
12999 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
13000 JSThinInlineString::MAX_LENGTH_LATIN1);
13002 static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
13003 JSFatInlineString::MAX_LENGTH_LATIN1);
13005 bool tryFatInlineOrDependent =
13006 maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
13007 bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;
13009 #ifdef DEBUG
13010 if (maximumLength != SIZE_MAX) {
13011 Label ok;
13012 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
13013 masm.assumeUnreachable("length should not exceed maximum length");
13014 masm.bind(&ok);
13016 #endif
13018 Label nonZero, nonInput;
13020 // For every edge case use the C++ variant.
13021 // Note: we also use this upon allocation failure in newGCString and
13022 // newGCFatInlineString. To squeeze out even more performance those failures
13023 // can be handled by allocate in ool code and returning to jit code to fill
13024 // in all data.
13025 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
13026 int32_t len);
13027 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
13028 lir, ArgList(string, begin, length), StoreRegisterTo(output));
13029 Label* slowPath = ool->entry();
13030 Label* done = ool->rejoin();
13032 // Zero length, return emptystring.
13033 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
13034 const JSAtomState& names = gen->runtime->names();
13035 masm.movePtr(ImmGCPtr(names.empty_), output);
13036 masm.jump(done);
13038 // Substring from 0..|str.length|, return str.
13039 masm.bind(&nonZero);
13040 masm.branch32(Assembler::NotEqual,
13041 Address(string, JSString::offsetOfLength()), length, &nonInput);
13042 #ifdef DEBUG
13044 Label ok;
13045 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
13046 masm.assumeUnreachable("length == str.length implies begin == 0");
13047 masm.bind(&ok);
13049 #endif
13050 masm.movePtr(string, output);
13051 masm.jump(done);
13053 // Use slow path for ropes.
13054 masm.bind(&nonInput);
13055 masm.branchIfRope(string, slowPath);
13057 // Optimize one and two character strings.
13058 Label nonStatic;
13059 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
13061 Label loadLengthOne, loadLengthTwo;
13063 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
13064 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
13065 : sizeof(char16_t);
13067 masm.loadStringChars(string, temp0, encoding);
13068 masm.loadChar(temp0, begin, temp2, encoding);
13069 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
13070 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
13071 if (!fallthru) {
13072 masm.jump(&loadLengthTwo);
13076 Label isLatin1;
13077 masm.branchLatin1String(string, &isLatin1);
13078 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
13080 masm.bind(&isLatin1);
13081 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
13083 // Try to load a length-two static string.
13084 masm.bind(&loadLengthTwo);
13085 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
13086 &nonStatic);
13087 masm.jump(done);
13089 // Try to load a length-one static string.
13090 masm.bind(&loadLengthOne);
13091 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
13092 &nonStatic);
13093 masm.jump(done);
13095 masm.bind(&nonStatic);
13097 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
13098 // notInline if we need a dependent string.
13099 Label notInline;
13101 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
13102 JSFatInlineString::MAX_LENGTH_LATIN1);
13103 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
13104 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
13106 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
13107 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
13108 // strings.
13110 Label allocFat, allocDone;
13111 if (tryFatInlineOrDependent) {
13112 Label isLatin1, allocThin;
13113 masm.branchLatin1String(string, &isLatin1);
13115 if (tryDependent) {
13116 masm.branch32(Assembler::Above, length,
13117 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
13118 &notInline);
13120 masm.move32(Imm32(0), temp2);
13121 masm.branch32(Assembler::Above, length,
13122 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
13123 &allocFat);
13124 masm.jump(&allocThin);
13127 masm.bind(&isLatin1);
13129 if (tryDependent) {
13130 masm.branch32(Assembler::Above, length,
13131 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
13132 &notInline);
13134 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
13135 masm.branch32(Assembler::Above, length,
13136 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
13139 masm.bind(&allocThin);
13140 } else {
13141 masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
13142 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
13146 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
13147 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
13150 if (tryFatInlineOrDependent) {
13151 masm.jump(&allocDone);
13153 masm.bind(&allocFat);
13155 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
13156 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
13159 masm.bind(&allocDone);
13162 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
13163 masm.store32(length, Address(output, JSString::offsetOfLength()));
13165 auto initializeInlineString = [&](CharEncoding encoding) {
13166 masm.loadStringChars(string, temp0, encoding);
13167 masm.addToCharPtr(temp0, begin, encoding);
13168 if (temp1 == string) {
13169 masm.push(string);
13171 masm.loadInlineStringCharsForStore(output, temp1);
13172 CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
13173 maximumLength);
13174 masm.loadStringLength(output, length);
13175 if (temp1 == string) {
13176 masm.pop(string);
13180 Label isInlineLatin1;
13181 masm.branchTest32(Assembler::NonZero, temp2,
13182 Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
13183 initializeInlineString(CharEncoding::TwoByte);
13184 masm.jump(done);
13186 masm.bind(&isInlineLatin1);
13187 initializeInlineString(CharEncoding::Latin1);
13190 // Handle other cases with a DependentString.
13191 if (tryDependent) {
13192 masm.jump(done);
13194 masm.bind(&notInline);
13195 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
13196 masm.store32(length, Address(output, JSString::offsetOfLength()));
13198 // Note: no post barrier is needed because the dependent string is either
13199 // allocated in the nursery or both strings are tenured (if nursery strings
13200 // are disabled for this zone).
13201 EmitInitDependentStringBase(masm, output, string, temp0, temp2,
13202 /* needsPostBarrier = */ false);
13204 auto initializeDependentString = [&](CharEncoding encoding) {
13205 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
13206 if (encoding == CharEncoding::Latin1) {
13207 flags |= JSString::LATIN1_CHARS_BIT;
13209 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
13210 masm.loadNonInlineStringChars(string, temp0, encoding);
13211 masm.addToCharPtr(temp0, begin, encoding);
13212 masm.storeNonInlineStringChars(temp0, output);
13215 Label isLatin1;
13216 masm.branchLatin1String(string, &isLatin1);
13217 initializeDependentString(CharEncoding::TwoByte);
13218 masm.jump(done);
13220 masm.bind(&isLatin1);
13221 initializeDependentString(CharEncoding::Latin1);
13224 masm.bind(done);
// Emits the shared string-concatenation stub. Inputs: lhs in CallTempReg0,
// rhs in CallTempReg1; result string is returned in CallTempReg5, or nullptr
// on failure so the caller can take a slow path. Empty operands return the
// other operand unchanged; short results become inline strings, everything
// else becomes a rope.
13227 JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
13228 JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
13230 TempAllocator temp(&cx->tempLifoAlloc());
13231 JitContext jcx(cx);
13232 StackMacroAssembler masm(cx, temp);
13233 AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");
13235 Register lhs = CallTempReg0;
13236 Register rhs = CallTempReg1;
13237 Register temp1 = CallTempReg2;
13238 Register temp2 = CallTempReg3;
13239 Register temp3 = CallTempReg4;
13240 Register output = CallTempReg5;
13242 Label failure;
13243 #ifdef JS_USE_LINK_REGISTER
13244 masm.pushReturnAddress();
13245 #endif
13246 masm.Push(FramePointer);
13247 masm.moveStackPtrTo(FramePointer);
13249 // If lhs is empty, return rhs.
13250 Label leftEmpty;
13251 masm.loadStringLength(lhs, temp1);
13252 masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
13254 // If rhs is empty, return lhs.
13255 Label rightEmpty;
13256 masm.loadStringLength(rhs, temp2);
13257 masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
// temp2 now holds the combined result length (lhs.length + rhs.length).
13259 masm.add32(temp1, temp2);
13261 // Check if we can use a JSInlineString. The result is a Latin1 string if
13262 // lhs and rhs are both Latin1, so we AND the flags.
13263 Label isInlineTwoByte, isInlineLatin1;
13264 masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
13265 masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
13267 Label isLatin1, notInline;
13268 masm.branchTest32(Assembler::NonZero, temp1,
13269 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
13271 masm.branch32(Assembler::BelowOrEqual, temp2,
13272 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
13273 &isInlineTwoByte);
13274 masm.jump(&notInline);
13276 masm.bind(&isLatin1);
13278 masm.branch32(Assembler::BelowOrEqual, temp2,
13279 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
13281 masm.bind(&notInline);
13283 // Keep AND'ed flags in temp1.
13285 // Ensure result length <= JSString::MAX_LENGTH.
13286 masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
13288 // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
13289 // == gc::Heap::Default. (As a result, no post barriers are needed below.)
13290 masm.newGCString(output, temp3, initialStringHeap, &failure);
13292 // Store rope length and flags. temp1 still holds the result of AND'ing the
13293 // lhs and rhs flags, so we just have to clear the other flags to get our rope
13294 // flags (Latin1 if both lhs and rhs are Latin1).
13295 static_assert(JSString::INIT_ROPE_FLAGS == 0,
13296 "Rope type flags must have no bits set");
13297 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
13298 masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
13299 masm.store32(temp2, Address(output, JSString::offsetOfLength()));
13301 // Store left and right nodes.
13302 masm.storeRopeChildren(lhs, rhs, output);
13303 masm.pop(FramePointer);
13304 masm.ret();
13306 masm.bind(&leftEmpty);
13307 masm.mov(rhs, output);
13308 masm.pop(FramePointer);
13309 masm.ret();
13311 masm.bind(&rightEmpty);
13312 masm.mov(lhs, output);
13313 masm.pop(FramePointer);
13314 masm.ret();
// Inline-string paths: copy both operands' characters into a freshly
// allocated inline string via the shared ConcatInlineString helper.
13316 masm.bind(&isInlineTwoByte);
13317 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
13318 initialStringHeap, &failure, CharEncoding::TwoByte);
13319 masm.pop(FramePointer);
13320 masm.ret();
13322 masm.bind(&isInlineLatin1);
13323 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
13324 initialStringHeap, &failure, CharEncoding::Latin1);
13325 masm.pop(FramePointer);
13326 masm.ret();
// Failure: signal the caller by returning nullptr in the output register.
13328 masm.bind(&failure);
13329 masm.movePtr(ImmPtr(nullptr), output);
13330 masm.pop(FramePointer);
13331 masm.ret();
13333 Linker linker(masm);
13334 JitCode* code = linker.newCode(cx, CodeKind::Other);
13336 CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
13337 #ifdef MOZ_VTUNE
13338 vtune::MarkStub(code, "StringConcatStub");
13339 #endif
13341 return code;
// Emits a trampoline that calls js_free() on the pointer passed in
// CallTempReg0, preserving all volatile registers (except the argument
// register itself) around the ABI call.
13344 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
13345 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
13347 const Register regSlots = CallTempReg0;
13349 freeStubOffset_ = startTrampolineCode(masm);
13351 #ifdef JS_USE_LINK_REGISTER
13352 masm.pushReturnAddress();
13353 #endif
// Save every volatile register except the argument so the caller's live
// values survive the call.
13354 AllocatableRegisterSet regs(RegisterSet::Volatile());
13355 regs.takeUnchecked(regSlots);
13356 LiveRegisterSet save(regs.asLiveSet());
13357 masm.PushRegsInMask(save);
13359 const Register regTemp = regs.takeAnyGeneral();
13360 MOZ_ASSERT(regTemp != regSlots);
13362 using Fn = void (*)(void* p);
13363 masm.setupUnalignedABICall(regTemp);
13364 masm.passABIArg(regSlots);
13365 masm.callWithABI<Fn, js_free>(ABIType::General,
13366 CheckUnsafeCallWithABI::DontCheckOther);
13368 masm.PopRegsInMask(save);
13370 masm.ret();
// Emits the lazy-link trampoline: under a fake LazyLink exit frame it calls
// LazyLinkTopActivation(cx, frame), then tail-jumps to the code pointer the
// VM call returned in ReturnReg.
13373 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
13374 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
13376 lazyLinkStubOffset_ = startTrampolineCode(masm);
13378 #ifdef JS_USE_LINK_REGISTER
13379 masm.pushReturnAddress();
13380 #endif
13381 masm.Push(FramePointer);
13382 masm.moveStackPtrTo(FramePointer);
13384 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
13385 Register temp0 = regs.takeAny();
13386 Register temp1 = regs.takeAny();
13387 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the exit frame (current stack pointer).
13389 masm.loadJSContext(temp0);
13390 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
13391 masm.moveStackPtrTo(temp1);
13393 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
13394 masm.setupUnalignedABICall(temp2);
13395 masm.passABIArg(temp0);
13396 masm.passABIArg(temp1);
13397 masm.callWithABI<Fn, LazyLinkTopActivation>(
13398 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
13400 // Discard exit frame and restore frame pointer.
13401 masm.leaveExitFrame(0);
13402 masm.pop(FramePointer);
13404 #ifdef JS_USE_LINK_REGISTER
13405 // Restore the return address such that the emitPrologue function of the
13406 // CodeGenerator can push it back on the stack with pushReturnAddress.
13407 masm.popReturnAddress();
13408 #endif
// Tail-call into the freshly linked code returned by the VM call.
13409 masm.jump(ReturnReg);
// Emits the interpreter-entry trampoline: under a fake InterpreterStub exit
// frame it calls InvokeFromInterpreterStub(cx, frame), bails to the failure
// label on false, and otherwise loads the return Value the callee stored in
// argv[0] before returning.
13412 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
13413 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
13415 interpreterStubOffset_ = startTrampolineCode(masm);
13417 #ifdef JS_USE_LINK_REGISTER
13418 masm.pushReturnAddress();
13419 #endif
13420 masm.Push(FramePointer);
13421 masm.moveStackPtrTo(FramePointer);
13423 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
13424 Register temp0 = regs.takeAny();
13425 Register temp1 = regs.takeAny();
13426 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the exit frame (current stack pointer).
13428 masm.loadJSContext(temp0);
13429 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
13430 masm.moveStackPtrTo(temp1);
13432 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
13433 masm.setupUnalignedABICall(temp2);
13434 masm.passABIArg(temp0);
13435 masm.passABIArg(temp1);
13436 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
13437 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// A false return signals an exception; route to the shared failure path.
13439 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
13441 // Discard exit frame and restore frame pointer.
13442 masm.leaveExitFrame(0);
13443 masm.pop(FramePointer);
13445 // InvokeFromInterpreterStub stores the return value in argv[0], where the
13446 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
13447 // just popped.
13448 masm.loadValue(Address(masm.getStackPointer(),
13449 JitFrameLayout::offsetOfThis() - sizeof(void*)),
13450 JSReturnOperand);
13451 masm.ret();
// Emits a stub that converts a double Value in R0 to an int32 Value in place
// when the conversion is exact; non-double inputs and inexact doubles are
// left untouched (convertDoubleToInt32 branches to |done| on failure).
13454 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
13455 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
13456 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
13458 Label done;
13459 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
13461 masm.unboxDouble(R0, FloatReg0);
// negativeZeroCheck is disabled, so -0.0 converts to int32 0 here.
13462 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
13463 /* negativeZeroCheck = */ false);
13464 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
13466 masm.bind(&done);
13467 masm.abiret();
// LinearizeString: the output is the input string itself unless it is a
// rope, in which case an out-of-line VM call flattens it first.
13470 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
13471 Register str = ToRegister(lir->str());
13472 Register output = ToRegister(lir->output());
13474 using Fn = JSLinearString* (*)(JSContext*, JSString*);
13475 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
13476 lir, ArgList(str), StoreRegisterTo(output));
13478 masm.branchIfRope(str, ool->entry());
13480 masm.movePtr(str, output);
13481 masm.bind(ool->rejoin());
// LinearizeForCharAccess: returns the input string unless the char at
// |index| cannot be loaded directly, in which case a VM call linearizes it.
13484 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
13485 Register str = ToRegister(lir->str());
13486 Register index = ToRegister(lir->index());
13487 Register output = ToRegister(lir->output());
13489 using Fn = JSLinearString* (*)(JSContext*, JSString*);
13490 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
13491 lir, ArgList(str), StoreRegisterTo(output));
13493 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
13495 masm.movePtr(str, output);
13496 masm.bind(ool->rejoin());
// LinearizeForCodePointAccess: like LinearizeForCharAccess, but uses the
// code-point variant of the check (needs an extra temp register).
13499 void CodeGenerator::visitLinearizeForCodePointAccess(
13500 LLinearizeForCodePointAccess* lir) {
13501 Register str = ToRegister(lir->str());
13502 Register index = ToRegister(lir->index());
13503 Register output = ToRegister(lir->output());
13504 Register temp = ToRegister(lir->temp0());
13506 using Fn = JSLinearString* (*)(JSContext*, JSString*);
13507 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
13508 lir, ArgList(str), StoreRegisterTo(output));
13510 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
13511 ool->entry());
13513 masm.movePtr(str, output);
13514 masm.bind(ool->rejoin());
// ToRelativeStringIndex: output = index < 0 ? index + length : index,
// implemented branchlessly with a conditional move.
13517 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
13518 Register index = ToRegister(lir->index());
13519 Register length = ToRegister(lir->length());
13520 Register output = ToRegister(lir->output());
// output = (index < 0) ? length : 0, then add index.
13522 masm.move32(Imm32(0), output);
13523 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
13524 masm.add32(index, output);
// CharCodeAt: loads the char code at the (possibly constant-zero) index,
// falling back to a VM call when the inline load fails (e.g. ropes).
13527 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
13528 Register str = ToRegister(lir->str());
13529 Register output = ToRegister(lir->output());
13530 Register temp0 = ToRegister(lir->temp0());
13531 Register temp1 = ToRegister(lir->temp1());
13533 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
// A bogus index operand means the index was constant-folded to 0.
13535 if (lir->index()->isBogus()) {
13536 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
13537 StoreRegisterTo(output));
13538 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
13539 masm.bind(ool->rejoin());
13540 } else {
13541 Register index = ToRegister(lir->index());
13543 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
13544 StoreRegisterTo(output));
13545 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
13546 masm.bind(ool->rejoin());
// CharCodeAtOrNegative: like CharCodeAt, but out-of-bounds indices produce
// -1 instead of calling into the VM (the bounds check jumps straight to the
// rejoin with -1 preloaded in the output register).
13550 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
13551 Register str = ToRegister(lir->str());
13552 Register output = ToRegister(lir->output());
13553 Register temp0 = ToRegister(lir->temp0());
13554 Register temp1 = ToRegister(lir->temp1());
13556 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
13558 // Return -1 for out-of-bounds access.
13559 masm.move32(Imm32(-1), output);
// A bogus index operand means the index was constant-folded to 0.
13561 if (lir->index()->isBogus()) {
13562 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
13563 StoreRegisterTo(output));
// Empty string: index 0 is out of bounds, keep the preloaded -1.
13565 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
13566 Imm32(0), ool->rejoin());
13567 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
13568 masm.bind(ool->rejoin());
13569 } else {
13570 Register index = ToRegister(lir->index());
13572 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
13573 StoreRegisterTo(output));
// Spectre-safe bounds check; out-of-bounds keeps the preloaded -1.
13575 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
13576 temp0, ool->rejoin());
13577 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
13578 masm.bind(ool->rejoin());
// CodePointAt: loads the full code point (combining surrogate pairs) at
// |index|, with a VM fallback when the inline load fails.
13582 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
13583 Register str = ToRegister(lir->str());
13584 Register index = ToRegister(lir->index());
13585 Register output = ToRegister(lir->output());
13586 Register temp0 = ToRegister(lir->temp0());
13587 Register temp1 = ToRegister(lir->temp1());
13589 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
13590 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
13591 StoreRegisterTo(output));
13593 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
13594 masm.bind(ool->rejoin());
// CodePointAtOrNegative: like CodePointAt, but out-of-bounds indices yield
// -1 (preloaded in the output; the bounds check jumps to the rejoin).
13597 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
13598 Register str = ToRegister(lir->str());
13599 Register index = ToRegister(lir->index());
13600 Register output = ToRegister(lir->output());
13601 Register temp0 = ToRegister(lir->temp0());
13602 Register temp1 = ToRegister(lir->temp1());
13604 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
13605 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
13606 StoreRegisterTo(output));
13608 // Return -1 for out-of-bounds access.
13609 masm.move32(Imm32(-1), output);
// Spectre-safe bounds check; out-of-bounds keeps the preloaded -1.
13611 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
13612 temp0, ool->rejoin());
13613 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
13614 masm.bind(ool->rejoin());
// NegativeToNaN: boxes the int32 input as a Value, replacing negative
// inputs (sign bit set) with NaN.
13617 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
13618 Register input = ToRegister(lir->input());
13619 ValueOperand output = ToOutValue(lir);
13621 masm.tagValue(JSVAL_TYPE_INT32, input, output);
13623 Label done;
13624 masm.branchTest32(Assembler::NotSigned, input, input, &done);
13625 masm.moveValue(JS::NaNValue(), output);
13626 masm.bind(&done);
// NegativeToUndefined: boxes the int32 input as a Value, replacing negative
// inputs (sign bit set) with |undefined|.
13629 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
13630 Register input = ToRegister(lir->input());
13631 ValueOperand output = ToOutValue(lir);
13633 masm.tagValue(JSVAL_TYPE_INT32, input, output);
13635 Label done;
13636 masm.branchTest32(Assembler::NotSigned, input, input, &done);
13637 masm.moveValue(JS::UndefinedValue(), output);
13638 masm.bind(&done);
// FromCharCode: maps a char code to its interned static string when the
// code is below the static-strings limit, otherwise calls the VM.
13641 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
13642 Register code = ToRegister(lir->code());
13643 Register output = ToRegister(lir->output());
13645 using Fn = JSLinearString* (*)(JSContext*, int32_t);
13646 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
13647 StoreRegisterTo(output));
13649 // OOL path if code >= UNIT_STATIC_LIMIT.
13650 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
13651 ool->entry());
13653 masm.bind(ool->rejoin());
// FromCharCodeEmptyIfNegative: like FromCharCode, but a negative input
// yields the empty string (preloaded into the output register).
13656 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
13657 LFromCharCodeEmptyIfNegative* lir) {
13658 Register code = ToRegister(lir->code());
13659 Register output = ToRegister(lir->output());
13661 using Fn = JSLinearString* (*)(JSContext*, int32_t);
13662 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
13663 StoreRegisterTo(output));
13665 // Return the empty string for negative inputs.
13666 const JSAtomState& names = gen->runtime->names();
13667 masm.movePtr(ImmGCPtr(names.empty_), output);
13668 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
13670 // OOL path if code >= UNIT_STATIC_LIMIT.
13671 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
13672 ool->entry());
13674 masm.bind(ool->rejoin());
// FromCharCodeUndefinedIfNegative: like FromCharCode but produces a boxed
// Value; negative inputs yield |undefined|, otherwise the string is tagged
// into the output Value.
13677 void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
13678 LFromCharCodeUndefinedIfNegative* lir) {
13679 Register code = ToRegister(lir->code());
13680 ValueOperand output = ToOutValue(lir);
// The scratch register of the output Value holds the raw string pointer
// before it is tagged.
13681 Register temp = output.scratchReg();
13683 using Fn = JSLinearString* (*)(JSContext*, int32_t);
13684 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
13685 StoreRegisterTo(temp));
13687 // Return |undefined| for negative inputs.
13688 Label done;
13689 masm.moveValue(UndefinedValue(), output);
13690 masm.branchTest32(Assembler::Signed, code, code, &done);
13692 // OOL path if code >= UNIT_STATIC_LIMIT.
13693 masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
13694 ool->entry());
13696 masm.bind(ool->rejoin());
13697 masm.tagValue(JSVAL_TYPE_STRING, temp, output);
13699 masm.bind(&done);
// FromCodePoint: Latin-1 code points come from the static-strings table;
// larger (two-byte) code points are materialized in a fresh
// JSThinInlineString, splitting supplementary code points into a UTF-16
// surrogate pair. Invalid code points (> NonBMPMax) bail out.
13702 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
13703 Register codePoint = ToRegister(lir->codePoint());
13704 Register output = ToRegister(lir->output());
13705 Register temp0 = ToRegister(lir->temp0());
13706 Register temp1 = ToRegister(lir->temp1());
13707 LSnapshot* snapshot = lir->snapshot();
13709 // The OOL path is only taken when we can't allocate the inline string.
13710 using Fn = JSLinearString* (*)(JSContext*, char32_t);
13711 auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
13712 StoreRegisterTo(output));
13714 Label isTwoByte;
13715 Label* done = ool->rejoin();
13717 static_assert(
13718 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
13719 "Latin-1 strings can be loaded from static strings");
13722 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
13723 &isTwoByte);
13724 masm.jump(done);
13726 masm.bind(&isTwoByte);
13728 // Use a bailout if the input is not a valid code point, because
13729 // MFromCodePoint is movable and it'd be observable when a moved
13730 // fromCodePoint throws an exception before its actual call site.
13731 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
13732 snapshot);
13734 // Allocate a JSThinInlineString.
13736 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
13737 "JSThinInlineString can hold a supplementary code point");
13739 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
13740 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
13741 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
// BMP code points need one UTF-16 unit; supplementary ones need two.
13744 Label isSupplementary;
13745 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
13746 &isSupplementary);
13748 // Store length.
13749 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
13751 // Load chars pointer in temp0.
13752 masm.loadInlineStringCharsForStore(output, temp0);
13754 masm.store16(codePoint, Address(temp0, 0));
13756 masm.jump(done);
13758 masm.bind(&isSupplementary);
13760 // Store length.
13761 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
13763 // Load chars pointer in temp0.
13764 masm.loadInlineStringCharsForStore(output, temp0);
13766 // Inlined unicode::LeadSurrogate(uint32_t).
13767 masm.move32(codePoint, temp1);
13768 masm.rshift32(Imm32(10), temp1);
13769 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
13770 temp1);
13772 masm.store16(temp1, Address(temp0, 0));
13774 // Inlined unicode::TrailSurrogate(uint32_t).
13775 masm.move32(codePoint, temp1);
13776 masm.and32(Imm32(0x3FF), temp1);
13777 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
13779 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
13783 masm.bind(done);
// StringIncludes: generic (non-SIMD) case — always calls into the VM.
13786 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
13787 pushArg(ToRegister(lir->searchString()));
13788 pushArg(ToRegister(lir->string()));
13790 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13791 callVM<Fn, js::StringIncludes>(lir);
// Shared codegen for LStringIncludesSIMD and LStringIndexOfSIMD: the search
// string is a known constant of one or two characters, so the match is done
// with the mozilla::SIMD memchr helpers instead of a VM call. Produces a
// bool for `includes` and an index (or -1) for `indexOf`. Falls back to the
// out-of-line VM call for ropes.
13794 template <typename LIns>
13795 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
13796 LiveRegisterSet volatileRegs) {
13797 Register string = ToRegister(lir->string());
13798 Register output = ToRegister(lir->output());
13799 Register tempLength = ToRegister(lir->temp0());
13800 Register tempChars = ToRegister(lir->temp1());
13801 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
13803 const JSLinearString* searchString = lir->searchString();
13804 size_t length = searchString->length();
13805 MOZ_ASSERT(length == 1 || length == 2);
13807 // The additional temp register is only needed when searching for two
13808 // pattern characters.
13809 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
// Preload the "not found" result: false for includes, -1 for indexOf.
13811 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13812 masm.move32(Imm32(0), output);
13813 } else {
13814 masm.move32(Imm32(-1), output);
13817 masm.loadStringLength(string, tempLength);
13819 // Can't be a substring when the string is smaller than the search string.
13820 Label done;
13821 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
13823 bool searchStringIsPureTwoByte = false;
13824 if (searchString->hasTwoByteChars()) {
13825 JS::AutoCheckCannotGC nogc;
13826 searchStringIsPureTwoByte =
13827 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
13830 // Pure two-byte strings can't occur in a Latin-1 string.
13831 if (searchStringIsPureTwoByte) {
13832 masm.branchLatin1String(string, ool->rejoin());
13835 // Slow path when we need to linearize the string.
13836 masm.branchIfRope(string, ool->entry());
13838 Label restoreVolatile;
// Emits the ABI call to the matching SIMD helper for the given character
// encoding; the resulting pointer (or nullptr) ends up in |output|.
13840 auto callMatcher = [&](CharEncoding encoding) {
13841 masm.loadStringChars(string, tempChars, encoding);
13843 LiveGeneralRegisterSet liveRegs;
13844 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13845 // Save |tempChars| to compute the result index.
13846 liveRegs.add(tempChars);
13848 #ifdef DEBUG
13849 // Save |tempLength| in debug-mode for assertions.
13850 liveRegs.add(tempLength);
13851 #endif
13853 // Exclude non-volatile registers.
13854 liveRegs.set() = GeneralRegisterSet::Intersect(
13855 liveRegs.set(), GeneralRegisterSet::Volatile());
13857 masm.PushRegsInMask(liveRegs);
// One-character pattern: memchr8/memchr16.
13860 if (length == 1) {
13861 char16_t pat = searchString->latin1OrTwoByteChar(0);
13862 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13863 pat <= JSString::MAX_LATIN1_CHAR);
13865 masm.move32(Imm32(pat), output);
13867 masm.setupAlignedABICall();
13868 masm.passABIArg(tempChars);
13869 masm.passABIArg(output);
13870 masm.passABIArg(tempLength);
13871 if (encoding == CharEncoding::Latin1) {
13872 using Fn = const char* (*)(const char*, char, size_t);
13873 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
13874 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13875 } else {
13876 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
13877 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
13878 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
// Two-character pattern: memchr2x8/memchr2x16.
13880 } else {
13881 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
13882 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13883 pat0 <= JSString::MAX_LATIN1_CHAR);
13885 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
13886 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
13887 pat1 <= JSString::MAX_LATIN1_CHAR);
13889 masm.move32(Imm32(pat0), output);
13890 masm.move32(Imm32(pat1), maybeTempPat);
13892 masm.setupAlignedABICall();
13893 masm.passABIArg(tempChars);
13894 masm.passABIArg(output);
13895 masm.passABIArg(maybeTempPat);
13896 masm.passABIArg(tempLength);
13897 if (encoding == CharEncoding::Latin1) {
13898 using Fn = const char* (*)(const char*, char, char, size_t);
13899 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
13900 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13901 } else {
13902 using Fn =
13903 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
13904 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
13905 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
13909 masm.storeCallPointerResult(output);
13911 // Convert to string index for `indexOf`.
13912 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
13913 // Restore |tempChars|. (And in debug mode |tempLength|.)
13914 masm.PopRegsInMask(liveRegs);
13916 Label found;
13917 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
13919 masm.move32(Imm32(-1), output);
13920 masm.jump(&restoreVolatile);
13922 masm.bind(&found);
13924 #ifdef DEBUG
13925 // Check lower bound.
13926 Label lower;
13927 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
13928 masm.assumeUnreachable("result pointer below string chars");
13929 masm.bind(&lower);
13931 // Compute the end position of the characters.
13932 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
13933 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
13934 tempLength);
13936 // Check upper bound.
13937 Label upper;
13938 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
13939 masm.assumeUnreachable("result pointer above string chars");
13940 masm.bind(&upper);
13941 #endif
// Pointer difference -> character index (shift for two-byte chars).
13943 masm.subPtr(tempChars, output);
13945 if (encoding == CharEncoding::TwoByte) {
13946 masm.rshiftPtr(Imm32(1), output);
// Save the caller's volatile registers (minus our own outputs/temps)
// around the matcher calls.
13951 volatileRegs.takeUnchecked(output);
13952 volatileRegs.takeUnchecked(tempLength);
13953 volatileRegs.takeUnchecked(tempChars);
13954 if (maybeTempPat != InvalidReg) {
13955 volatileRegs.takeUnchecked(maybeTempPat);
13957 masm.PushRegsInMask(volatileRegs);
13959 // Handle the case when the input is a Latin-1 string.
13960 if (!searchStringIsPureTwoByte) {
13961 Label twoByte;
13962 masm.branchTwoByteString(string, &twoByte);
13964 callMatcher(CharEncoding::Latin1);
13965 masm.jump(&restoreVolatile);
13967 masm.bind(&twoByte);
13970 // Handle the case when the input is a two-byte string.
13971 callMatcher(CharEncoding::TwoByte);
13973 masm.bind(&restoreVolatile);
13974 masm.PopRegsInMask(volatileRegs);
13976 // Convert to bool for `includes`.
13977 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
13978 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
13981 masm.bind(ool->rejoin());
// StringIncludesSIMD: constant 1-2 char search string; uses the shared
// CallStringMatch fast path with js::StringIncludes as the OOL fallback.
13984 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
13985 Register string = ToRegister(lir->string());
13986 Register output = ToRegister(lir->output());
13987 const JSLinearString* searchString = lir->searchString();
13989 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
13990 auto* ool = oolCallVM<Fn, js::StringIncludes>(
13991 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
13993 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
// StringIndexOf: generic (non-SIMD) case — always calls into the VM.
13996 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
13997 pushArg(ToRegister(lir->searchString()));
13998 pushArg(ToRegister(lir->string()));
14000 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
14001 callVM<Fn, js::StringIndexOf>(lir);
// StringIndexOfSIMD: constant 1-2 char search string; uses the shared
// CallStringMatch fast path with js::StringIndexOf as the OOL fallback.
14004 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
14005 Register string = ToRegister(lir->string());
14006 Register output = ToRegister(lir->output());
14007 const JSLinearString* searchString = lir->searchString();
14009 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
14010 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
14011 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
14013 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
// StringLastIndexOf: always calls into the VM.
14016 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
14017 pushArg(ToRegister(lir->searchString()));
14018 pushArg(ToRegister(lir->string()));
14020 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
14021 callVM<Fn, js::StringLastIndexOf>(lir);
// StringStartsWith: generic case — always calls into the VM.
14024 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
14025 pushArg(ToRegister(lir->searchString()));
14026 pushArg(ToRegister(lir->string()));
14028 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
14029 callVM<Fn, js::StringStartsWith>(lir);
// StringStartsWithInline: the search string is a known non-empty constant.
// Walks rope left children to reach a prefix at least as long as the search
// string, then compares characters inline; the VM call handles ropes that
// must be linearized first.
14032 void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
14033 Register string = ToRegister(lir->string());
14034 Register output = ToRegister(lir->output());
14035 Register temp = ToRegister(lir->temp0());
14037 const JSLinearString* searchString = lir->searchString();
14039 size_t length = searchString->length();
14040 MOZ_ASSERT(length > 0);
14042 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
14043 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
14044 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
// Preload the "no match" result; early exits jump to the rejoin.
14046 masm.move32(Imm32(0), output);
14048 // Can't be a prefix when the string is smaller than the search string.
14049 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
14050 Imm32(length), ool->rejoin());
14052 // Unwind ropes at the start if possible.
14053 Label compare;
14054 masm.movePtr(string, temp);
14055 masm.branchIfNotRope(temp, &compare);
14057 Label unwindRope;
14058 masm.bind(&unwindRope);
14059 masm.loadRopeLeftChild(temp, output);
14060 masm.movePtr(output, temp);
14062 // If the left child is smaller than the search string, jump into the VM to
14063 // linearize the string.
14064 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
14065 Imm32(length), ool->entry());
14067 // Otherwise keep unwinding ropes.
14068 masm.branchIfRope(temp, &unwindRope);
14070 masm.bind(&compare);
14072 // If operands point to the same instance, it's trivially a prefix.
14073 Label notPointerEqual;
14074 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
14075 &notPointerEqual);
14076 masm.move32(Imm32(1), output);
14077 masm.jump(ool->rejoin());
14078 masm.bind(&notPointerEqual);
14080 if (searchString->hasTwoByteChars()) {
14081 // Pure two-byte strings can't be a prefix of Latin-1 strings.
14082 JS::AutoCheckCannotGC nogc;
14083 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
14084 Label compareChars;
14085 masm.branchTwoByteString(temp, &compareChars);
14086 masm.move32(Imm32(0), output);
14087 masm.jump(ool->rejoin());
14088 masm.bind(&compareChars);
14092 // Load the input string's characters.
14093 Register stringChars = output;
14094 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
14096 // Start comparing character by character.
14097 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
14099 masm.bind(ool->rejoin());
// Generic path for String.prototype.endsWith: delegate both operands to the
// js::StringEndsWith VM function, which stores the boolean result.
void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
  // VM-call arguments are pushed last-to-first, matching the Fn signature
  // (string, searchString) below.
  pushArg(ToRegister(lir->searchString()));
  pushArg(ToRegister(lir->string()));

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callVM<Fn, js::StringEndsWith>(lir);
}
// Inline fast path for String.prototype.endsWith when the search string is a
// compile-time-constant linear string. Any case the inline code can't handle
// (ropes that can't be unwound, non-comparable character layouts) falls back
// to the js::StringEndsWith VM call via |ool|.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);  // Empty search strings don't take this LIR node.

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is |false|; paths that jump straight to the rejoin point
  // rely on this.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  // |output| is used as a scratch register here; it is reset on every path
  // that reaches the rejoin point.
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  // |encoding| is also needed below for addToCharPtr, hence the explicit
  // variable instead of just testing hasTwoByteChars().
  CharEncoding encoding = searchString->hasLatin1Chars()
                              ? CharEncoding::Latin1
                              : CharEncoding::TwoByte;
  if (encoding == CharEncoding::TwoByte) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Move string-char pointer to the suffix string.
  masm.loadStringLength(temp, temp);
  masm.sub32(Imm32(length), temp);
  masm.addToCharPtr(stringChars, temp, encoding);

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
// Inline implementation of String.prototype.toLowerCase for short, linear
// Latin-1 strings. Everything else (two-byte, ropes, long strings, allocation
// failure) is handled by the js::StringToLowerCase VM call via |ool|.
void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as a temporary.
  Register temp3 =
      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  Register temp4 = ToRegister(lir->temp4());

  using Fn = JSString* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
      lir, ArgList(string), StoreRegisterTo(output));

  // Take the slow path if the string isn't a linear Latin-1 string.
  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  Register flags = temp0;
  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  masm.and32(linearLatin1Bits, flags);
  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());

  // |temp0| is reused: flags are no longer needed from here on.
  Register length = temp0;
  masm.loadStringLength(string, length);

  // Return the input if it's the empty string.
  Label notEmptyString;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  {
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&notEmptyString);

  Register inputChars = temp1;
  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);

  Register toLowerCaseTable = temp2;
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);

  // Single element strings can be directly retrieved from static strings cache.
  Label notSingleElementString;
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  {
    Register current = temp4;

    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.lookupStaticString(current, output, gen->runtime->staticStrings());

    masm.jump(ool->rejoin());
  }
  masm.bind(&notSingleElementString);

  // Use the OOL-path when the string is too long. This prevents scanning long
  // strings which have upper case characters only near the end a second time in
  // the VM.
  constexpr int32_t MaxInlineLength = 64;
  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());

  {
    // Check if there are any characters which need to be converted.
    //
    // This extra loop gives a small performance improvement for strings which
    // are already lower cased and lets us avoid calling into the runtime for
    // non-inline, all lower case strings. But more importantly it avoids
    // repeated inline allocation failures:
    // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
    // |js::StringToLowerCase| runtime function when the result string can't be
    // allocated inline. And |js::StringToLowerCase| directly returns the input
    // string when no characters need to be converted. That means it won't
    // trigger GC to clear up the free nursery space, so the next toLowerCase()
    // call will again fail to inline allocate the result string.
    Label hasUpper;
    {
      Register checkInputChars = output;
      masm.movePtr(inputChars, checkInputChars);

      Register current = temp4;

      // Scan the characters: a character is upper case iff the lower-case
      // table maps it to a different character.
      Label start;
      masm.bind(&start);
      masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
      masm.branch8(Assembler::NotEqual,
                   BaseIndex(toLowerCaseTable, current, TimesOne), current,
                   &hasUpper);
      masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);

      // Input is already in lower case.
      masm.movePtr(string, output);
      masm.jump(ool->rejoin());
    }
    masm.bind(&hasUpper);

    // |length| was clobbered above, reload.
    masm.loadStringLength(string, length);

    // Call into the runtime when we can't create an inline string.
    masm.branch32(Assembler::Above, length,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());

    AllocateThinOrFatInlineString(masm, output, length, temp4,
                                  initialStringHeap(), ool->entry(),
                                  CharEncoding::Latin1);

    // When |temp3| aliases |string| (x86), save |string| so it can be
    // restored after the copy loop below clobbers the register.
    if (temp3 == string) {
      masm.push(string);
    }

    Register outputChars = temp3;
    masm.loadInlineStringCharsForStore(output, outputChars);

    {
      Register current = temp4;

      // Copy the input characters through the lower-case table into the
      // freshly allocated inline string.
      Label start;
      masm.bind(&start);
      masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
      masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                           current);
      masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
      masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
      masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
    }

    if (temp3 == string) {
      masm.pop(string);
    }
  }

  masm.bind(ool->rejoin());
}
// String.prototype.toUpperCase has no inline fast path (unlike toLowerCase
// above): always call the js::StringToUpperCase VM function.
void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
  pushArg(ToRegister(lir->string()));

  using Fn = JSString* (*)(JSContext*, HandleString);
  callVM<Fn, js::StringToUpperCase>(lir);
}
// Lower-case a single character code and return the corresponding static
// string. Latin-1 codes are converted inline via a lookup table; everything
// else goes to the jit::CharCodeToLowerCase VM function.
void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Convert to lower case via the Latin-1 lower-case mapping table.
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
  masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);

  // Load static string for lower case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
// Upper-case a single character code and return the corresponding static
// string. Latin-1 codes (minus three special cases) are converted inline by
// replicating unicode::ToUpperCase's two-level table lookup; all other inputs
// go to the jit::CharCodeToUpperCase VM function.
void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Most one element Latin-1 strings can be directly retrieved from the
  // static strings cache, except the following three characters:
  //
  // 1. ToUpper(U+00B5) = U+039C
  // 2. ToUpper(U+00FF) = U+0178
  // 3. ToUpper(U+00DF) = U+0053 U+0053
  masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());

  // Inline unicode::ToUpperCase (without the special case for ASCII characters)

  constexpr size_t shift = unicode::CharInfoShift;

  // code >> shift
  masm.move32(code, temp);
  masm.rshift32(Imm32(shift), temp);

  // index = index1[code >> shift];
  masm.movePtr(ImmPtr(unicode::index1), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // (code & ((1 << shift) - 1)
  masm.move32(code, output);
  masm.and32(Imm32((1 << shift) - 1), output);

  // (index << shift) + (code & ((1 << shift) - 1))
  masm.lshift32(Imm32(shift), temp);
  masm.add32(output, temp);

  // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
  masm.movePtr(ImmPtr(unicode::index2), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // Compute |index * 6| through |(index * 3) * TimesTwo|.
  static_assert(sizeof(unicode::CharacterInfo) == 6);
  masm.mulBy3(temp, temp);

  // upperCase = js_charinfo[index].upperCase
  masm.movePtr(ImmPtr(unicode::js_charinfo), output);
  masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
                                  offsetof(unicode::CharacterInfo, upperCase)),
                        temp);

  // uint16_t(ch) + upperCase
  masm.add32(code, temp);

  // Clear any high bits added when performing the unsigned 16-bit addition
  // through a signed 32-bit addition. An 8-bit zero-extend is sufficient
  // because, with the three special characters above excluded, the result
  // stays a Latin-1 character (NOTE(review): relies on that invariant of the
  // Unicode case mapping — confirm against unicode::ToUpperCase).
  masm.move8ZeroExtend(temp, temp);

  // Load static string for upper case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
// Compute the index of the first non-whitespace character of |string| by
// calling the C++ helper jit::StringTrimStartIndex through the ABI.
void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  // The ABI call clobbers volatile registers; preserve the live ones, but not
  // |output| since it receives the call's result.
  auto volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int32_t (*)(const JSString*);
  masm.setupAlignedABICall();
  masm.passABIArg(string);
  masm.callWithABI<Fn, jit::StringTrimStartIndex>();
  masm.storeCallInt32Result(output);

  masm.PopRegsInMask(volatileRegs);
}
// Compute the end index for trimming trailing whitespace of |string|,
// scanning backwards no further than |start|, via jit::StringTrimEndIndex.
void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
  Register string = ToRegister(lir->string());
  Register start = ToRegister(lir->start());
  Register output = ToRegister(lir->output());

  // Preserve live volatile registers around the ABI call; |output| is
  // excluded because it receives the result.
  auto volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int32_t (*)(const JSString*, int32_t);
  masm.setupAlignedABICall();
  masm.passABIArg(string);
  masm.passABIArg(start);
  masm.callWithABI<Fn, jit::StringTrimEndIndex>();
  masm.storeCallInt32Result(output);

  masm.PopRegsInMask(volatileRegs);
}
// String.prototype.split via the js::StringSplitString VM function. The limit
// argument is pinned to INT32_MAX, i.e. "no limit".
void CodeGenerator::visitStringSplit(LStringSplit* lir) {
  // Arguments are pushed last-to-first for the (string, separator, limit)
  // signature below.
  pushArg(Imm32(INT32_MAX));
  pushArg(ToRegister(lir->separator()));
  pushArg(ToRegister(lir->string()));

  using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  callVM<Fn, js::StringSplitString>(lir);
}
// Load the initialized length of an elements header into the output register.
void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
  Address initLength(ToRegister(lir->elements()),
                     ObjectElements::offsetOfInitializedLength());
  masm.load32(initLength, ToRegister(lir->output()));
}
// Store a new initialized length (taken from the index operand) into an
// elements header.
void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
  Address initLength(ToRegister(lir->elements()),
                     ObjectElements::offsetOfInitializedLength());
  SetLengthFromIndex(masm, lir->index(), initLength);
}
// Logical NOT of a BigInt: a BigInt is falsy exactly when its digit length is
// zero, so set the output to |length == 0|.
void CodeGenerator::visitNotBI(LNotBI* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
                Imm32(0), output);
}
// Logical NOT of an object. Objects are truthy unless they emulate undefined
// (e.g. document.all); when the "no object emulates undefined" fuse is intact
// the answer is unconditionally |false|.
void CodeGenerator::visitNotO(LNotO* lir) {
  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    // Bug 1874905: It would be fantastic if this could be optimized out.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(0), output);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* ifEmulatesUndefined = ool->label1();
    Label* ifDoesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                      ifDoesntEmulateUndefined, output, ool);
    // fall through

    Label join;

    // Doesn't emulate undefined => !object is false.
    masm.move32(Imm32(0), output);
    masm.jump(&join);

    masm.bind(ifEmulatesUndefined);
    masm.move32(Imm32(1), output);

    masm.bind(&join);
  }
}
// Logical NOT of a boxed Value: test the value's truthiness (specialized on
// the observed type set) and materialize the negated boolean in |output|.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Emit an unsigned bounds check |index < length| and bail out of Ion code if
// it fails. Both operands may be constants, registers, or memory; comparisons
// are 32-bit for Int32 indices and pointer-sized for IntPtr indices.
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Bail out when |cond| holds for (lhs, rhs), using the comparison width
  // matching |type|.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but with an immediate right-hand side wrapped to the right width.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      // Fully constant: either the check always passes (emit nothing) or it
      // always fails (unconditional bailout).
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        return;
      }
      bailout(snapshot);
      return;
    }

    // Constant index against a dynamic length: bail when length <= idx.
    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  // Dynamic index: bail when index >= length.
  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bounds check for an index with a known offset range: verifies that
// |index + min| and |index + max| are both within [0, length). Bails out of
// Ion code on failure. |temp| holds the adjusted index during the check.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Bail out when |cond| holds, using the comparison width matching |type|.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  // Same, but with an immediate right-hand side wrapped to the right width.
  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      // Both endpoints computable without overflow and nonnegative: a single
      // unsigned compare against the maximum endpoint suffices.
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    if (min != 0) {
      // Undo the |+ min| adjustment: fold it into |max| when that doesn't
      // overflow, otherwise subtract it from |temp| again.
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  // Final unsigned check: bail when length <= index + max.
  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
// Bail out if the (signed) index is below the statically-known minimum.
void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
  int32_t min = lir->mir()->minimum();
  bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
               lir->snapshot());
}
// Spectre mitigation: mask the index to zero when it is out of bounds, so a
// mispredicted bounds check can't be used to read attacker-controlled memory.
void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);

  const LAllocation* length = lir->length();
  Register index = ToRegister(lir->index());
  Register output = ToRegister(lir->output());

  // Dispatch on index width (32-bit vs pointer-sized) and on whether the
  // length lives in a register or in memory.
  if (lir->mir()->type() == MIRType::Int32) {
    if (length->isRegister()) {
      masm.spectreMaskIndex32(index, ToRegister(length), output);
    } else {
      masm.spectreMaskIndex32(index, ToAddress(length), output);
    }
  } else {
    MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
    if (length->isRegister()) {
      masm.spectreMaskIndexPtr(index, ToRegister(length), output);
    } else {
      masm.spectreMaskIndexPtr(index, ToAddress(length), output);
    }
  }
}
// Out-of-line path shared by LStoreElementHoleV/LStoreElementHoleT: taken when
// the store's index is outside the initialized length, see
// visitOutOfLineStoreElementHole.
class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* ins_;  // The owning store instruction (V or T flavor).

 public:
  explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
  }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineStoreElementHole(this);
  }

  // The shared MIR node, regardless of which LIR flavor owns this OOL path.
  MStoreElementHole* mir() const {
    return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
                                       : ins_->toStoreElementHoleT()->mir();
  }
  LInstruction* ins() const { return ins_; }
};
// Bail out if the element at |index| is the magic hole value, i.e. the slot
// hasn't been written yet.
void CodeGenerator::emitStoreHoleCheck(Register elements,
                                       const LAllocation* index,
                                       LSnapshot* snapshot) {
  Label bail;
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * sizeof(js::Value));
    masm.branchTestMagic(Assembler::Equal, dest, &bail);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(index));
    masm.branchTestMagic(Assembler::Equal, dest, &bail);
  }
  bailoutFrom(&bail, snapshot);
}
// Store an unboxed value of known MIRType into the element slot at |index|
// (constant or register).
void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
                                          MIRType valueType, Register elements,
                                          const LAllocation* index) {
  MOZ_ASSERT(valueType != MIRType::MagicHole);
  ConstantOrRegister v = ToConstantOrRegister(value, valueType);
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * sizeof(js::Value));
    masm.storeUnboxedValue(v, valueType, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(index));
    masm.storeUnboxedValue(v, valueType, dest);
  }
}
// Store a typed (unboxed) value into a dense element, with optional GC
// pre-barrier and hole check as requested by the MIR node.
void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
  Register elements = ToRegister(store->elements());
  const LAllocation* index = store->index();

  // Pre-barrier on the old value, required when it may be a GC thing.
  if (store->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  // Bail out rather than overwrite a hole.
  if (store->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, store->snapshot());
  }

  emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
                        index);
}
// Store a boxed Value into a dense element, with optional GC pre-barrier and
// hole check as requested by the MIR node.
void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
  const ValueOperand value = ToValue(lir, LStoreElementV::Value);
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  // Pre-barrier on the old value, required when it may be a GC thing.
  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(elements, index);
  }

  // Bail out rather than overwrite a hole.
  if (lir->mir()->needsHoleCheck()) {
    emitStoreHoleCheck(elements, index, lir->snapshot());
  }

  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
    masm.storeValue(value, dest);
  } else {
    BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
    masm.storeValue(value, dest);
  }
}
// Write the magic hole value into an element and mark the elements header as
// non-packed, since the array now contains a hole.
void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());

  Address elementsFlags(elements, ObjectElements::offsetOfFlags());
  masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);

  BaseObjectElementIndex element(elements, index);
  masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
}
// Store a typed value into a dense element that may lie at the initialized
// length (appending). In-bounds stores proceed inline; the index ==
// initializedLength case is handled by the OOL path, which grows the array
// and jumps back to the store.
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here after the pre-barrier: the newly grown slot is
  // uninitialized, so no barrier is needed for it.
  masm.bind(ool->rejoin());
  emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
                        lir->index());

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    ConstantOrRegister val =
        ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
  }
}
// Boxed-Value counterpart of visitStoreElementHoleT: store a Value into a
// dense element that may lie at the initialized length, growing the array via
// the OOL path when appending.
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  emitPreBarrier(elements, lir->index());

  // The OOL path rejoins here after the pre-barrier: the newly grown slot is
  // uninitialized, so no barrier is needed for it.
  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
                                ConstantOrRegister(value));
  }
}
// Out-of-line path for StoreElementHole{V,T}: reached when index >=
// initializedLength. Handles only the append case (index == initLength);
// anything else bails out. Grows the elements allocation through
// NativeObject::addDenseElementPure when needed, bumps the initialized length
// (and length), then jumps back to the inline store.
void CodeGenerator::visitOutOfLineStoreElementHole(
    OutOfLineStoreElementHole* ool) {
  Register object, elements, index;
  LInstruction* ins = ool->ins();
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  // Unpack the registers/value from whichever LIR flavor owns this OOL path.
  if (ins->isStoreElementHoleV()) {
    LStoreElementHoleV* store = ins->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(
        TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = ins->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
  bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  // addDenseElementPure returning false means it couldn't grow: bail.
  bailoutIfFalseBool(temp, ins->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);

  // Jump to the inline path where we will store the value.
  // We rejoin after the prebarrier, because the memory is uninitialized.
  masm.jump(ool->rejoin());
}
// Array.prototype.pop/shift on packed arrays: emit the corresponding
// MacroAssembler fast path and bail out of Ion code when it can't handle the
// array (the |bail| label).
void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
  Register obj = ToRegister(lir->object());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  if (lir->mir()->mode() == MArrayPopShift::Pop) {
    masm.packedArrayPop(obj, out, temp1, temp2, &bail);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
    // Shift moves all elements, which can require calling into the runtime
    // and hence needs the live volatile registers.
    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
15010 class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
15011 LArrayPush* ins_;
15013 public:
15014 explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}
15016 void accept(CodeGenerator* codegen) override {
15017 codegen->visitOutOfLineArrayPush(this);
15020 LArrayPush* ins() const { return ins_; }
// Codegen for Array.prototype.push with a single element. Inline path:
// when |length == initializedLength| and |length < capacity|, store the
// value at elements[length] and bump both length and initialized length.
// Otherwise jump to OutOfLineArrayPush to grow the elements storage. The
// new length is left in the output register.
15023 void CodeGenerator::visitArrayPush(LArrayPush* lir) {
15024 Register obj = ToRegister(lir->object());
15025 Register elementsTemp = ToRegister(lir->temp0());
// |length| doubles as the output register: it ends up holding the new
// array length.
15026 Register length = ToRegister(lir->output());
15027 ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
15028 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
15030 auto* ool = new (alloc()) OutOfLineArrayPush(lir);
15031 addOutOfLineCode(ool, lir->mir());
15033 // Load obj->elements in elementsTemp.
15034 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
15036 Address initLengthAddr(elementsTemp,
15037 ObjectElements::offsetOfInitializedLength());
15038 Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
15039 Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());
15041 // Bail out if length != initLength.
15042 masm.load32(lengthAddr, length);
15043 bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());
15045 // If length < capacity, we can add a dense element inline. If not, we
15046 // need to allocate more elements.
15047 masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
// The out-of-line path reloads |elementsTemp| before rejoining here.
15048 masm.bind(ool->rejoin());
15050 // Store the value.
15051 masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
15053 // Update length and initialized length.
15054 masm.add32(Imm32(1), length);
15055 masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
15056 masm.store32(length, Address(elementsTemp,
15057 ObjectElements::offsetOfInitializedLength()));
15059 if (ValueNeedsPostBarrier(lir->mir()->value())) {
15060 LiveRegisterSet regs = liveVolatileRegs(lir);
15061 regs.addUnchecked(length);
// |length| was already incremented past the element we just wrote, so
// indexDiff = -1 points the barrier back at the stored slot.
15062 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
15063 elementsTemp, ConstantOrRegister(value),
15064 /* indexDiff = */ -1);
// Out-of-line path for LArrayPush: calls NativeObject::addDenseElementPure
// through an ABI call and bails out if it returns false. On success the
// (possibly reallocated) elements pointer is reloaded into |temp| — the
// same register the inline path uses for elements — before rejoining.
15068 void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
15069 LArrayPush* ins = ool->ins();
15071 Register object = ToRegister(ins->object());
15072 Register temp = ToRegister(ins->temp0());
// Save live volatiles across the call, but not |temp| (it receives the
// call result). The output register and the value operand must survive
// the call, so add them explicitly.
15074 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
15075 liveRegs.takeUnchecked(temp);
15076 liveRegs.addUnchecked(ToRegister(ins->output()));
15077 liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));
15079 masm.PushRegsInMask(liveRegs);
15081 masm.setupAlignedABICall();
15082 masm.loadJSContext(temp);
15083 masm.passABIArg(temp);
15084 masm.passABIArg(object);
15086 using Fn = bool (*)(JSContext*, NativeObject* obj);
15087 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
15088 masm.storeCallPointerResult(temp);
15090 masm.PopRegsInMask(liveRegs);
// A false result means the element could not be added purely; deopt.
15091 bailoutIfFalseBool(temp, ins->snapshot());
15093 // Load the reallocated elements pointer.
15094 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
15096 masm.jump(ool->rejoin());
// Codegen for Array.prototype.slice on a packed array. Bails out if the
// array is not packed, then tries to allocate the result object inline
// from the template object; on allocation failure nullptr is passed to
// the VM call, which then performs the allocation itself.
15099 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
15100 Register object = ToRegister(lir->object());
15101 Register begin = ToRegister(lir->begin());
15102 Register end = ToRegister(lir->end());
15103 Register temp0 = ToRegister(lir->temp0());
15104 Register temp1 = ToRegister(lir->temp1());
15106 Label call, fail;
// The fast path requires a packed (no-holes) array.
15108 Label bail;
15109 masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
15110 bailoutFrom(&bail, lir->snapshot());
15112 // Try to allocate an object.
15113 TemplateObject templateObject(lir->mir()->templateObj());
15114 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
15115 &fail);
15117 masm.jump(&call);
// GC allocation failed: signal with a nullptr result object.
15119 masm.bind(&fail);
15120 masm.movePtr(ImmPtr(nullptr), temp0);
15122 masm.bind(&call);
15124 pushArg(temp0);
15125 pushArg(end);
15126 pushArg(begin);
15127 pushArg(object);
15129 using Fn =
15130 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
15131 callVM<Fn, ArraySliceDense>(lir);
// Codegen for slicing an arguments object. Same structure as
// visitArraySlice, minus the packed-array check: try to allocate the
// result inline from the template object, and pass nullptr to the VM
// call if that allocation fails.
15134 void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
15135 Register object = ToRegister(lir->object());
15136 Register begin = ToRegister(lir->begin());
15137 Register end = ToRegister(lir->end());
15138 Register temp0 = ToRegister(lir->temp0());
15139 Register temp1 = ToRegister(lir->temp1());
15141 Label call, fail;
15143 // Try to allocate an object.
15144 TemplateObject templateObject(lir->mir()->templateObj());
15145 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
15146 &fail);
15148 masm.jump(&call);
// GC allocation failed: signal with a nullptr result object.
15150 masm.bind(&fail);
15151 masm.movePtr(ImmPtr(nullptr), temp0);
15153 masm.bind(&call);
15155 pushArg(temp0);
15156 pushArg(end);
15157 pushArg(begin);
15158 pushArg(object);
15160 using Fn =
15161 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
15162 callVM<Fn, ArgumentsSliceDense>(lir);
#ifdef DEBUG
// Debug-only sanity checks for the operands of an arguments-slice:
//   0 <= begin, 0 <= count, begin <= numActualArgs,
//   count <= numActualArgs, begin + count <= numActualArgs.
// Compile-time constants are checked with MOZ_ASSERT; register operands
// get emitted runtime checks. |begin| and |count| are preserved;
// |numActualArgs| is clobbered.
//
// Note: the assumeUnreachable messages state the *violated* condition
// (they are printed exactly when the check fails), consistently with the
// "begin < 0" / "count < 0" messages below.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin > numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count > numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count > numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
// Allocate a new array with |count| dense elements for the *ArgumentsSlice
// codegen paths. |count| is either a register or a compile-time constant.
// When the template object's inline capacity is too small, or the GC
// allocation fails, fall back to the NewArrayObjectEnsureDenseInitLength
// VM call. On the inline path, length and initializedLength are set to
// |count| before rejoining.
15240 template <class ArgumentsSlice>
15241 void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
15242 const RegisterOrInt32& count, Register output,
15243 Register temp) {
15244 using Fn = ArrayObject* (*)(JSContext*, int32_t);
15245 auto* ool = count.match(
15246 [&](Register count) {
15247 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
15248 lir, ArgList(count), StoreRegisterTo(output));
15250 [&](int32_t count) {
15251 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
15252 lir, ArgList(Imm32(count)), StoreRegisterTo(output));
// The template must be an empty array with inline (fixed) elements, so
// that the only things to patch after allocation are the two lengths.
15255 TemplateObject templateObject(lir->mir()->templateObj());
15256 MOZ_ASSERT(templateObject.isArrayObject());
15258 auto templateNativeObj = templateObject.asTemplateNativeObject();
15259 MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
15260 MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
15261 MOZ_ASSERT(!templateNativeObj.hasDynamicElements());
15263 // Check array capacity. Call into the VM if the template object's capacity
15264 // is too small.
15265 bool tryAllocate = count.match(
15266 [&](Register count) {
15267 masm.branch32(Assembler::Above, count,
15268 Imm32(templateNativeObj.getDenseCapacity()),
15269 ool->entry());
15270 return true;
15272 [&](int32_t count) {
15273 MOZ_ASSERT(count >= 0);
// With a constant count that exceeds capacity, the VM call is taken
// unconditionally, so skip emitting the inline allocation entirely.
15274 if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
15275 masm.jump(ool->entry());
15276 return false;
15278 return true;
15281 if (tryAllocate) {
15282 // Try to allocate an object.
15283 masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
15284 ool->entry());
15286 auto setInitializedLengthAndLength = [&](auto count) {
15287 const int elementsOffset = NativeObject::offsetOfFixedElements();
15289 // Update initialized length.
15290 Address initLength(
15291 output, elementsOffset + ObjectElements::offsetOfInitializedLength());
15292 masm.store32(count, initLength);
15294 // Update length.
15295 Address length(output, elementsOffset + ObjectElements::offsetOfLength());
15296 masm.store32(count, length);
15299 // The array object was successfully created. Set the length and initialized
15300 // length and then proceed to fill the elements.
// A constant count of zero matches the template's lengths already, so
// there is nothing to store in that case.
15301 count.match([&](Register count) { setInitializedLengthAndLength(count); },
15302 [&](int32_t count) {
15303 if (count > 0) {
15304 setInitializedLengthAndLength(Imm32(count));
15309 masm.bind(ool->rejoin());
// Build an array containing |count| of the frame's actual arguments,
// starting at argument index |begin|. The arguments are copied in a loop
// straight out of the JIT frame's actual-args area.
15312 void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
15313 Register begin = ToRegister(lir->begin());
15314 Register count = ToRegister(lir->count());
15315 Register temp = ToRegister(lir->temp0());
15316 Register output = ToRegister(lir->output());
15318 #ifdef DEBUG
15319 masm.loadNumActualArgs(FramePointer, temp);
15320 emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
15321 temp);
15322 #endif
// Allocates the result array (length/initLength already set to count).
15324 emitNewArray(lir, RegisterOrInt32(count), output, temp);
15326 Label done;
15327 masm.branch32(Assembler::Equal, count, Imm32(0), &done);
// Grab a scratch ValueOperand not aliasing any of our registers.
15329 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
15330 allRegs.take(begin);
15331 allRegs.take(count);
15332 allRegs.take(temp);
15333 allRegs.take(output);
15335 ValueOperand value = allRegs.takeAnyValue();
// |output| and |begin| are clobbered below (reused as elements pointer
// and running argument index), so save and restore them around the loop.
15337 LiveRegisterSet liveRegs;
15338 liveRegs.add(output);
15339 liveRegs.add(begin);
15340 liveRegs.add(value);
15342 masm.PushRegsInMask(liveRegs);
15344 // Initialize all elements.
15346 Register elements = output;
15347 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
15349 Register argIndex = begin;
15351 Register index = temp;
15352 masm.move32(Imm32(0), index);
15354 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
15355 BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);
// Copy args[argIndex] into elements[index] until index == count.
15357 Label loop;
15358 masm.bind(&loop);
15360 masm.loadValue(argPtr, value);
15362 // We don't need a pre-barrier, because the element at |index| is guaranteed
15363 // to be a non-GC thing (either uninitialized memory or the magic hole
15364 // value).
15365 masm.storeValue(value, BaseObjectElementIndex(elements, index));
15367 masm.add32(Imm32(1), index);
15368 masm.add32(Imm32(1), argIndex);
15370 masm.branch32(Assembler::LessThan, index, count, &loop);
15372 masm.PopRegsInMask(liveRegs);
15374 // Emit a post-write barrier if |output| is tenured.
15376 // We expect that |output| is nursery allocated, so it isn't worth the
15377 // trouble to check if no frame argument is a nursery thing, which would
15378 // allow to omit the post-write barrier.
15379 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
15381 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
15382 volatileRegs.takeUnchecked(temp);
15383 if (output.volatile_()) {
15384 volatileRegs.addUnchecked(output);
15387 masm.PushRegsInMask(volatileRegs);
15388 emitPostWriteBarrier(output);
15389 masm.PopRegsInMask(volatileRegs);
15391 masm.bind(&done);
15394 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
15395 const LAllocation* allocation) {
15396 if (allocation->isConstant()) {
15397 return RegisterOrInt32(allocation->toConstant()->toInt32());
15399 return RegisterOrInt32(ToRegister(allocation));
// Build an array slice of an inlined call's arguments. Unlike
// visitFrameArgumentsSlice, the arguments are LIR operands of this
// instruction (numActuals is known at compile time), so three copy
// strategies are used depending on which of |begin|/|count| are
// compile-time constants.
15402 void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
15403 RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
15404 RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
15405 Register temp = ToRegister(lir->temp());
15406 Register output = ToRegister(lir->output());
15408 uint32_t numActuals = lir->mir()->numActuals();
15410 #ifdef DEBUG
15411 masm.move32(Imm32(numActuals), temp);
15413 emitAssertArgumentsSliceBounds(begin, count, temp);
15414 #endif
// Allocates the result array (length/initLength already set to count).
15416 emitNewArray(lir, count, output, temp);
15418 // We're done if there are no actual arguments.
15419 if (numActuals == 0) {
15420 return;
15423 // Check if any arguments have to be copied.
15424 Label done;
15425 if (count.is<Register>()) {
15426 masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
15427 } else if (count.as<int32_t>() == 0) {
15428 return;
// Helpers to read argument |i| as a ConstantOrRegister and store it into
// an element slot.
15431 auto getArg = [&](uint32_t i) {
15432 return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
15433 lir->mir()->getArg(i)->type());
15436 auto storeArg = [&](uint32_t i, auto dest) {
15437 // We don't need a pre-barrier because the element at |index| is guaranteed
15438 // to be a non-GC thing (either uninitialized memory or the magic hole
15439 // value).
15440 masm.storeConstantOrRegister(getArg(i), dest);
15443 // Initialize all elements.
15444 if (numActuals == 1) {
15445 // There's exactly one argument. We've checked that |count| is non-zero,
15446 // which implies that |begin| must be zero.
15447 MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);
15449 Register elements = temp;
15450 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
15452 storeArg(0, Address(elements, 0));
15453 } else if (begin.is<Register>()) {
15454 // There is more than one argument and |begin| isn't a compile-time
15455 // constant. Iterate through 0..numActuals to search for |begin| and then
15456 // start copying |count| arguments from that index.
// |output| and |begin| are clobbered below (reused as elements pointer
// and running argument index), so save and restore them around the loop.
15458 LiveGeneralRegisterSet liveRegs;
15459 liveRegs.add(output);
15460 liveRegs.add(begin.as<Register>());
15462 masm.PushRegsInMask(liveRegs);
15464 Register elements = output;
15465 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
15467 Register argIndex = begin.as<Register>();
15469 Register index = temp;
15470 masm.move32(Imm32(0), index);
// Unrolled over all actuals: each iteration copies argument |i| only if
// it is the current |argIndex|, then stops once |count| were copied.
15472 Label doneLoop;
15473 for (uint32_t i = 0; i < numActuals; ++i) {
15474 Label next;
15475 masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);
15477 storeArg(i, BaseObjectElementIndex(elements, index));
15479 masm.add32(Imm32(1), index);
15480 masm.add32(Imm32(1), argIndex);
15482 if (count.is<Register>()) {
15483 masm.branch32(Assembler::GreaterThanOrEqual, index,
15484 count.as<Register>(), &doneLoop);
15485 } else {
15486 masm.branch32(Assembler::GreaterThanOrEqual, index,
15487 Imm32(count.as<int32_t>()), &doneLoop);
15490 masm.bind(&next);
15492 masm.bind(&doneLoop);
15494 masm.PopRegsInMask(liveRegs);
15495 } else {
15496 // There is more than one argument and |begin| is a compile-time constant.
15498 Register elements = temp;
15499 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
15501 int32_t argIndex = begin.as<int32_t>();
15503 int32_t index = 0;
15505 Label doneLoop;
15506 for (uint32_t i = argIndex; i < numActuals; ++i) {
15507 storeArg(i, Address(elements, index * sizeof(Value)));
15509 index += 1;
15511 if (count.is<Register>()) {
15512 masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
15513 Imm32(index), &doneLoop);
15514 } else {
15515 if (index >= count.as<int32_t>()) {
15516 break;
15520 masm.bind(&doneLoop);
15523 // Determine if we have to emit post-write barrier.
15525 // If either |begin| or |count| is a constant, use their value directly.
15526 // Otherwise assume we copy all inline arguments from 0..numActuals.
15527 bool postWriteBarrier = false;
15528 uint32_t actualBegin = begin.match([](Register) { return 0; },
15529 [](int32_t value) { return value; });
15530 uint32_t actualCount =
15531 count.match([=](Register) { return numActuals; },
15532 [](int32_t value) -> uint32_t { return value; });
15533 for (uint32_t i = 0; i < actualCount; ++i) {
15534 ConstantOrRegister arg = getArg(actualBegin + i);
15535 if (arg.constant()) {
15536 Value v = arg.value();
15537 if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
15538 postWriteBarrier = true;
15540 } else {
15541 MIRType type = arg.reg().type();
15542 if (type == MIRType::Value || NeedsPostBarrier(type)) {
15543 postWriteBarrier = true;
15548 // Emit a post-write barrier if |output| is tenured and we couldn't
15549 // determine at compile-time that no barrier is needed.
15550 if (postWriteBarrier) {
15551 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
15553 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
15554 volatileRegs.takeUnchecked(temp);
15555 if (output.volatile_()) {
15556 volatileRegs.addUnchecked(output);
15559 masm.PushRegsInMask(volatileRegs);
15560 emitPostWriteBarrier(output);
15561 masm.PopRegsInMask(volatileRegs);
15564 masm.bind(&done);
15567 void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
15568 Register value = ToRegister(lir->value());
15569 Register length = ToRegister(lir->length());
15570 Register output = ToRegister(lir->output());
15572 masm.move32(value, output);
15574 Label positive;
15575 masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);
15577 Label done;
15578 masm.add32(length, output);
15579 masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
15580 masm.move32(Imm32(0), output);
15581 masm.jump(&done);
15583 masm.bind(&positive);
15584 masm.cmp32Move32(Assembler::LessThan, length, value, length, output);
15586 masm.bind(&done);
// Codegen for Array.prototype.join. Inline fast paths return the empty
// atom for a zero-length array and the element itself for a one-element
// array whose first element is already a string; everything else calls
// the jit::ArrayJoin VM function.
15589 void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
15590 Label skipCall;
15592 Register output = ToRegister(lir->output());
15593 Register sep = ToRegister(lir->separator());
15594 Register array = ToRegister(lir->array());
15595 Register temp = ToRegister(lir->temp0());
15597 // Fast path for simple length <= 1 cases.
15599 masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
15600 Address length(temp, ObjectElements::offsetOfLength());
15601 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
15603 // Check for length == 0
15604 Label notEmpty;
15605 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
// Empty array: the result is always the empty string atom.
15606 const JSAtomState& names = gen->runtime->names();
15607 masm.movePtr(ImmGCPtr(names.empty_), output);
15608 masm.jump(&skipCall);
15610 masm.bind(&notEmpty);
15611 Label notSingleString;
15612 // Check for length == 1, initializedLength >= 1, arr[0].isString()
15613 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
15614 masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);
// |temp| points at the elements header-adjacent data, so offset 0 is
// element 0.
15616 Address elem0(temp, 0);
15617 masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);
15619 // At this point, 'output' can be used as a scratch register, since we're
15620 // guaranteed to succeed.
15621 masm.unboxString(elem0, output);
15622 masm.jump(&skipCall);
15623 masm.bind(&notSingleString);
// Slow path: join in C++.
15626 pushArg(sep);
15627 pushArg(array);
15629 using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
15630 callVM<Fn, jit::ArrayJoin>(lir);
15631 masm.bind(&skipCall);
15634 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
15635 Register object = ToRegister(lir->object());
15637 pushArg(object);
15639 using Fn = JSObject* (*)(JSContext*, HandleObject);
15640 callVM<Fn, jit::ObjectKeys>(lir);
15643 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
15644 Register object = ToRegister(lir->object());
15646 pushArg(object);
15648 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
15649 callVM<Fn, jit::ObjectKeysLength>(lir);
15652 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
15653 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15654 TypedOrValueRegister val =
15655 toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
15656 lir->mir()->value()->type())
15657 .reg();
15658 Register output = ToRegister(lir->output());
15659 Register temp0 = ToRegister(lir->temp0());
15660 Register temp1 = ToRegister(lir->temp1());
15662 IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
15663 addIC(lir, allocateIC(ic));
15666 void CodeGenerator::visitOptimizeSpreadCallCache(
15667 LOptimizeSpreadCallCache* lir) {
15668 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15669 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
15670 ValueOperand output = ToOutValue(lir);
15671 Register temp = ToRegister(lir->temp0());
15673 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
15674 addIC(lir, allocateIC(ic));
15677 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
15678 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15679 Register iter = ToRegister(lir->iter());
15680 Register temp = ToRegister(lir->temp0());
15681 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
15683 IonCloseIterIC ic(liveRegs, iter, temp, kind);
15684 addIC(lir, allocateIC(ic));
15687 void CodeGenerator::visitOptimizeGetIteratorCache(
15688 LOptimizeGetIteratorCache* lir) {
15689 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
15690 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
15691 Register output = ToRegister(lir->output());
15692 Register temp = ToRegister(lir->temp0());
15694 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
15695 addIC(lir, allocateIC(ic));
15698 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
15699 const Register obj = ToRegister(lir->iterator());
15700 const ValueOperand output = ToOutValue(lir);
15701 const Register temp = ToRegister(lir->temp0());
15703 masm.iteratorMore(obj, output, temp);
15706 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
15707 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
15708 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
15709 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
15711 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
15713 if (!isNextBlock(lir->ifFalse()->lir())) {
15714 masm.jump(ifFalse);
15718 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
15719 const Register obj = ToRegister(lir->object());
15720 const Register temp0 = ToRegister(lir->temp0());
15721 const Register temp1 = ToRegister(lir->temp1());
15722 const Register temp2 = ToRegister(lir->temp2());
15724 masm.iteratorClose(obj, temp0, temp1, temp2);
15727 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
15728 // read number of actual arguments from the JS frame.
15729 Register argc = ToRegister(lir->output());
15730 masm.loadNumActualArgs(FramePointer, argc);
// Load a frame argument (actual or formal) into the output Value. The
// index is either a compile-time constant or a register; debug builds
// additionally assert the index is in bounds.
15733 void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
15734 ValueOperand result = ToOutValue(lir);
15735 const LAllocation* index = lir->index();
15736 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
15738 // This instruction is used to access actual arguments and formal arguments.
15739 // The number of Values on the stack is |max(numFormals, numActuals)|, so we
15740 // assert |index < numFormals || index < numActuals| in debug builds.
15741 DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();
15743 if (index->isConstant()) {
15744 int32_t i = index->toConstant()->toInt32();
15745 #ifdef DEBUG
// Constant index: only need the runtime check when it isn't already
// covered by the formal count.
15746 if (uint32_t(i) >= numFormals) {
15747 Label ok;
15748 Register argc = result.scratchReg();
15749 masm.loadNumActualArgs(FramePointer, argc);
15750 masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
15751 masm.assumeUnreachable("Invalid argument index");
15752 masm.bind(&ok);
15754 #endif
15755 Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
15756 masm.loadValue(argPtr, result);
15757 } else {
15758 Register i = ToRegister(index);
15759 #ifdef DEBUG
15760 Label ok;
15761 Register argc = result.scratchReg();
15762 masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
15763 masm.loadNumActualArgs(FramePointer, argc);
15764 masm.branch32(Assembler::Above, argc, i, &ok);
15765 masm.assumeUnreachable("Invalid argument index");
15766 masm.bind(&ok);
15767 #endif
15768 BaseValueIndex argPtr(FramePointer, i, argvOffset);
15769 masm.loadValue(argPtr, result);
// Load frame argument |index| with out-of-bounds handling: an index past
// |length| yields |undefined|, while a negative index bails out.
15773 void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
15774 ValueOperand result = ToOutValue(lir);
15775 Register index = ToRegister(lir->index());
15776 Register length = ToRegister(lir->length());
15777 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
15778 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
15780 Label outOfBounds, done;
15781 masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);
// In bounds: load the actual argument value from the frame.
15783 BaseValueIndex argPtr(FramePointer, index, argvOffset);
15784 masm.loadValue(argPtr, result);
15785 masm.jump(&done);
15787 masm.bind(&outOfBounds);
// Negative indices can't be handled here; deopt. Non-negative
// out-of-bounds indices produce |undefined|.
15788 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
15789 masm.moveValue(UndefinedValue(), result);
15791 masm.bind(&done);
// Build the rest-parameter array. When a shape is available, an array
// with inline capacity for up to two elements is allocated and filled
// inline; otherwise (no shape, allocation failure, or more than two rest
// arguments) the InitRestParameter VM function does the work.
15794 void CodeGenerator::visitRest(LRest* lir) {
15795 Register numActuals = ToRegister(lir->numActuals());
15796 Register temp0 = ToRegister(lir->temp0());
15797 Register temp1 = ToRegister(lir->temp1());
15798 Register temp2 = ToRegister(lir->temp2());
15799 Register temp3 = ToRegister(lir->temp3());
15800 unsigned numFormals = lir->mir()->numFormals();
// The inline allocation reserves room for exactly two dense elements.
15802 constexpr uint32_t arrayCapacity = 2;
15804 if (Shape* shape = lir->mir()->shape()) {
15805 uint32_t arrayLength = 0;
15806 gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
15807 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
15808 allocKind = ForegroundToBackgroundAllocKind(allocKind);
15809 MOZ_ASSERT(GetGCKindSlots(allocKind) ==
15810 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
15812 Label joinAlloc, failAlloc;
15813 masm.movePtr(ImmGCPtr(shape), temp0);
15814 masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
15815 arrayLength, arrayCapacity, 0, 0,
15816 allocKind, gc::Heap::Default, &failAlloc);
15817 masm.jump(&joinAlloc);
// Allocation failure: fall back to the VM path with a nullptr array.
15819 masm.bind(&failAlloc);
15820 masm.movePtr(ImmPtr(nullptr), temp2);
15822 masm.bind(&joinAlloc);
15823 } else {
15824 masm.movePtr(ImmPtr(nullptr), temp2);
15827 // Set temp1 to the address of the first actual argument.
15828 size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
15829 masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
15831 // Compute array length: max(numActuals - numFormals, 0).
15832 Register lengthReg;
15833 if (numFormals) {
15834 lengthReg = temp0;
15835 Label emptyLength, joinLength;
15836 masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
15837 &emptyLength);
15839 masm.move32(numActuals, lengthReg);
15840 masm.sub32(Imm32(numFormals), lengthReg);
15842 // Skip formal arguments.
15843 masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
15845 masm.jump(&joinLength);
15847 masm.bind(&emptyLength);
15849 masm.move32(Imm32(0), lengthReg);
15851 // Leave temp1 pointed to the start of actuals() when the rest-array
15852 // length is zero. We don't use |actuals() + numFormals| because
15853 // |numFormals| can be any non-negative int32 value when this MRest was
15854 // created from scalar replacement optimizations. And it seems
15855 // questionable to compute a Value* pointer which points to who knows
15856 // where.
15858 masm.bind(&joinLength);
15859 } else {
15860 // Use numActuals directly when there are no formals.
15861 lengthReg = numActuals;
15864 // Try to initialize the array elements.
15865 Label vmCall, done;
15866 if (lir->mir()->shape()) {
15867 // Call into C++ if we failed to allocate an array or there are more than
15868 // |arrayCapacity| elements.
15869 masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
15870 masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);
15872 // The array must be nursery allocated so no post barrier is needed.
15873 #ifdef DEBUG
15874 Label ok;
15875 masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
15876 masm.assumeUnreachable("Unexpected tenured object for LRest");
15877 masm.bind(&ok);
15878 #endif
15880 Label initialized;
15881 masm.branch32(Assembler::Equal, lengthReg, Imm32(0), &initialized);
15883 // Store length and initializedLength.
15884 Register elements = temp3;
15885 masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
15886 Address lengthAddr(elements, ObjectElements::offsetOfLength());
15887 Address initLengthAddr(elements,
15888 ObjectElements::offsetOfInitializedLength());
15889 masm.store32(lengthReg, lengthAddr);
15890 masm.store32(lengthReg, initLengthAddr);
15892 // Store either one or two elements. This may clobber lengthReg (temp0).
15893 static_assert(arrayCapacity == 2, "code handles 1 or 2 elements");
// If length == 1 skip the second element's store; the first element is
// always stored.
15894 Label storeFirst;
15895 masm.branch32(Assembler::Equal, lengthReg, Imm32(1), &storeFirst);
15896 masm.storeValue(Address(temp1, sizeof(Value)),
15897 Address(elements, sizeof(Value)), temp0);
15898 masm.bind(&storeFirst);
15899 masm.storeValue(Address(temp1, 0), Address(elements, 0), temp0);
15901 // Done.
15902 masm.bind(&initialized);
15903 masm.movePtr(temp2, ReturnReg);
15904 masm.jump(&done);
// Slow path: InitRestParameter(cx, length, rest-args pointer, array or
// nullptr).
15907 masm.bind(&vmCall);
15909 pushArg(temp2);
15910 pushArg(temp1);
15911 pushArg(lengthReg);
15913 using Fn =
15914 ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
15915 callVM<Fn, InitRestParameter>(lir);
15917 masm.bind(&done);
15920 // Create a stackmap from the given safepoint, with the structure:
15922 // <reg dump, if any>
15923 // | ++ <body (general spill)>
15924 // | | ++ <space for Frame>
15925 // | | ++ <inbound args>
15926 // | | |
15927 // Lowest Addr Highest Addr
15928 // |
15929 // framePushedAtStackMapBase
15931 // The caller owns the resulting stackmap. This assumes a grow-down stack.
15933 // For non-debug builds, if the stackmap would contain no pointers, no
15934 // stackmap is created, and nullptr is returned. For a debug build, a
15935 // stackmap is always created and returned.
15937 // Depending on the type of safepoint, the stackmap may need to account for
15938 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
15939 // isCall() == true, for which the register allocator will spill/restore all
15940 // live registers at the LIR level - in this case, the LSafepoint sees only live
15941 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
15942 // the other hand, is for LIR nodes which may manually spill/restore live
15943 // registers in codegen, in which case the stackmap must account for this. Traps
15944 // also require tracking of live registers, but spilling is handled by the trap
15945 // mechanism.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes spilled for live registers, outside of a trap.
  // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
  const size_t nRegisterDumpBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());

  // As mentioned above, for WasmSafepointKind::LirCall, register spills and
  // restores are handled at the LIR level and there should therefore be no live
  // registers to handle here.
  MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
                nRegisterDumpBytes == 0);
  MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // The stack map owns any alignment padding around inbound stack args.
  const size_t nInboundStackArgBytesAligned =
      wasm::AlignStackArgAreaSize(nInboundStackArgBytes);

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any register dump area.
  const size_t nNonRegisterBytes =
      nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
  MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);

  // This is the number of bytes in the register dump area, if any, below the
  // general spill area.
  const size_t nRegisterBytes =
      (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
          ? (trapExitLayoutNumWords * sizeof(void*))
          : nRegisterDumpBytes;

  // This is the total number of bytes covered by the map.
  const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;

#ifndef DEBUG
  bool needStackMap = !(safepoint.wasmAnyRefRegs().empty() &&
                        safepoint.wasmAnyRefSlots().empty() &&
                        safepoint.slotsOrElementsSlots().empty());

  // There are no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!needStackMap) {
    return true;
  }
#endif

  wasm::StackMap* stackMap =
      wasm::StackMap::create(nTotalBytes / sizeof(void*));
  if (!stackMap) {
    return false;
  }
  if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // REG DUMP AREA, if any.
  // Mark, in the map, the words of the register dump that hold references.
  // The dump layout differs between call-style safepoints (PushRegsInMask
  // layout) and traps (the fixed trapExitLayout).
  size_t regDumpWords = 0;
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  const LiveGeneralRegisterSet slotsOrElementsRegs =
      safepoint.slotsOrElementsRegs();
  const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
      wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
  GeneralRegisterForwardIterator refRegsIter(refRegs);
  switch (safepoint.wasmSafepointKind()) {
    case WasmSafepointKind::LirCall:
    case WasmSafepointKind::StackSwitch:
    case WasmSafepointKind::CodegenCall: {
      size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
      regDumpWords += spilledNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromSpillBase =
            safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
            sizeof(void*);
        MOZ_ASSERT(0 < offsetFromSpillBase &&
                   offsetFromSpillBase <= spilledNumWords);
        size_t index = spilledNumWords - offsetFromSpillBase;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(index, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(index, wasm::StackMap::ArrayDataPointer);
        }
      }
      // Float and vector registers do not have to be handled; they cannot
      // contain wasm anyrefs, and they are spilled after general-purpose
      // registers. Gprs are therefore closest to the spill base and thus their
      // offset calculation does not need to account for other spills.
    } break;
    case WasmSafepointKind::Trap: {
      regDumpWords += trapExitLayoutNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromTop = trapExitLayout.getOffset(reg);

        // If this doesn't hold, the associated register wasn't saved by
        // the trap exit stub. Better to crash now than much later, in
        // some obscure place, and possibly with security consequences.
        MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

        // offsetFromTop is an offset in words down from the highest
        // address in the exit stub save area. Switch it around to be an
        // offset up from the bottom of the (integer register) save area.
        size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
        } else {
          MOZ_ASSERT(slotsOrElementsRegs.has(reg));
          stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
        }
      }
    } break;
    default:
      MOZ_CRASH("unreachable");
  }

  // Ensure other reg/slot collections on LSafepoint are empty.
  MOZ_ASSERT(safepoint.gcRegs().empty() && safepoint.gcSlots().empty());
#ifdef JS_NUNBOX32
  MOZ_ASSERT(safepoint.nunboxParts().empty());
#elif JS_PUNBOX64
  MOZ_ASSERT(safepoint.valueRegs().empty() && safepoint.valueSlots().empty());
#endif

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    } else {
      // It's an argument slot
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    }
  }

  // Track array data pointers on the stack
  const LSafepoint::SlotList& slots = safepoint.slotsOrElementsSlots();
  for (SafepointSlotEntry slot : slots) {
    MOZ_ASSERT(slot.stack);

    // It's a slot in the body allocation, so .slot is interpreted
    // as an index downwards from the Frame*
    MOZ_ASSERT(slot.slot <= nBodyBytes);
    uint32_t offsetInBytes = nBodyBytes - slot.slot;
    MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
    stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                  wasm::StackMap::Kind::ArrayDataPointer);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
                             stackMap->header.frameOffsetFromTop + i) ==
               wasm::StackMap::Kind::POD);
  }
#endif

  *result = stackMap;
  return true;
}
// Emit the complete machine code for one wasm function: prologue (with
// optional stack-overflow check), body, epilogue and out-of-line code, then
// convert every recorded safepoint into a wasm::StackMap. Returns false on
// OOM or if the frame is implausibly large.
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    // Reserve the stack with an explicit overflow check; the check can trap,
    // so a stackmap must be attached to the trapping instruction.
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // None of the Ion-specific side tables should be in use for wasm code.
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      // Release builds skip stackmaps with no references to track.
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
// Emit the complete machine code for an Ion-compiled script: prologue, body,
// epilogue, invalidation epilogue and out-of-line code. Also maintains the
// native => bytecode mapping table and encodes safepoints once all OSI-point
// offsets are known. Returns false on OOM.
bool CodeGenerator::generate() {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().oneOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer_.recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  return !masm.oom();
}
// Register this compilation as depending on every script it inlined, so that
// invalidating an inlined script also invalidates the parent IonScript.
// Sets *isValid to false (and returns true) if an inlined script has become
// a debuggee, in which case the compilation must be discarded. Returns false
// only on OOM.
static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
                                   IonCompilationId compilationId,
                                   const WarpSnapshot* snapshot,
                                   bool* isValid) {
  MOZ_ASSERT(!*isValid);
  RecompileInfo recompileInfo(script, compilationId);

  JitZone* jitZone = cx->zone()->jitZone();

  for (const auto* scriptSnapshot : snapshot->scripts()) {
    JSScript* inlinedScript = scriptSnapshot->script();
    if (inlinedScript == script) {
      // The outer script is not an "inlined" dependency of itself.
      continue;
    }

    // TODO(post-Warp): This matches FinishCompilation and is necessary to
    // ensure in-progress compilations are canceled when an inlined function
    // becomes a debuggee. See the breakpoint-14.js jit-test.
    // When TI is gone, try to clean this up by moving AddInlinedCompilations to
    // WarpOracle so that we can handle this as part of addPendingRecompile
    // instead of requiring this separate check.
    if (inlinedScript->isDebuggee()) {
      *isValid = false;
      return true;
    }

    if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
      return false;
    }
  }

  *isValid = true;
  return true;
}
// For every fuse this compilation depends on: verify the fuse is still
// intact, and register the script as a dependency so a later fuse pop
// invalidates it. On any failure (popped fuse or OOM while registering),
// sets *isValid to false so the caller tosses the compilation.
void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
                                                        HandleScript script,
                                                        bool* isValid) {
  // No need to validate as we will toss this compilation anyhow.
  if (!*isValid) {
    return;
  }

  for (auto dependency : fuseDependencies) {
    switch (dependency) {
      case FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse: {
        auto& hasSeenObjectEmulateUndefinedFuse =
            cx->runtime()->hasSeenObjectEmulateUndefinedFuse.ref();

        if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "hasSeenObjectEmulateUndefinedFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      case FuseDependencyKind::OptimizeGetIteratorFuse: {
        auto& optimizeGetIteratorFuse =
            cx->realm()->realmFuses.optimizeGetIteratorFuse;
        if (!optimizeGetIteratorFuse.intact()) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; optimizeGetIteratorFuse fuse "
                  "dependency no longer valid\n");
          *isValid = false;
          return;
        }

        if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
          JitSpew(JitSpew_Codegen,
                  "tossing compilation; failed to register "
                  "optimizeGetIteratorFuse script dependency\n");
          *isValid = false;
          return;
        }
        break;
      }

      default:
        MOZ_CRASH("Unknown Dependency Kind");
    }
  }
}
// Link the generated code into a runnable IonScript and attach it to the
// script's JitScript: validates inlining/fuse dependencies, allocates the
// IonScript and JitCode, registers profiling tables, patches IC and
// invalidation data into the code, and copies all side tables (safepoints,
// snapshots, constants, nursery objects). Returns false only on OOM; a
// no-longer-valid compilation is dropped with a `true` return.
bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
  AutoCreatedBy acb(masm, "CodeGenerator::link");

  // We cancel off-thread Ion compilations in a few places during GC, but if
  // this compilation was performed off-thread it will already have been
  // removed from the relevant lists by this point. Don't allow GC here.
  JS::AutoAssertNoGC nogc(cx);

  RootedScript script(cx, gen->outerInfo().script());
  MOZ_ASSERT(!script->hasIonScript());

  // Perform any read barriers which were skipped while compiling the
  // script, which may have happened off-thread.
  JitZone* jitZone = cx->zone()->jitZone();
  jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);

  if (scriptCounts_ && !script->hasScriptCounts() &&
      !script->initScriptCounts(cx)) {
    return false;
  }

  IonCompilationId compilationId =
      cx->runtime()->jitRuntime()->nextCompilationId();
  jitZone->currentCompilationIdRef().emplace(compilationId);
  auto resetCurrentId = mozilla::MakeScopeExit(
      [jitZone] { jitZone->currentCompilationIdRef().reset(); });

  // Record constraints. If an error occurred, returns false and potentially
  // prevents future compilations. Otherwise, if an invalidation occurred, then
  // skip the current compilation.
  bool isValid = false;

  // If an inlined script is invalidated (for example, by attaching
  // a debugger), we must also invalidate the parent IonScript.
  if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
    return false;
  }

  // Validate fuse dependencies here; if a fuse has popped since we registered a
  // dependency then we need to toss this compilation as it assumes things which
  // are not valid.
  //
  // Eagerly register a fuse dependency here too; this way if we OOM we can
  // instead simply remove the compilation and move on with our lives.
  validateAndRegisterFuseDependencies(cx, script, &isValid);

  // This compilation is no longer valid; don't proceed, but return true as this
  // isn't an error case either.
  if (!isValid) {
    return true;
  }

  uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);

  size_t numNurseryObjects = snapshot->nurseryObjects().length();

  IonScript* ionScript = IonScript::New(
      cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
      snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
      graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
      osiIndices_.length(), icList_.length(), runtimeData_.length(),
      safepoints_.size());
  if (!ionScript) {
    return false;
  }
#ifdef DEBUG
  ionScript->setICHash(snapshot->icHash());
#endif

  auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
    // Use js_free instead of IonScript::Destroy: the cache list is still
    // uninitialized.
    js_free(ionScript);
  });

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Ion);
  if (!code) {
    return false;
  }

  // Encode native to bytecode map if profiling is enabled.
  if (isProfilerInstrumentationEnabled()) {
    // Generate native-to-bytecode main table.
    IonEntry::ScriptList scriptList;
    if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
      return false;
    }

    uint8_t* ionTableAddr =
        ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
    JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;

    // Construct the IonEntry that will go into the global table.
    auto entry = MakeJitcodeGlobalEntry<IonEntry>(
        cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
    if (!entry) {
      return false;
    }
    (void)nativeToBytecodeMap_.release();  // Table is now owned by |entry|.

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  } else {
    // Add a dummy jitcodeGlobalTable entry.
    auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
                                                    code->rawEnd());
    if (!entry) {
      return false;
    }

    // Add entry to the global table.
    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      return false;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  }

  ionScript->setMethod(code);

  // If the Gecko Profiler is enabled, mark IonScript as having been
  // instrumented accordingly.
  if (isProfilerInstrumentationEnabled()) {
    ionScript->setHasProfilingInstrumentation();
  }

  // Patch the IonScript pointer into the invalidation epilogue.
  Assembler::PatchDataWithValueCheck(
      CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
      ImmPtr((void*)-1));

  for (CodeOffset offset : ionScriptLabels_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
                                       ImmPtr(ionScript), ImmPtr((void*)-1));
  }

  for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
    void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
                                       ImmPtr(entry), ImmPtr((void*)-1));
  }

  // for generating inline caches during the execution.
  if (runtimeData_.length()) {
    ionScript->copyRuntimeData(&runtimeData_[0]);
  }
  if (icList_.length()) {
    ionScript->copyICEntries(&icList_[0]);
  }

  // Patch every IC site with its code pointer and IonIC address.
  for (size_t i = 0; i < icInfo_.length(); i++) {
    IonIC& ic = ionScript->getICFromIndex(i);
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
        ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
        ImmPtr((void*)-1));
  }

  JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
          (void*)code->raw());

  ionScript->setInvalidationEpilogueDataOffset(
      invalidateEpilogueData_.offset());
  if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
    ionScript->setOsrPc(osrPc);
    ionScript->setOsrEntryOffset(getOsrEntryOffset());
  }
  ionScript->setInvalidationEpilogueOffset(invalidate_.offset());

  perfSpewer_.saveProfile(cx, script, code);

#ifdef MOZ_VTUNE
  vtune::MarkScript(code, script, "ion");
#endif

  // Set an Ion counter hint for this script.
  if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
    JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
    jitHints->recordIonCompilation(script);
  }

  // for marking during GC.
  if (safepointIndices_.length()) {
    ionScript->copySafepointIndices(&safepointIndices_[0]);
  }
  if (safepoints_.size()) {
    ionScript->copySafepoints(&safepoints_);
  }

  // for recovering from an Ion Frame.
  if (osiIndices_.length()) {
    ionScript->copyOsiIndices(&osiIndices_[0]);
  }
  if (snapshots_.listSize()) {
    ionScript->copySnapshots(&snapshots_);
  }
  MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
  if (recovers_.size()) {
    ionScript->copyRecovers(&recovers_);
  }
  if (graph.numConstants()) {
    const Value* vp = graph.constantPool();
    ionScript->copyConstants(vp);
    // If any constant is a nursery-allocatable GC thing, register the script
    // in the whole-cell store buffer; one registration covers them all.
    for (size_t i = 0; i < graph.numConstants(); i++) {
      const Value& v = vp[i];
      if (v.isGCThing()) {
        if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
          sb->putWholeCell(script);
          break;
        }
      }
    }
  }

  // Attach any generated script counts to the script.
  if (IonScriptCounts* counts = extractScriptCounts()) {
    script->addIonCounts(counts);
  }
  // WARNING: Code after this point must be infallible!

  // Copy the list of nursery objects. Note that the store buffer can add
  // HeapPtr edges that must be cleared in IonScript::Destroy. See the
  // infallibility warning above.
  const auto& nurseryObjects = snapshot->nurseryObjects();
  for (size_t i = 0; i < nurseryObjects.length(); i++) {
    ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
  }

  // Transfer ownership of the IonScript to the JitScript. At this point enough
  // of the IonScript must be initialized for IonScript::Destroy to work.
  freeIonScript.release();
  script->jitScript()->setIonScript(script, ionScript);

  return true;
}
// An out-of-line path to convert a boxed int32 to either a float or double.
class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
  // The LIR node whose out-of-line path this is.
  LUnboxFloatingPoint* unboxFloatingPoint_;

 public:
  explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
      : unboxFloatingPoint_(unboxFloatingPoint) {}

  // Dispatch back to the code generator's visitor for this OOL path.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineUnboxFloatingPoint(this);
  }

  LUnboxFloatingPoint* unboxFloatingPoint() const {
    return unboxFloatingPoint_;
  }
};
// Unbox a Value into a float/double register. The inline path handles boxed
// doubles; anything else (i.e. int32, or a bailout) is handled out-of-line.
void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
  const LDefinition* result = lir->output();

  // Out-of-line path to convert int32 to double or bailout
  // if this instruction is fallible.
  OutOfLineUnboxFloatingPoint* ool =
      new (alloc()) OutOfLineUnboxFloatingPoint(lir);
  addOutOfLineCode(ool, lir->mir());

  FloatRegister resultReg = ToFloatRegister(result);
  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  masm.unboxDouble(box, resultReg);
  if (lir->type() == MIRType::Float32) {
    // Narrow to float32 when that is the requested output type.
    masm.convertDoubleToFloat32(resultReg, resultReg);
  }
  masm.bind(ool->rejoin());
}
// Out-of-line continuation of visitUnboxFloatingPoint: the input was not a
// boxed double. If fallible, bail out unless it is an int32; then convert
// the int32 payload to the requested floating-point type and rejoin.
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  if (ins->type() == MIRType::Float32) {
    masm.convertInt32ToFloat32(value.payloadOrValueReg(),
                               ToFloatRegister(ins->output()));
  } else {
    masm.convertInt32ToDouble(value.payloadOrValueReg(),
                              ToFloatRegister(ins->output()));
  }
  masm.jump(ool->rejoin());
}
16720 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
16721 pushArg(ToRegister(lir->environmentChain()));
16723 using Fn = JSObject* (*)(JSContext*, JSObject*);
16724 callVM<Fn, BindVarOperation>(lir);
// Megamorphic obj[id] = value. Fast path: the megamorphic set-slot cache.
// Slow path: a VM call to SetElementMegamorphic. On a cache hit we still
// need a post-write barrier when storing a nursery cell into a tenured
// object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: fall through to the VM call.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // No barrier needed if |obj| is in the nursery or |value| is not.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the scripted proxy's handler object out of its reserved slots,
// bailing out if the slot does not hold an object (e.g. the handler has
// been cleared).
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register output = ToRegister(ins->output());

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);

  Label bail;
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted proxy [[Get]]: if the target object
// requires proxy result validation, call CheckProxyGetByValueResult in the
// VM (out-of-line); otherwise fall through with |value| unchanged.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
16810 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
16811 ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
16812 ValueOperand output = ToOutValue(ins);
16813 Register scratch = ToRegister(ins->temp0());
16815 masm.moveValue(id, output);
16817 Label done, callVM;
16818 Label bail;
16820 ScratchTagScope tag(masm, output);
16821 masm.splitTagForTest(output, tag);
16822 masm.branchTestString(Assembler::Equal, tag, &done);
16823 masm.branchTestSymbol(Assembler::Equal, tag, &done);
16824 masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
16827 masm.unboxInt32(output, scratch);
16829 using Fn = JSLinearString* (*)(JSContext*, int);
16830 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
16831 ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
16833 masm.lookupStaticIntString(scratch, output.scratchReg(),
16834 gen->runtime->staticStrings(), ool->entry());
16836 masm.bind(ool->rejoin());
16837 masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
16838 masm.bind(&done);
16840 bailoutFrom(&bail, ins->snapshot());
16843 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
16844 const Register obj = ToRegister(ins->getOperand(0));
16845 size_t slot = ins->mir()->slot();
16846 ValueOperand result = ToOutValue(ins);
16848 masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
16851 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
16852 const Register obj = ToRegister(ins->getOperand(0));
16853 size_t slot = ins->mir()->slot();
16854 AnyRegister result = ToAnyRegister(ins->getDef(0));
16855 MIRType type = ins->mir()->type();
16857 masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
16858 type, result);
// Load a Value from |src| and unbox it into |dest| as |type|. When
// |fallible| is set, jump to |fail| if the loaded Value does not have the
// expected type; Double additionally accepts int32 via ensureDouble.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    // ensureDouble handles both boxed doubles and int32s (converted), and
    // jumps to |fail| for anything else, so it covers the fallible case too.
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: the type is statically known, no check needed.
  masm.loadUnboxedValue(src, type, dest);
}
// Load a fixed slot and unbox it to the instruction's MIR type, bailing out
// if the MIR is fallible and the slot holds a value of a different type.
void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dynamic (out-of-line) slot and unbox it to the instruction's MIR
// type; |ins->slots()| already points at the object's slots array.
void CodeGenerator::visitLoadDynamicSlotAndUnbox(
    LLoadDynamicSlotAndUnbox* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  // Dynamic slots are a plain array of Values off the slots pointer.
  Address address(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dense element and unbox it to the instruction's MIR type. The
// index may be a compile-time constant (static offset) or a register.
void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  const MLoadElementAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register elements = ToRegister(ins->elements());
  AnyRegister result = ToAnyRegister(ins->output());

  Label bail;
  if (ins->index()->isConstant()) {
    // Guarded statically: index * sizeof(Value) can't overflow int32.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(ins->index()) * sizeof(Value);
    Address address(elements, offset);
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(ins->index()));
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  }

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Out-of-line path used by emitMaybeAtomizeSlot: carries everything needed
// to atomize the string in |stringReg_| via a VM call and write the atom
// back to |slotAddr_| and |dest_|.
class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;       // Instruction whose live registers we save.
  Register stringReg_;      // Holds the non-atom string on entry.
  Address slotAddr_;        // Slot to rewrite with the atomized string.
  TypedOrValueRegister dest_;  // Where the result must end up.

 public:
  OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
                       TypedOrValueRegister dest)
      : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}

  void accept(CodeGenerator* codegen) final {
    codegen->visitOutOfLineAtomizeSlot(this);
  }
  LInstruction* lir() const { return lir_; }
  Register stringReg() const { return stringReg_; }
  Address slotAddr() const { return slotAddr_; }
  TypedOrValueRegister dest() const { return dest_; }
};
// OOL continuation of emitMaybeAtomizeSlot: atomize the string through a VM
// call, then store the atom back into the slot and the destination.
void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
  LInstruction* lir = ool->lir();
  Register stringReg = ool->stringReg();
  Address slotAddr = ool->slotAddr();
  TypedOrValueRegister dest = ool->dest();

  // This code is called with a non-atomic string in |stringReg|.
  // When it returns, |stringReg| contains an unboxed pointer to an
  // atomized version of that string, and |slotAddr| contains a
  // StringValue pointing to that atom. If |dest| is a ValueOperand,
  // it contains the same StringValue; otherwise we assert that |dest|
  // is |stringReg|.

  // Save/restore live registers manually (instead of using oolCallVM) so we
  // can route the VM call's return value into |stringReg| before restoring.
  saveLive(lir);
  pushArg(stringReg);

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  callVM<Fn, js::AtomizeString>(lir);
  StoreRegisterTo(stringReg).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Overwriting the old (non-atom) string in the slot needs a pre-barrier.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
  // We need a temp register for the nursery check. Spill something.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(stringReg);
  Register temp = allRegs.takeAny();
  masm.push(temp);

  Label tenured;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
  masm.assumeUnreachable("AtomizeString returned a nursery pointer");
  masm.bind(&tenured);

  masm.pop(temp);
#endif

  masm.jump(ool->rejoin());
}
// Ensure the string currently in |stringReg| (loaded from |slotAddr|) is an
// atom. Three cases, selected by the string's flag bits:
//  - already an atom (ATOM_BIT set): nothing to do, jump to rejoin;
//  - an atom-ref (ATOM_REF_BIT set): load the referenced atom inline and
//    write it back to the slot;
//  - anything else: atomize via the OOL VM call.
void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
                                         Address slotAddr,
                                         TypedOrValueRegister dest) {
  OutOfLineAtomizeSlot* ool =
      new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
  addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
  masm.branchTest32(Assembler::NonZero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->rejoin());

  masm.branchTest32(Assembler::Zero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_REF_BIT), ool->entry());
  masm.loadPtr(Address(stringReg, JSAtomRefString::offsetOfAtom()), stringReg);

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Replacing the atom-ref with its atom in the slot needs a pre-barrier;
  // no post-barrier is needed since atoms aren't nursery-allocated.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  masm.bind(ool->rejoin());
}
// Load a Value from a fixed slot and, if it holds a string, ensure the
// stored string is an atom (atomizing and writing back when necessary).
void CodeGenerator::visitLoadFixedSlotAndAtomize(
    LLoadFixedSlotAndAtomize* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register temp = ToRegister(ins->temp0());
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  masm.loadValue(slotAddr, result);

  // Non-string values need no atomization.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Dynamic-slot counterpart of visitLoadFixedSlotAndAtomize: load a Value
// from the slots array and atomize it in place if it holds a string.
void CodeGenerator::visitLoadDynamicSlotAndAtomize(
    LLoadDynamicSlotAndAtomize* ins) {
  ValueOperand result = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());
  Register base = ToRegister(ins->input());
  int32_t offset = ins->mir()->slot() * sizeof(js::Value);

  Address slotAddr(base, offset);
  masm.loadValue(slotAddr, result);

  // Non-string values need no atomization.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a fixed slot known (or required) to hold a string, unbox it, and
// ensure the stored string is an atom. A fallible load bails out when the
// slot doesn't hold a string.
void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
    LLoadFixedSlotUnboxAndAtomize* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Dynamic-slot counterpart of visitLoadFixedSlotUnboxAndAtomize: unbox a
// string from the slots array and ensure it is an atom.
void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
    LLoadDynamicSlotUnboxAndAtomize* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Add a property by updating the object's shape, then store the value into
// the (already-allocated) fixed or dynamic slot.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  // Only needed for the dynamic-slot case, to hold the slots pointer.
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  // The old shape is overwritten, so it needs a pre-barrier.
  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Add a property that requires growing the object's dynamic slots: grow via
// a pure (non-GC) ABI call, bailing out on allocation failure, then update
// the shape and store the value.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // |obj| and |value| may be clobbered by the ABI call; preserve them on the
  // stack across it.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returns false on OOM; bail out and retry in the VM.
  bailoutIfFalseBool(temp0, ins->snapshot());

  // The old shape is overwritten, so it needs a pre-barrier.
  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
// Add a property entirely in the VM: the callee stores the value and also
// invokes the class's addProperty hook.
void CodeGenerator::visitAddSlotAndCallAddPropHook(
    LAddSlotAndCallAddPropHook* ins) {
  const Register obj = ToRegister(ins->object());
  const ValueOperand value =
      ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(ImmGCPtr(ins->mir()->shape()));
  pushArg(value);
  pushArg(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callVM<Fn, AddSlotAndCallAddPropHook>(ins);
}
// Store a boxed Value into a fixed slot, emitting a pre-barrier when the
// MIR says the old value may need one.
void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  masm.storeValue(value, address);
}
// Store a typed (non-Value) value into a fixed slot, with an optional
// pre-barrier. The value may be a LIR constant or a typed register.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  // Constants are materialized directly into the store; registers are
  // stored as a tagged value of |valueType|.
  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
// Emit an Ion IC for name lookups on the environment chain (GetName).
void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->envObj());
  ValueOperand output = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());

  IonGetNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
17246 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
17247 LiveRegisterSet liveRegs,
17248 TypedOrValueRegister value,
17249 const ConstantOrRegister& id,
17250 ValueOperand output) {
17251 CacheKind kind = CacheKind::GetElem;
17252 if (id.constant() && id.value().isString()) {
17253 JSString* idString = id.value().toString();
17254 if (idString->isAtom() && !idString->asAtom().isIndex()) {
17255 kind = CacheKind::GetProp;
17258 IonGetPropertyIC cache(kind, liveRegs, value, id, output);
17259 addIC(ins, allocateIC(cache));
17262 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
17263 LiveRegisterSet liveRegs,
17264 Register objReg, Register temp,
17265 const ConstantOrRegister& id,
17266 const ConstantOrRegister& value,
17267 bool strict) {
17268 CacheKind kind = CacheKind::SetElem;
17269 if (id.constant() && id.value().isString()) {
17270 JSString* idString = id.value().toString();
17271 if (idString->isAtom() && !idString->asAtom().isIndex()) {
17272 kind = CacheKind::SetProp;
17275 IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
17276 addIC(ins, allocateIC(cache));
17279 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
17280 size_t n, MIRType type) {
17281 if (type == MIRType::Value) {
17282 return TypedOrValueRegister(ToValue(lir, n));
17285 const LAllocation* value = lir->getOperand(n);
17286 if (value->isConstant()) {
17287 return ConstantOrRegister(value->toConstant()->toJSValue());
17290 return TypedOrValueRegister(type, ToAnyRegister(value));
// Emit an Ion IC for a property/element get (obj[id] or obj.prop).
void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // The receiver value is never a constant here, so .reg() is safe.
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);
  addGetPropertyCache(ins, liveRegs, value, id, output);
}
// Emit an Ion IC for super.prop / super[id] gets: the lookup starts on
// |obj| (the home object's proto) but uses |receiver| as the this-value.
void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister receiver =
      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
                           ins->mir()->receiver()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);

  // Constant, non-index atom ids use the named-property kind.
  CacheKind kind = CacheKind::GetElemSuper;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetPropSuper;
    }
  }

  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  addIC(ins, allocateIC(cache));
}
// Emit an Ion IC that resolves the environment object a name binds on.
void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->environmentChain());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonBindNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Emit an Ion IC for Object.hasOwnProperty-style own-property tests.
void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // Neither operand is a constant here, so .reg() is safe.
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
                                                 ins->mir()->idval()->type())
                                .reg();
  Register output = ToRegister(ins->output());

  IonHasOwnIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Emit an Ion IC for the `#field in obj` / private-field brand check.
void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  // Neither operand is a constant here, so .reg() is safe.
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
                           ins->mir()->idval()->type())
          .reg();
  Register output = ToRegister(ins->output());

  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Create a new private-name symbol via a VM call.
void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
  pushArg(ImmGCPtr(ins->mir()->name()));

  using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
  callVM<Fn, NewPrivateName>(ins);
}
// Delete a named property via a VM call; strictness selects the template
// instantiation (strict mode throws on failure, sloppy returns false).
void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelPropOperation<true>>(lir);
  } else {
    callVM<Fn, DelPropOperation<false>>(lir);
  }
}
// Delete a computed element via a VM call; strictness selects the template
// instantiation, mirroring visitCallDeleteProperty.
void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
  pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelElemOperation<true>>(lir);
  } else {
    callVM<Fn, DelElemOperation<false>>(lir);
  }
}
// Get a PropertyIteratorObject for for-in iteration over |obj|: try the
// shape-keyed iterator cache first, fall back to a VM call, then activate
// the native iterator and register it with the realm's enumerators list.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  // Cache lookup; misses jump to the VM call.
  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  // Activate the iterator: record the object being iterated and set the
  // Active flag.
  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  // Link the iterator into the realm's active-enumerators list.
  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
  {
    // Temps are dead here; keep |iterObj| alive across the barrier call if
    // it is a volatile register.
    LiveRegisterSet save = liveVolatileRegs(lir);
    save.takeUnchecked(temp);
    save.takeUnchecked(temp2);
    save.takeUnchecked(temp3);
    if (iterObj.volatile_()) {
      save.addUnchecked(iterObj);
    }

    masm.PushRegsInMask(save);
    emitPostWriteBarrier(iterObj);
    masm.PopRegsInMask(save);
  }
  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
// Get a for-in iterator for an arbitrary Value via a VM call.
void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
  pushArg(ToValue(lir, LValueToIterator::ValueIndex));

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callVM<Fn, ValueToIterator>(lir);
}
// Branch on whether |iterator| has valid property indices for |object|:
// true only if the native iterator's indices are Valid AND the object's
// current shape still matches the iterator's recorded first shape.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through when the true-successor is the next block.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load the value of the property the iterator is currently at, using the
// iterator's cached PropertyIndex (dynamic slot, fixed slot, or dense
// element) instead of a name lookup.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp = index, temp2 = PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot: slots start right after the NativeObject header.
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
#ifdef DEBUG
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);
#endif

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store |value| into the property the iterator is currently at, using the
// iterator's cached PropertyIndex. The three kinds compute an effective
// address in |temp| and funnel into one pre-barriered store, followed by a
// conditional post-write barrier.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp = index, temp2 = PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot: slots start right after the NativeObject header.
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
#ifdef DEBUG
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);
#endif

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  // Common tail: pre-barrier the old value, then store.
  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier only when a tenured object stores a nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
// Emit an Ion IC for a property/element set (obj[id] = v or obj.prop = v).
void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register objReg = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ConstantOrRegister value = toConstantOrRegister(
      ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());

  addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
                      ins->mir()->strict());
}
// Throw a Value as an exception via a VM call (never returns normally).
void CodeGenerator::visitThrow(LThrow* lir) {
  pushArg(ToValue(lir, LThrow::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::ThrowOperation>(lir);
}
// Throw a Value with an explicit stack via a VM call. Arguments are pushed
// in reverse order of the Fn signature (stack first, then value).
void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
  pushArg(ToValue(lir, LThrowWithStack::StackIndex));
  pushArg(ToValue(lir, LThrowWithStack::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
  callVM<Fn, js::ThrowWithStackOperation>(lir);
}
// Out-of-line path for visitTypeOfV, used when the input is an object:
// objects may be callable ("function") or emulate undefined, so they need
// the slower emitTypeOfObject path.
class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfV* ins_;

 public:
  explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfV(this);
  }
  LTypeOfV* ins() const { return ins_; }
};
// Materialize the JSType constant corresponding to a value-tag |type| into
// |output|. Only valid for types whose typeof result is unambiguous — in
// particular JSVAL_TYPE_OBJECT here means "plain object" (callers route
// callable/emulates-undefined objects elsewhere).
void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Both numeric tags report "number".
      masm.move32(Imm32(JSTYPE_NUMBER), output);
      break;
    case JSVAL_TYPE_BOOLEAN:
      masm.move32(Imm32(JSTYPE_BOOLEAN), output);
      break;
    case JSVAL_TYPE_UNDEFINED:
      masm.move32(Imm32(JSTYPE_UNDEFINED), output);
      break;
    case JSVAL_TYPE_NULL:
      // Per spec, typeof null is "object".
      masm.move32(Imm32(JSTYPE_OBJECT), output);
      break;
    case JSVAL_TYPE_STRING:
      masm.move32(Imm32(JSTYPE_STRING), output);
      break;
    case JSVAL_TYPE_SYMBOL:
      masm.move32(Imm32(JSTYPE_SYMBOL), output);
      break;
    case JSVAL_TYPE_BIGINT:
      masm.move32(Imm32(JSTYPE_BIGINT), output);
      break;
    default:
      MOZ_CRASH("Unsupported JSValueType");
  }
}
// Emit one tag test for visitTypeOfV: if |tag| matches |type|, set |output|
// to the corresponding JSType and jump to |done|; otherwise fall through.
// Object tags jump to |oolObject| instead, since their typeof result
// depends on the object itself.
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Double stands in for both numeric tags; test them together.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
// Compute typeof for a boxed Value as a JSType in |output|. Emits a chain
// of tag checks, front-loading the types Baseline observed most often;
// object inputs take the OOL path (callable / emulates-undefined).
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
  addOutOfLineCode(ool, lir->mir());

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Compute typeof for an object into |output| and jump to |done|. The inline
// typeOfObject classifier handles the common cases ("function",
// "undefined" for emulates-undefined, "object"); proxies and other
// uncertain cases fall back to an ABI call to js::TypeOfObject.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  // Slow path: ask the VM. Falls through to |done|.
  masm.bind(&slowCheck);

  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
// Out-of-line path of LTypeOfV for object inputs: unbox the object and
// classify it via emitTypeOfObject.  Note that emitTypeOfObject's fast
// paths jump directly to ool->rejoin(); the trailing jump only covers the
// fall-through of its slow (ABI call) path.
void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
  LTypeOfV* ins = ool->ins();

  ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
  Register temp = ToTempUnboxRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  Register obj = masm.extractObject(input, temp);
  emitTypeOfObject(obj, output, ool->rejoin());
  masm.jump(ool->rejoin());
}
// typeof on a known-object input: classify the object directly; no value
// unboxing or tag dispatch is needed.
void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label done;
  emitTypeOfObject(obj, output, &done);
  masm.bind(&done);
}
// Map a JSType (int32 in |input|) to the interned typeof-name string.
// Relies on the runtime's name table laying out the typeof names
// contiguously as an array of pointers indexed by JSType, starting at
// |undefined| (hence the JSTYPE_UNDEFINED == 0 static_assert).
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Debug-only guard: the incoming index must be a valid JSType.
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
17828 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
17829 LTypeOfIsNonPrimitiveV* ins_;
17831 public:
17832 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
17833 : ins_(ins) {}
17835 void accept(CodeGenerator* codegen) override {
17836 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
17838 auto* ins() const { return ins_; }
17841 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
17842 LTypeOfIsNonPrimitiveO* ins_;
17844 public:
17845 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
17846 : ins_(ins) {}
17848 void accept(CodeGenerator* codegen) override {
17849 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
17851 auto* ins() const { return ins_; }
// Shared slow path for `typeof x == "..."` on objects: call js::TypeOfObject
// via the ABI, then compare the resulting JSType against the constant type
// from the MIR node, leaving the boolean comparison result in |output|.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // Unsigned compare: JSType values are small non-negative integers.
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
// Out-of-line path for the boxed-value variant: unbox the object payload
// into a temp, run the shared TypeOfObject slow path, then rejoin.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
    OutOfLineTypeOfIsNonPrimitiveV* ool) {
  auto* ins = ool->ins();
  ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToTempUnboxRegister(ins->temp0());

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObjectOOL(ins->mir(), obj, output);

  masm.jump(ool->rejoin());
}
// Out-of-line path for the known-object variant: the input is already an
// object register, so no unboxing is required.
void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
    OutOfLineTypeOfIsNonPrimitiveO* ool) {
  auto* ins = ool->ins();
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  emitTypeOfIsObjectOOL(ins->mir(), input, output);

  masm.jump(ool->rejoin());
}
// Emit `typeof obj ==/!= "<type>"` for a non-primitive expected type.
// The inline typeOfObject classifier routes to |success| only for the one
// JSType the MIR node compares against; the other two object-ish outcomes
// route to |fail|.  Unclassifiable objects go to |slowCheck| (the OOL ABI
// call).  Finally the success/fail labels are materialized as the boolean
// required by the comparison op (Eq/StrictEq vs Ne/StrictNe).
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  // Redirect exactly one outcome to |success|, depending on the compared
  // type; primitive types are handled elsewhere and must not reach here.
  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Convert the control-flow outcome into a boolean in |output|.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// `typeof value ==/!= "<non-primitive type>"` on a boxed value.  First
// dispatch on the value tag to settle the easy cases without touching the
// object (undefined/null tags), then fall through to the object classifier
// with an OOL slow path for exotic objects.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      // The undefined tag succeeds immediately; any non-object tag fails.
      // Objects may still be "undefined" (emulatesUndefined) and need the
      // classifier below.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      // typeof null is "object", so the null tag succeeds immediately.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      // Only objects can be functions; everything else fails outright.
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// `typeof obj ==/!= "<non-primitive type>"` on a known object: go straight
// to the object classifier, with an OOL slow path for exotic objects.
void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;
  emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
// `typeof value ==/!= "<primitive type>"`: a primitive typeof comparison is
// just a tag test on the boxed value, materialized as a boolean via the
// test*Set helper matching the compared type.  typeof has no observable
// difference between == and === here, so the condition comes straight from
// the comparison op.
void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
  Register output = ToRegister(lir->output());

  auto* mir = lir->mir();
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);

  switch (mir->jstype()) {
    case JSTYPE_STRING:
      masm.testStringSet(cond, input, output);
      break;
    case JSTYPE_NUMBER:
      masm.testNumberSet(cond, input, output);
      break;
    case JSTYPE_BOOLEAN:
      masm.testBooleanSet(cond, input, output);
      break;
    case JSTYPE_SYMBOL:
      masm.testSymbolSet(cond, input, output);
      break;
    case JSTYPE_BIGINT:
      masm.testBigIntSet(cond, input, output);
      break;

    case JSTYPE_UNDEFINED:
    case JSTYPE_OBJECT:
    case JSTYPE_FUNCTION:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Non-primitive type");
  }
}
// Wrap a sync iterator in an async-from-sync iterator via a VM call.
// Arguments are pushed in reverse order (next method, then iterator).
void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
  pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
  pushArg(ToRegister(lir->iterator()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
  callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
}
// ToPropertyKey via an inline cache: attach an IonToPropertyKeyIC that
// converts |input| to a property key in |output|.
void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
  ValueOperand output = ToOutValue(lir);

  IonToPropertyKeyIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}
// Load a boxed Value from a native object's elements at a constant or
// dynamic index, bailing out if the slot holds the magic "hole" value.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    // Constant index: compute the byte offset directly.  The overflow
    // guard documents that index * sizeof(Value) cannot wrap.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // A magic value marks a hole (missing element); deoptimize in that case.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load an element that may be a hole or out of bounds: in-bounds holes and
// out-of-bounds reads both yield |undefined|.  Negative indices bail out
// when the MIR requires the check (they could shadow named properties).
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    // Hole case skips the negative-index bailout that guards the
    // out-of-bounds path; both converge on loading undefined below.
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load a raw scalar (int/float) from typed-array-style elements storage.
// Some storage types (per LoadRequiresCall) need an ABI call, so volatile
// registers are captured for those.  |fail| is bound only if the load can
// fail (e.g. a uint32 that doesn't fit int32), in which case we bail out.
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp0 = ToTempRegisterOrInvalid(lir->temp0());
  Register temp1 = ToTempRegisterOrInvalid(lir->temp1());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(storageType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  Label fail;
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp0, temp1, &fail,
                            volatileRegs);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp0, temp1, &fail,
                            volatileRegs);
  }

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from BigInt64/BigUint64 typed-array storage and box
// the raw bits into a freshly allocated BigInt object.
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  // Allocate/initialize the BigInt result from the raw 64-bit payload.
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// DataView element load: unlike typed arrays, accesses may be unaligned and
// may require a byte swap depending on the requested endianness (which can
// be a runtime value).  Strategy:
//   1. If no swap is needed and unaligned accesses are supported, load
//      directly (BigInts go through emitCreateBigInt).
//   2. Otherwise load raw bytes into a GPR (or GPR pair), byte-swap if the
//      runtime endianness differs from the platform, then convert/move the
//      bits into the typed output register.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp1 = ToTempRegisterOrInvalid(lir->temp1());
  Register temp2 = ToTempRegisterOrInvalid(lir->temp2());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(storageType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // DataView offsets are byte offsets, hence TimesOne scaling.
  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp1, temp2, &fail,
                              volatileRegs);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp1);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      // Uint32 may produce a double output; stage in temp1 in that case.
      masm.load32Unaligned(source, out.isFloat() ? temp1 : out.gpr());
      break;
    case Scalar::Float16:
      masm.load16UnalignedZeroExtend(source, temp1);
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp1);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      // 8-bit types never need swapping and are handled by the direct path.
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness: skip the swap when it matches the platform.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp1 : out.gpr());
        break;
      case Scalar::Float16:
        masm.byteSwap16ZeroExtend(temp1);
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp1);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // Already in the output GPR.
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp1, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float16:
      masm.moveGPRToFloat16(temp1, out.fpu(), temp2, volatileRegs);
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp1, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp1);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Typed-array load where the index may be out of bounds: out-of-bounds
// reads produce |undefined| rather than bailing.  Uint32 values that don't
// fit int32 either bail or are forced to double per the MIR flag.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  const ValueOperand out = ToOutValue(lir);

  Register scratch = out.scratchReg();

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(arrayType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  Label fail;
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, temp, &fail,
                          volatileRegs);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// BigInt64/BigUint64 variant of the hole-aware typed-array load: in-bounds
// reads allocate a BigInt from the raw 64-bit payload, out-of-bounds reads
// produce |undefined|.  x86 is register-starved, so the output Value's
// registers double as temporaries there.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register temp = ToRegister(lir->temp());

  // On x86 there are not enough registers. In that case reuse the output
  // registers as temporaries.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp64().isBogusTemp());
  Register64 temp64 = out.toRegister64();
#else
  Register64 temp64 = ToRegister64(lir->temp64());
#endif

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

#ifdef JS_CODEGEN_X86
  Register bigInt = temp;
  Register maybeTemp = InvalidReg;
#else
  Register bigInt = out.scratchReg();
  Register maybeTemp = temp;
#endif
  emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Helper for emitting a jump table for table-switch-style dispatch.  The
// table may live inline with the code (SwitchTableType::Inline, ARM only)
// or in an out-of-line section (other platforms).  Usage: call
// addCodeEntry() at each case target while emitting inline code, then the
// out-of-line visitor emits the table itself and patches each table slot to
// the recorded label.
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  // Case-target labels, in table order.
  LabelsVector labels_;
  // One CodeLabel per table slot, patched to the matching label offset.
  CodeLabelsVector codeLabels_;
  // Address of the start of the jump table.
  CodeLabel start_;
  // True once we've switched to emitting the out-of-line part.
  bool isOutOfLine_;

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit an indirect jump through the table at position |index|.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      // ARM inline tables are addressed relative to the PC.
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    // Table slots are emitted inline for Inline tables and out-of-line for
    // OutOfLine tables; the phase check keeps the two passes from both
    // emitting slots.
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }
  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Emit the out-of-line portion of a switch jump table: reserve pointer-sized
// slots for the table (OutOfLine flavor only), then patch every slot to the
// absolute address of its case label.  Patching happens via CodeLabels since
// final addresses are only known after codegen.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

#  if defined(JS_CODEGEN_ARM64)
    // Keep the constant-pool emitter from splitting the table.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
#  endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}

// Explicit instantiations for both table flavors.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
18522 template <typename T>
18523 static inline void StoreToTypedArray(MacroAssembler& masm,
18524 Scalar::Type writeType,
18525 const LAllocation* value, const T& dest,
18526 Register temp,
18527 LiveRegisterSet volatileRegs) {
18528 if (Scalar::isFloatingType(writeType)) {
18529 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, temp,
18530 volatileRegs);
18531 } else {
18532 if (value->isConstant()) {
18533 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
18534 } else {
18535 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
// Store a raw scalar into typed-array-style elements storage at a constant
// or dynamic index.  Types needing an ABI call (per StoreRequiresCall)
// capture volatile registers.
void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  const LAllocation* value = lir->value();

  const MStoreUnboxedScalar* mir = lir->mir();

  Scalar::Type writeType = mir->writeType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::StoreRequiresCall(writeType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    StoreToTypedArray(masm, writeType, value, dest, temp, volatileRegs);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    StoreToTypedArray(masm, writeType, value, dest, temp, volatileRegs);
  }
}
// Store a BigInt into BigInt64/BigUint64 typed-array storage: extract the
// BigInt's 64-bit digits into a temp pair, then store them raw.
void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type writeType = lir->mir()->writeType();

  masm.loadBigInt64(value, temp);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.storeToTypedBigIntArray(writeType, temp, dest);
  }
}
// DataView element store — mirror of visitLoadDataViewElement.  Accesses
// may be unaligned and may need a byte swap for the requested endianness
// (possibly decided at runtime):
//   1. If no swap is needed and unaligned accesses are supported, store
//      directly.
//   2. Otherwise move the value's raw bits into a GPR (or GPR pair), swap
//      if needed, then store the bytes unaligned.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::StoreRequiresCall(writeType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // DataView offsets are byte offsets, hence TimesOne scaling.
  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access. (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest, temp, volatileRegs);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float16: {
      FloatRegister fvalue = ToFloatRegister(value);
      // Canonicalize NaNs before storing when deterministic mode needs it.
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat16ToGPR(fvalue, temp, volatileRegs);
      break;
    }
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      // 8-bit types never need swapping and are handled by the direct path.
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness: skip the swap when it matches the platform.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
      case Scalar::Float16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Float16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Typed-array store where the index may be out of bounds: out-of-bounds
// stores are silently skipped (per the spec, they are no-ops).
void CodeGenerator::visitStoreTypedArrayElementHole(
    LStoreTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::StoreRequiresCall(arrayType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // Skip the store when index >= length (Spectre-hardened check).
  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), temp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), temp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedArray(masm, arrayType, value, dest, temp, volatileRegs);

  masm.bind(&skip);
}
// BigInt64/BigUint64 variant of the hole-aware typed-array store:
// out-of-bounds stores are skipped; in-bounds stores extract the BigInt's
// 64-bit digits and store them raw.
void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
    LStoreTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp = ToRegister64(lir->temp());

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  // The 64-bit temp's scratch half is free until loadBigInt64 below, so it
  // can serve as the Spectre bounds-check temp.
  Register spectreTemp = temp.scratchReg();

  Label skip;
  if (length->isRegister()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  masm.loadBigInt64(value, temp);

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  masm.storeToTypedBigIntArray(arrayType, temp, dest);

  masm.bind(&skip);
}
// Emit a memory barrier of the kind requested by the LIR node.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}
// Atomics.isLockFree(n): set |output| to the JS-level lock-free answer for
// the byte size in |value|.
void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
  Register value = ToRegister(lir->value());
  Register output = ToRegister(lir->output());

  masm.atomicIsLockFreeJS(value, output);
}
// Clamp an int32 to the [0, 255] range (Uint8Clamped semantics), in place:
// the LIR reuses the input register as the output.
void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
  Register output = ToRegister(lir->output());
  MOZ_ASSERT(output == ToRegister(lir->input()));
  masm.clampIntToUint8(output);
}
// Clamp a double to [0, 255] with round-half-to-even (Uint8Clamped
// semantics, per masm.clampDoubleToUint8).
void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.clampDoubleToUint8(input, output);
}
// Clamp an arbitrary boxed value to [0, 255]: numbers/booleans are handled
// inline by clampValueToUint8; strings take the OOL StringToNumber VM call;
// other types (objects, symbols, etc.) bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
// Implement the JS `in` operator through an inline cache, used when the key's
// type/shape is not known statically.
void CodeGenerator::visitInCache(LInCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();

  ConstantOrRegister key =
      toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
  Register object = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonInIC cache(liveRegs, key, object, output, temp);
  addIC(ins, allocateIC(cache));
}
// Implement `index in array` for dense-element arrays: true iff the index is
// within the initialized length and the slot is not a hole (magic value).
// Negative indices bail out so the generic path can handle them.
void CodeGenerator::visitInArray(LInArray* lir) {
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    if (index < 0) {
      // A constant negative index can never be a dense element; punt.
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    // The static offset computation below must not overflow.
    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // When a negative index is possible, a failed (unsigned) init-length
    // compare must first distinguish "negative" (bailout) from "too large"
    // (false) — route it through negativeIntCheck.
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
// Guard that a dense element slot does not contain the hole (magic) value;
// bail out to the interpreter if it does.
void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* index = lir->index();

  Label testMagic;
  if (index->isConstant()) {
    Address address(elements, ToInt32(index) * sizeof(js::Value));
    masm.branchTestMagic(Assembler::Equal, address, &testMagic);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(index));
    masm.branchTestMagic(Assembler::Equal, address, &testMagic);
  }
  bailoutFrom(&testMagic, lir->snapshot());
}
// `instanceof` with an object-typed LHS; defers to the shared emitter.
void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
  Register protoReg = ToRegister(ins->rhs());
  emitInstanceOf(ins, protoReg);
}
// `instanceof` with a boxed-Value LHS; defers to the shared emitter.
void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
  Register protoReg = ToRegister(ins->rhs());
  emitInstanceOf(ins, protoReg);
}
// Shared body of LInstanceOfO/LInstanceOfV: walks the LHS's prototype chain
// inline, producing a boolean in the output register, and falls back to a VM
// call only when a lazy (proxy) prototype is encountered.
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // This path implements fun_hasInstance when the function's prototype is
  // known to be the object in protoReg

  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    // Relied on below: nullptr (0) and LazyProto (1) are both <= 1.
    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    lazyEntry = ool->entry();
  } else {
    // The chain walk clobbered objReg (it aliased output): re-extract the
    // LHS object before entering the OOL call.
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  // We stopped on <= 1: only the LazyProto (1) case needs the VM call;
  // nullptr (0) means "false" and output already holds the prototype (0).
  masm.bind(&testLazy);
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Generic `instanceof` through an inline cache, used when the RHS prototype
// is not a known object.
void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
  // The Lowering ensures that RHS is an object, and that LHS is a value.
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
  addIC(ins, allocateIC(ic));
}
// Call a DOM getter via its C++ JSJitGetterOp. Optionally first checks a
// cached reserved slot; otherwise builds a fake DOM exit frame, switches
// realms if needed, and invokes the getter with an on-stack outparam Value.
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  // Fast path: some DOM getters cache their result in a reserved slot; use
  // it when it holds anything other than `undefined`.
  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplcate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam. Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Fallible getter: a false return means an exception is pending.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Load a DOM member stored in a fixed slot as a boxed Value (no getter call).
void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
  // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
  // use an LLoadFixedSlotV or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                 result);
}
// Load a DOM member stored in a fixed slot, unboxed to a typed register.
void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
  // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
  // use an LLoadFixedSlotT or some subclass of it for this case: that would
  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  // we'd have to duplicate a bunch of stuff we now get for free from
  // MGetDOMProperty.
  //
  // If this ever gets fixed to work with proxies (by not assuming that
  // reserved slot indices, which is what domMemberSlotIndex() returns,
  // match fixed slot indices), we can reenable MGetDOMMember for
  // proxies in IonBuilder.
  Register object = ToRegister(ins->object());
  size_t slot = ins->mir()->domMemberSlotIndex();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Call a DOM setter via its C++ JSJitSetterOp: push the argument Value,
// build a fake DOM exit frame, switch realms if needed, and invoke the
// setter; a false return raises the pending exception.
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  const Register JSContextReg = ToRegister(ins->getJSContextReg());
  const Register ObjectReg = ToRegister(ins->getObjectReg());
  const Register PrivateReg = ToRegister(ins->getPrivReg());
  const Register ValueReg = ToRegister(ins->getValueReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return from the setter means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Load the expando Value from a DOM proxy's reserved private slot.
void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
               out.scratchReg());
  masm.loadValue(Address(out.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 out);
}
// Load a DOM proxy's expando Value while guarding that the cached
// ExpandoAndGeneration's generation number is unchanged; bail out otherwise.
void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
    LLoadDOMExpandoValueGuardGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  Label bail;
  masm.loadDOMExpandoValueGuardGeneration(proxy, out,
                                          ins->mir()->expandoAndGeneration(),
                                          ins->mir()->generation(), &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Load a DOM proxy's expando Value through its ExpandoAndGeneration without
// checking the generation counter.
void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
    LLoadDOMExpandoValueIgnoreGeneration* ins) {
  Register proxy = ToRegister(ins->proxy());
  ValueOperand out = ToOutValue(ins);

  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
               out.scratchReg());

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(
      Address(out.scratchReg(),
              js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      out.scratchReg());

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(
      Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
}
// Guard that a DOM expando Value is either `undefined` (no expando) or an
// object with the expected shape; bail out on a shape mismatch.
void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
    LGuardDOMExpandoMissingOrGuardShape* ins) {
  Register temp = ToRegister(ins->temp0());
  ValueOperand input =
      ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);

  Label done;
  masm.branchTestUndefined(Assembler::Equal, input, &done);

  masm.debugAssertIsObject(input);
  masm.unboxObject(input, temp);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  Label bail;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
                                              ins->mir()->shape(), &bail);
  bailoutFrom(&bail, ins->snapshot());

  masm.bind(&done);
}
// Out-of-line path for IsCallable when the inline class check cannot decide
// (e.g. proxies); captured registers feed visitOutOfLineIsCallable.
class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
  Register object_;  // object to test
  Register output_;  // receives the boolean result

 public:
  OutOfLineIsCallable(Register object, Register output)
      : object_(object), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineIsCallable(this);
  }
  Register object() const { return object_; }
  Register output() const { return output_; }
};
// IsCallable for a known-object input: fast inline check, OOL ABI call for
// the cases the inline check cannot classify.
void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(object, output, ool->entry());

  masm.bind(ool->rejoin());
}
// IsCallable for a boxed Value: non-objects are trivially false; objects use
// the same inline/OOL split as visitIsCallableO.
void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  Label notObject;
  masm.fallibleUnboxObject(val, temp, &notObject);

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(temp, output, ool->entry());
  masm.jump(ool->rejoin());

  masm.bind(&notObject);
  masm.move32(Imm32(0), output);

  masm.bind(ool->rejoin());
}
// OOL slow path: call js::ObjectIsCallable via the C++ ABI, preserving all
// volatile registers except the output.
void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
  Register object = ool->object();
  Register output = ool->output();

  saveVolatile(output);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsCallable>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}
// Out-of-line path for IsConstructor; carries the LIR node so the visitor
// can re-derive its registers.
class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
  LIsConstructor* ins_;

 public:
  explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineIsConstructor(this);
  }
  LIsConstructor* ins() const { return ins_; }
};
// IsConstructor for a known object: inline check with an OOL ABI-call
// fallback for objects the inline test cannot classify.
void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.isConstructor(object, output, ool->entry());

  masm.bind(ool->rejoin());
}
// OOL slow path: call js::ObjectIsConstructor via the C++ ABI, preserving
// all volatile registers except the output.
void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
  LIsConstructor* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsConstructor>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}
// Set the output boolean to whether the object is the Array constructor of a
// different realm.
void CodeGenerator::visitIsCrossRealmArrayConstructor(
    LIsCrossRealmArrayConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  masm.setIsCrossRealmArrayConstructor(object, output);
}
// Shared emitter for IsArray: true for plain ArrayObjects, OOL VM call for
// proxies (which may wrap arrays), false otherwise. `notArray`, when
// provided, is an extra entry point for inputs already known not to be
// arrays. Clobbers `output` as a class-pointer scratch before setting the
// boolean result.
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
// Array.isArray for a known-object input.
void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  // OOL path handles proxies via js::IsArrayFromJit.
  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(object), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, object, output);
}
// Array.isArray for a boxed Value: non-objects are routed straight to the
// false branch of the shared emitter.
void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
  ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  Label notArray;
  masm.fallibleUnboxObject(val, temp, &notArray);

  // OOL path handles proxies via js::IsArrayFromJit.
  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(temp), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, temp, output, &notArray);
}
// Test whether an object is a typed array. When the input may be a wrapper,
// proxies take an OOL VM call (IsPossiblyWrappedTypedArray); otherwise a
// plain class check suffices.
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  // `output` temporarily holds the object's class pointer.
  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
// Set the output boolean to whether the Value holds an object.
void CodeGenerator::visitIsObject(LIsObject* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
  masm.testObjectSet(Assembler::Equal, value, output);
}
// Fused is-object test + conditional branch.
void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
  ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
  testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
}
// Set the output boolean to whether the Value is null or undefined.
void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);

  Label isNotNull, done;
  masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);

  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&isNotNull);
  masm.testUndefinedSet(Assembler::Equal, value, output);

  masm.bind(&done);
}
// Fused null-or-undefined test + conditional branch, splitting the tag once
// so both tag tests reuse it.
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Fall through when the false target is the next block.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
// Load (as an immediate GC pointer) the script being compiled, i.e. the
// outermost script rather than any inlined callee's script.
void CodeGenerator::loadOutermostJSScript(Register reg) {
  // The "outermost" JSScript means the script that we are compiling
  // basically; this is not always the script associated with the
  // current basic block, which might be an inlined script.

  MIRGraph& graph = current->mir()->graph();
  MBasicBlock* entryBlock = graph.entryBlock();
  masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
}
// Load (as an immediate GC pointer) the script owning the given basic block,
// which may be an inlined script.
void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
  // The current JSScript means the script for the current
  // basic block. This may be an inlined script.

  JSScript* script = block->info().script();
  masm.movePtr(ImmGCPtr(script), reg);
}
// Set the output boolean to whether the object's class equals the MIR
// node's expected class (output doubles as the class-pointer scratch).
void CodeGenerator::visitHasClass(LHasClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());

  masm.loadObjClassUnsafe(lhs, output);
  masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
                 output);
}
// Guard that the object has exactly the expected class; bail out otherwise.
void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Guard that the object has one of the two expected classes; bail otherwise.
void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs,
                          {ins->mir()->getClass1(), ins->mir()->getClass2()},
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Guard that the object is a JSFunction; bail out otherwise.
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
                               &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// Compute the [[Class]]-based string for an object via an ABI call to
// js::ObjectClassToString; a null return (OOM) bails out.
void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  Register obj = ToRegister(lir->lhs());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();

  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
}
// Wasm parameters arrive in ABI-mandated locations; nothing to emit.
void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// 64-bit wasm parameters likewise arrive in ABI locations; nothing to emit.
void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
// Wasm return: jump to the common epilogue unless we fall through to it.
void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// 64-bit wasm return: jump to the common epilogue unless we fall through.
void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Void wasm return: jump to the common epilogue unless we fall through.
void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Debug-only: emit runtime checks that an integer value lies within the
// bounds range analysis computed for it; hits assumeUnreachable on failure.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
// Debug-only: emit runtime checks that a double value obeys the range
// analysis result — bounds, negative-zero exclusion, exponent bound, and
// NaN/infinity exclusion; hits assumeUnreachable on any violation.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered, so let it pass the bound check.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered, so let it pass the bound check.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
// Debug-only: crash if |obj|'s class does not match the class recorded in the
// MIR node. No Spectre mitigations are needed since this is a pure assertion.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
// Debug-only: crash if |obj|'s shape does not match the shape the MIR node
// expects.
void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}
// Debug-only range assertion for an integer-typed LIR input.
void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  Register input = ToRegister(ins->input());
  const Range* r = ins->range();

  emitAssertRangeI(ins->mir()->input()->type(), r, input);
}
// Debug-only range assertion for a double-typed LIR input.
void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}
// Debug-only range assertion for a float32 input: widen to double in |temp|
// and reuse the double range-assert path.
void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}
// Debug-only range assertion for a boxed Value: dispatch on the tag, unbox,
// and assert the range for int32 or double payloads. Any other tag is an
// immediate assertion failure.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        // Release the scratch tag register while the payload is in use.
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
// Poll the runtime's interrupt bits; when any bit is set, call the
// InterruptCheck VM function out of line.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line path for a resumable wasm trap: emit the trap, record a
// safepoint for stack-map generation, then jump back to the inline code.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);

  masm.jump(ool->rejoin());
}
// Out-of-line path for an aborting wasm trap: the trap does not resume, so no
// safepoint and no jump back to the rejoin point are emitted.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Poll the wasm Instance's interrupt flag; when set, take the out-of-line
// resumable CheckInterrupt trap.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
19902 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
19903 MOZ_ASSERT(gen->compilingWasm());
19904 const MWasmTrap* mir = lir->mir();
19906 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
19909 void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
19910 MOZ_ASSERT(gen->compilingWasm());
19911 const MWasmTrapIfNull* mir = lir->mir();
19912 Label nonNull;
19913 Register ref = ToRegister(lir->ref());
19915 masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
19916 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
19917 masm.bind(&nonNull);
// Materialize a 0/1 result for "is |ref| a subtype of an abstract heap type".
// Abstract types need no supertype vector, so superSTV/scratch2 are invalid.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.bind(&onFail);
  // xor is a compact way to zero the result register on the failure path.
  masm.xor32(result, result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Materialize a 0/1 result for "is |ref| a subtype of a concrete type".
// Concrete checks need the supertype vector (superSTV) and scratch registers.
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  masm.branchWasmRefIsSubtype(ref, mir->sourceType(), mir->destType(),
                              &onSuccess, /*onSuccess=*/true, superSTV,
                              scratch1, scratch2);
  masm.move32(Imm32(0), result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Fused subtype-test-and-branch for abstract heap types: branch straight to
// the true/false blocks instead of materializing a boolean.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(
      ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
      Register::Invalid(), scratch1, Register::Invalid());
  masm.jump(onFail);
}
// Fused subtype-test-and-branch for concrete types (requires the supertype
// vector register).
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
                              onSuccess, /*onSuccess=*/true, superSTV, scratch1,
                              scratch2);
  masm.jump(onFail);
}
// Call a wasm struct-allocation builtin (|fun|) with (instance, typeDefData),
// storing the resulting pointer in |output|. Saves/restores live registers
// and records a CodegenCall safepoint so GC can walk the frame.
void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
                                           wasm::SymbolicAddress fun,
                                           Register typeDefData,
                                           Register output) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefData);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(wasm::BytecodeOffset(0), fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // ARM64 keeps a pseudo stack pointer; resync the real SP after the pops.
  masm.syncStackPtr();
#endif
}
// Out-of-line path to allocate wasm GC structs. Captures the LIR node, the
// allocation builtin to call, and the registers holding the type data and
// the result.
class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  wasm::SymbolicAddress fun_;
  Register typeDefData_;
  Register output_;

 public:
  OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
                         Register typeDefData, Register output)
      : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewStruct(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
};
// Out-of-line fallback: perform the struct allocation via an instance call,
// then rejoin the inline fast path.
void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
  callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
                         ool->output());
  masm.jump(ool->rejoin());
}
// Allocate a wasm struct object. Outline-storage structs always go through
// the instance call; inline-storage structs try an inline nursery allocation
// first and fall back to an out-of-line instance call.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());

  if (mir->isOutline()) {
    // Pick the builtin variant that matches whether fields must be zeroed.
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;
    callWasmStructAllocFun(lir, fun, typeDefData, output);
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    auto ool =
        new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
    addOutOfLineCode(ool, lir->mir());

    Register temp1 = ToRegister(lir->temp0());
    Register temp2 = ToRegister(lir->temp1());
    masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
                             ool->entry(), mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Call a wasm array-allocation builtin with (instance, numElements,
// typeDefData). Unlike the struct path, the builtin can fail (e.g. too-large
// array), so a null result traps with ThrowReported.
void CodeGenerator::callWasmArrayAllocFun(LInstruction* lir,
                                          wasm::SymbolicAddress fun,
                                          Register numElements,
                                          Register typeDefData, Register output,
                                          wasm::BytecodeOffset bytecodeOffset) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(numElements);
  masm.passABIArg(typeDefData);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset = masm.callWithABI(
      bytecodeOffset, fun, mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  masm.syncStackPtr();
#endif

  // A null result means the builtin already reported an error; trap.
  Label ok;
  masm.branchPtr(Assembler::NonZero, output, ImmWord(0), &ok);
  masm.wasmTrap(wasm::Trap::ThrowReported, bytecodeOffset);
  masm.bind(&ok);
}
// Out-of-line path to allocate wasm GC arrays. |numElements_| is Some(n) when
// the element count is a compile-time constant (materialized into
// |numElementsReg_| before the call) and Nothing when it is dynamic.
class OutOfLineWasmNewArray : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  wasm::SymbolicAddress fun_;
  Register numElementsReg_;
  mozilla::Maybe<uint32_t> numElements_;
  Register typeDefData_;
  Register output_;
  wasm::BytecodeOffset bytecodeOffset_;

 public:
  OutOfLineWasmNewArray(LInstruction* lir, wasm::SymbolicAddress fun,
                        Register numElementsReg,
                        mozilla::Maybe<uint32_t> numElements,
                        Register typeDefData, Register output,
                        wasm::BytecodeOffset bytecodeOffset)
      : lir_(lir),
        fun_(fun),
        numElementsReg_(numElementsReg),
        numElements_(numElements),
        typeDefData_(typeDefData),
        output_(output),
        bytecodeOffset_(bytecodeOffset) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewArray(this);
  }

  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register numElementsReg() const { return numElementsReg_; }
  mozilla::Maybe<uint32_t> numElements() const { return numElements_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
  wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
};
// Out-of-line fallback: load the constant element count if there is one, do
// the allocation via an instance call, then rejoin the fast path.
void CodeGenerator::visitOutOfLineWasmNewArray(OutOfLineWasmNewArray* ool) {
  if (ool->numElements().isSome()) {
    masm.move32(Imm32(ool->numElements().value()), ool->numElementsReg());
  }
  callWasmArrayAllocFun(ool->lir(), ool->fun(), ool->numElementsReg(),
                        ool->typeDefData(), ool->output(),
                        ool->bytecodeOffset());
  masm.jump(ool->rejoin());
}
// Allocate a wasm array object. With a constant element count we either go
// straight to the instance call (data too big to store inline) or try a
// fixed-size inline nursery allocation; with a dynamic count we try a dynamic
// inline allocation. Both inline paths fall back to an out-of-line call.
void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewArrayObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  wasm::SymbolicAddress fun = mir->zeroFields()
                                  ? wasm::SymbolicAddress::ArrayNew_true
                                  : wasm::SymbolicAddress::ArrayNew_false;

  if (lir->numElements()->isConstant()) {
    // numElements is constant, so we can do optimized code generation.
    uint32_t numElements = lir->numElements()->toConstant()->toInt32();
    // Checked arithmetic: elemSize * numElements (+ header) may overflow.
    CheckedUint32 storageBytes =
        WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
    if (!storageBytes.isValid() ||
        storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
      // Too much array data to store inline. Immediately perform an instance
      // call to handle the out-of-line storage (or the trap).
      masm.move32(Imm32(numElements), temp1);
      callWasmArrayAllocFun(lir, fun, temp1, typeDefData, output,
                            mir->bytecodeOffset());
    } else {
      // storageBytes is small enough to be stored inline in WasmArrayObject.
      // Attempt a nursery allocation and fall back to an instance call if it
      // fails.
      Register instance = ToRegister(lir->instance());
      MOZ_ASSERT(instance == InstanceReg);

      auto ool = new (alloc())
          OutOfLineWasmNewArray(lir, fun, temp1, mozilla::Some(numElements),
                                typeDefData, output, mir->bytecodeOffset());
      addOutOfLineCode(ool, lir->mir());

      masm.wasmNewArrayObjectFixed(instance, output, typeDefData, temp1, temp2,
                                   ool->entry(), numElements,
                                   storageBytes.value(), mir->zeroFields());

      masm.bind(ool->rejoin());
    }
  } else {
    // numElements is dynamic. Attempt a dynamic inline-storage nursery
    // allocation and fall back to an instance call if it fails.
    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);
    Register numElements = ToRegister(lir->numElements());

    auto ool = new (alloc())
        OutOfLineWasmNewArray(lir, fun, numElements, mozilla::Nothing(),
                              typeDefData, output, mir->bytecodeOffset());
    addOutOfLineCode(ool, lir->mir());

    masm.wasmNewArrayObject(instance, output, numElements, typeDefData, temp1,
                            ool->entry(), mir->elemSize(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Copy the dedicated wasm heap base register into the output. Only reachable
// on platforms that reserve a HeapReg.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// 32-bit wasm memory bounds check: trap when |ptr| >= |boundsCheckLimit|.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// 64-bit variant of the wasm memory bounds check; same structure as the
// 32-bit path above.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Range bounds check: verify that [index, index + length) fits below |limit|.
void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
  const MWasmBoundsCheckRange32* mir = ins->mir();
  Register index = ToRegister(ins->index());
  Register length = ToRegister(ins->length());
  Register limit = ToRegister(ins->limit());
  Register tmp = ToRegister(ins->temp0());

  masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
}
// Trap with UnalignedAccess when |ptr| is not aligned to the access size
// (byteSize is assumed to be a power of two; the mask test relies on it).
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit alignment check. Only the low word can affect the low alignment
// bits, so on 32-bit platforms it suffices to test ptr.low.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Load a pointer-sized or 32-bit field out of the wasm Instance at the
// MIR-specified offset.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
20331 void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
20332 MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
20333 masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
20334 ToOutRegister64(ins));
// Emit a load/add/store increment of the script's warm-up counter at the
// given absolute address. |tmp| is clobbered.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
// Bump the warm-up counter stored inside the script's JitScript.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
// Bail out of Ion code if the value is the uninitialized-lexical magic value
// (TDZ check).
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Unconditionally throw the lexical error identified by the MIR's errorNumber
// via a VM call.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
20380 void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
20381 pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));
20383 using Fn = bool (*)(JSContext*, unsigned);
20384 callVM<Fn, js::ThrowMsgOperation>(ins);
// VM call that instantiates global declarations (vars/functions) for the
// current script at the MIR's resume-point pc.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// Handle the |debugger| statement: ask the VM whether a live Debugger wants
// to pause; if so, bail out so the debugger can take over.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Load |new.target| from the frame: when constructing it is the value stored
// just past max(numActualArgs, numFormalArgs) in the argument vector;
// otherwise it is |undefined|.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // More actuals than formals: new.target sits right after the actuals.
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // Fewer actuals than formals: index by the formal count instead.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
// Derived-class constructor return check: an object return value is used
// as-is; |undefined| means "return this" (after verifying |this| is
// initialized); anything else throws via the out-of-line VM call.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  // A magic |this| means it was never initialized (TDZ).
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
// Unbox the value as an object into |output|; throw via the out-of-line VM
// call if it is not an object.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// Throw a TypeError (via VM call) when the value is null or undefined, i.e.
// not object-coercible.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Validate a class |extends| expression: null is allowed, otherwise the value
// must be a constructor object; everything else goes to the throwing VM call.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
20510 void CodeGenerator::visitCheckThis(LCheckThis* ins) {
20511 ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);
20513 using Fn = bool (*)(JSContext*);
20514 OutOfLineCode* ool =
20515 oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
20516 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
20517 masm.bind(ool->rejoin());
// Throw (via VM call) when |this| has already been initialized — the inverse
// of visitCheckThis: only the magic (uninitialized) value is allowed here.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Create a generator object for the current frame via the CreateGenerator VM
// function. Arguments are pushed in reverse order of the Fn signature.
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
// Resolve an async function's promise with |value| via a VM call.
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);

  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
// Reject an async function's promise with |reason| (plus the saved stack)
// via a VM call.
void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
  ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);

  pushArg(stack);
  pushArg(reason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, HandleValue);
  callVM<Fn, js::AsyncFunctionReject>(lir);
}
20571 void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
20572 ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
20573 Register generator = ToRegister(lir->generator());
20575 pushArg(value);
20576 pushArg(generator);
20578 using Fn =
20579 JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
20580 HandleValue value);
20581 callVM<Fn, js::AsyncFunctionAwait>(lir);
// Ask the VM whether the await on |value| can be skipped (already-resolved
// value optimization).
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
// If the preceding CanSkipAwait said the await can be skipped, extract the
// resolved value via a VM call; otherwise pass the input value through
// unchanged.
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  masm.moveValue(value, output);

  masm.bind(&finished);
}
// Debug-only VM call validating a value produced by self-hosted code.
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
// Inline Math.random(): draw a double from the realm's XorShift128+ RNG.
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  if (js::SupportDifferentialTesting()) {
    // Deterministic result for differential testing; the RNG state was still
    // advanced above.
    masm.loadConstantDouble(0.0, output);
  }
}
// Sign-extend the low 8 or 16 bits of an int32, depending on the MIR mode.
void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  switch (ins->mode()) {
    case MSignExtendInt32::Byte:
      masm.move8SignExtend(input, output);
      break;
    case MSignExtendInt32::Half:
      masm.move16SignExtend(input, output);
      break;
  }
}
// Emit a 32-bit rotate (left or right), with the count either a compile-time
// constant or a register.
void CodeGenerator::visitRotate(LRotate* ins) {
  MRotate* mir = ins->mir();
  Register input = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());

  const LAllocation* count = ins->count();
  if (count->isConstant()) {
    // Rotate counts are taken mod 32.
    int32_t c = ToInt32(count) & 0x1F;
    if (mir->isLeftRotate()) {
      masm.rotateLeft(Imm32(c), input, dest);
    } else {
      masm.rotateRight(Imm32(c), input, dest);
    }
  } else {
    Register creg = ToRegister(count);
    if (mir->isLeftRotate()) {
      masm.rotateLeft(creg, input, dest);
    } else {
      masm.rotateRight(creg, input, dest);
    }
  }
}
// Out-of-line path for LNaNToZero: materializes 0.0 into the output register.
class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};
// Out-of-line code: replace the output with +0.0 and rejoin the fast path.
void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
  FloatRegister output = ToFloatRegister(ool->lir()->output());
  masm.loadConstantDouble(0.0, output);
  masm.jump(ool->rejoin());
}
// Convert NaN (and, unless ruled out by range analysis, -0.0) to +0.0 by
// branching to the out-of-line zero-loading path.
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs replacing: NaN compares unordered with itself.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // -0.0 == 0.0 under DoubleEqual, so this catches both -0.0 and NaN.
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
// Set the output boolean to whether the object is a packed array.
void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  masm.setIsPackedArray(obj, output, temp);
}
// Bail out of the compiled code if the array is not packed.
void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
  Register array = ToRegister(lir->array());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label bail;
  masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Load an object's prototype: inline for ordinary protos (object or null),
// out-of-line VM call for lazy protos (proxies).
void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  // The inline code below relies on LazyProto being the tagged value 1.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(target, scratch);

  // proto > 1: a real object pointer.
  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  // proto == 0: null prototype.
  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}
// Create a plain object with the given prototype via a VM call.
void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
  pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));

  using Fn = PlainObject* (*)(JSContext*, HandleValue);
  callVM<Fn, js::ObjectWithProtoOperation>(lir);
}
// Load an object's prototype, which MIR guarantees is neither null nor lazy
// (debug builds verify this at runtime).
void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
  Register obj = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  masm.loadObjProto(obj, output);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
  masm.bind(&done);
#endif
}
// Fetch a well-known builtin object (identified by its kind) via a VM call.
void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
  pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));

  using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
  callVM<Fn, js::BuiltinObjectOperation>(lir);
}
// Implement JSOp::SuperFun: the super "function" is the prototype of the
// callee, boxed as an object value, or null if the callee has no prototype.
void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
  Register callee = ToRegister(lir->callee());
  ValueOperand out = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label classCheckDone;
  masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
                               &classCheckDone);
  masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
  masm.bind(&classCheckDone);
#endif

  // Load prototype of callee
  masm.loadObjProto(callee, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because |callee| is guaranteed to be a
  // JSFunction and only proxy objects can have a lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), out);

  masm.bind(&done);
}
// Store a method's [[HomeObject]] in the function's extended slot, with the
// required pre-write barrier.
void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
  Register func = ToRegister(lir->function());
  ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);

  masm.assertFunctionIsExtended(func);

  Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  emitPreBarrier(addr);
  masm.storeValue(homeObject, addr);
}
// Set output to whether the object is definitely a TypedArray constructor.
void CodeGenerator::visitIsTypedArrayConstructor(
    LIsTypedArrayConstructor* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.setIsDefinitelyTypedArrayConstructor(object, output);
}
// Extract a Value's type tag into the output register.
void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
  ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
  Register output = ToRegister(lir->output());

  // extractTag may return a register other than |output|; copy if needed.
  Register tag = masm.extractTag(value, output);
  if (tag != output) {
    masm.mov(tag, output);
  }
}
// Guard that two value tags differ. Bails if the tags are equal, and also
// bails when both tags are number tags, since int32 and double have distinct
// tags yet can represent equal numbers.
void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  Label done;
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  bailout(lir->snapshot());

  masm.bind(&done);
}
// Load the target object out of a proxy's private reserved slot. For fallible
// loads, a revoked proxy (non-object private slot) triggers a bailout.
void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);

  // Bail for revoked proxies.
  Label bail;
  Address targetAddr(output,
                     js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
  if (lir->mir()->fallible()) {
    masm.fallibleUnboxObject(targetAddr, output, &bail);
    bailoutFrom(&bail, lir->snapshot());
  } else {
    masm.unboxObject(targetAddr, output);
  }
}
// Guard that |object| has the expected GetterSetter for the given property id,
// using an infallible-pure ABI call; a false return bails out.
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
  Register object = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  masm.movePropertyKey(lir->mir()->propId(), temp1);
  masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();

  bailoutIfFalseBool(ReturnReg, lir->snapshot());
}
// Bail out if the object is not extensible.
void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfObjectNotExtensible(object, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Bail out if the int32 index is negative.
void CodeGenerator::visitGuardInt32IsNonNegative(
    LGuardInt32IsNonNegative* lir) {
  Register index = ToRegister(lir->index());

  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
}
// Bail out if the int32 input lies outside [minimum, maximum].
void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
  Register input = ToRegister(lir->input());

  bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
               lir->snapshot());
  bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
               lir->snapshot());
}
// Guard that |index| does not refer to an existing dense element of |object|:
// passes if the index is past the initialized length or the slot is a hole,
// otherwise bails out.
void CodeGenerator::visitGuardIndexIsNotDenseElement(
    LGuardIndexIsNotDenseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(temp, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);

  // A magic value in the slot marks a hole.
  BaseValueIndex element(temp, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  bailout(lir->snapshot());

  masm.bind(&notDense);
}
// Guard that a dense-element store at |index| is a valid update or append:
// always OK when the array length is writable, otherwise the index must be
// within the current length.
void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
    LGuardIndexIsValidUpdateOrAdd* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(temp, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Label bail;
  Address length(temp, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
  masm.bind(&success);

  bailoutFrom(&bail, lir->snapshot());
}
// VM call to add or update a sparse element on a native object.
void CodeGenerator::visitCallAddOrUpdateSparseElement(
    LCallAddOrUpdateSparseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(index);
  pushArg(object);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
  callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
}
// VM call to read a sparse element from a native object.
void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());

  pushArg(index);
  pushArg(object);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
  callVM<Fn, js::GetSparseElementHelper>(lir);
}
// VM call implementing a native GetElement where the receiver is the object
// itself (pushed boxed as the HandleValue receiver argument).
void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());

  pushArg(index);
  pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
  pushArg(object);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callVM<Fn, js::NativeGetElement>(lir);
}
// VM call implementing GetElement for super accesses, where the receiver
// value is distinct from the lookup object.
void CodeGenerator::visitCallNativeGetElementSuper(
    LCallNativeGetElementSuper* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  ValueOperand receiver =
      ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);

  pushArg(index);
  pushArg(receiver);
  pushArg(object);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callVM<Fn, js::NativeGetElement>(lir);
}
// Pure ABI call asking whether a native object has a sparse element. The
// boolean answer is written into a stack-allocated Value out-param; a false
// return from the pure helper means "can't answer here" and bails out.
void CodeGenerator::visitCallObjectHasSparseElement(
    LCallObjectHasSparseElement* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  // Reserve stack space for the Value out-param and pass its address.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(index);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(temp0);

  Label bail, ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(temp0, &ok);
  // Failure path: pop the reserved Value before bailing.
  masm.adjustStack(sizeof(Value));
  masm.jump(&bail);

  masm.bind(&ok);
  // Both paths adjusted the stack; restore the tracked frame size for this
  // one before reading the out-param.
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  bailoutFrom(&bail, lir->snapshot());
}
// BigInt.asIntN with a dynamic bit count: VM call.
void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsIntN>(ins);
}
// BigInt.asIntN(64, x) specialization: reuse the input BigInt when it already
// fits in an int64 with matching sign; otherwise allocate a new BigInt from
// the truncated 64-bit value.
void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically assume the input can be reused.
  masm.movePtr(input, output);

  // Load the BigInt value as an int64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the int64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the value and the BigInt have different signs.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
  masm.jump(&done);

  masm.bind(&nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
// BigInt.asIntN(32, x) specialization: reuse the input when it fits in an
// int32 (single digit, magnitude <= INT32_MAX); otherwise build a new BigInt
// from the sign-extended low 32 bits.
void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically assume the input can be reused.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the int32 range, create a new BigInt.
  masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::BelowOrEqual,
                Address(input, BigInt::offsetOfLength()), Imm32(1), &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64SignExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}
// BigInt.asUintN with a dynamic bit count: VM call.
void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsUintN>(ins);
}
// BigInt.asUintN(64, x) specialization: reuse the input when it is a
// non-negative BigInt fitting in a uint64; otherwise allocate a new BigInt
// from the truncated 64-bit value.
void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically assume the input can be reused.
  masm.movePtr(input, output);

  // Load the BigInt value as an uint64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the uint64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
// BigInt.asUintN(32, x) specialization: reuse the input when it is a
// non-negative single-digit BigInt fitting in a uint32; otherwise build a new
// BigInt from the zero-extended low 32 bits.
void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  // Optimistically assume the input can be reused.
  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the uint32 range, create a new BigInt.
#if JS_PUNBOX64
  // On 32-bit platforms a digit can't exceed UINT32_MAX, so the check is
  // only needed for 64-bit digits.
  masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
#endif

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(1), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64ZeroExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}
// Bail out if the value is a GC thing (string, object, symbol, BigInt, ...).
void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
  ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);

  Label bail;
  masm.branchTestGCThing(Assembler::Equal, input, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Normalize a non-GC-thing value for Map/Set hashing (inline masm helper).
void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
  ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  masm.toHashableNonGCThing(input, output, tempFloat);
}
// Atomize a string for Map/Set hashing: already-atom strings pass through,
// otherwise try the fast atomization path and fall back to a VM call.
void CodeGenerator::visitToHashableString(LToHashableString* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
                                               StoreRegisterTo(output));

  Label isAtom;
  masm.branchTest32(Assembler::NonZero,
                    Address(input, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);

  masm.tryFastAtomize(input, output, output, ool->entry());
  masm.jump(ool->rejoin());
  masm.bind(&isAtom);
  masm.movePtr(input, output);
  masm.bind(ool->rejoin());
}
// Normalize an arbitrary value for Map/Set hashing; string inputs may take
// the out-of-line AtomizeString VM call.
void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
  ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  Register str = output.scratchReg();

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool =
      oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));

  masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
}
// Compute the Map/Set hash of a non-GC-thing value.
void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
  ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  masm.prepareHashNonGCThing(input, output, temp);
}
// Compute the Map/Set hash of a string.
void CodeGenerator::visitHashString(LHashString* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  masm.prepareHashString(input, output, temp);
}
// Compute the Map/Set hash of a symbol.
void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  masm.prepareHashSymbol(input, output);
}
// Compute the Map/Set hash of a BigInt.
void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
  Register input = ToRegister(ins->input());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
}
// Compute the hash of an object value for use with the given Set object.
void CodeGenerator::visitHashObject(LHashObject* ins) {
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LHashObject::InputIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
}
// Compute the hash of an arbitrary value for use with the given Set object.
void CodeGenerator::visitHashValue(LHashValue* ins) {
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LHashValue::InputIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
}
// Inline Set.prototype.has for non-BigInt keys, using a precomputed hash.
void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register output = ToRegister(ins->output());

  masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
}
// Inline Set.prototype.has for BigInt keys, using a precomputed hash.
void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
                          temp3);
}
// Inline Set.prototype.has for arbitrary values, using a precomputed hash.
void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
  Register setObj = ToRegister(ins->setObject());
  ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
                         temp3);
}
// Set.prototype.has fallback: VM call.
void CodeGenerator::visitSetObjectHasValueVMCall(
    LSetObjectHasValueVMCall* ins) {
  pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
  pushArg(ToRegister(ins->setObject()));

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callVM<Fn, jit::SetObjectHas>(ins);
}
// Inline Set.prototype.size getter.
void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
  Register setObj = ToRegister(ins->setObject());
  Register output = ToRegister(ins->output());

  masm.loadSetObjectSize(setObj, output);
}
// Inline Map.prototype.has for non-BigInt keys, using a precomputed hash.
void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
}
// Inline Map.prototype.has for BigInt keys, using a precomputed hash.
void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
                          temp3);
}
// Inline Map.prototype.has for arbitrary keys, using a precomputed hash.
void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
                         temp3);
}
// Map.prototype.has fallback: VM call.
void CodeGenerator::visitMapObjectHasValueVMCall(
    LMapObjectHasValueVMCall* ins) {
  pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
  pushArg(ToRegister(ins->mapObject()));

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  callVM<Fn, jit::MapObjectHas>(ins);
}
// Inline Map.prototype.get for non-BigInt keys, using a precomputed hash.
void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  ValueOperand output = ToOutValue(ins);

  // The output's scratch register doubles as an extra temp.
  masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
                             output.scratchReg());
}
// Inline Map.prototype.get for BigInt keys, using a precomputed hash.
void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  ValueOperand output = ToOutValue(ins);

  // The output's scratch register doubles as an extra temp.
  masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
                          temp3, output.scratchReg());
}
// Inline Map.prototype.get for arbitrary keys, using a precomputed hash.
void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
  Register hash = ToRegister(ins->hash());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  ValueOperand output = ToOutValue(ins);

  // The output's scratch register doubles as an extra temp.
  masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
                         temp3, output.scratchReg());
}
// Map.prototype.get fallback: VM call.
void CodeGenerator::visitMapObjectGetValueVMCall(
    LMapObjectGetValueVMCall* ins) {
  pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
  pushArg(ToRegister(ins->mapObject()));

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, jit::MapObjectGet>(ins);
}
// Inline Map.prototype.size getter.
void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
  Register mapObj = ToRegister(ins->mapObject());
  Register output = ToRegister(ins->output());

  masm.loadMapObjectSize(mapObj, output);
}
// Emit a direct call from Ion-compiled JS into a wasm function, bypassing
// the generic JIT entry stub. Collects each argument's location (register,
// constant, or stack slot) into |stackArgs| per the wasm ABI, asserts that
// the single result (if any) lands in the conventional return register,
// then emits the call and records a safepoint at the call offset.
template <size_t NumDefs>
void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
  wasm::JitCallStackArgVector stackArgs;
  masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
  if (masm.oom()) {
    return;
  }

  MIonToWasmCall* mir = lir->mir();
  const wasm::FuncExport& funcExport = mir->funcExport();
  const wasm::FuncType& sig =
      mir->instance()->code().codeMeta().getFuncType(funcExport.funcIndex());

  // Walk the signature and the LIR operands in lockstep, classifying each
  // argument with the wasm ABI argument generator.
  WasmABIArgGenerator abi;
  for (size_t i = 0; i < lir->numOperands(); i++) {
    MIRType argMir;
    switch (sig.args()[i].kind()) {
      case wasm::ValType::I32:
      case wasm::ValType::I64:
      case wasm::ValType::F32:
      case wasm::ValType::F64:
        argMir = sig.args()[i].toMIRType();
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected argument type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // temporarilyUnsupportedReftypeForEntry() restricts args to externref
        MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
        // Argument is boxed on the JS side to an anyref, so passed as a
        // pointer here.
        argMir = sig.args()[i].toMIRType();
        break;
    }

    ABIArg arg = abi.next(argMir);
    switch (arg.kind()) {
      case ABIArg::GPR:
      case ABIArg::FPU: {
        // Register allocation already placed the operand in the ABI
        // register; push an empty placeholder entry.
        MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
        stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
        break;
      }
      case ABIArg::Stack: {
        const LAllocation* larg = lir->getOperand(i);
        if (larg->isConstant()) {
          stackArgs.infallibleEmplaceBack(ToInt32(larg));
        } else if (larg->isGeneralReg()) {
          stackArgs.infallibleEmplaceBack(ToRegister(larg));
        } else if (larg->isFloatReg()) {
          stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
        } else {
          // Always use the stack pointer here because GenerateDirectCallFromJit
          // depends on this.
          Address addr = ToAddress<BaseRegForAddress::SP>(larg);
          stackArgs.infallibleEmplaceBack(addr);
        }
        break;
      }
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        MOZ_CRASH(
            "no way to pass i64, and wasm uses hardfp for function calls");
      }
#endif
      case ABIArg::Uninitialized: {
        MOZ_CRASH("Uninitialized ABIArg kind");
      }
    }
  }

  // Sanity-check that the declared result type matches the MIR node's type
  // and that the output was allocated to the conventional return register.
  const wasm::ValTypeVector& results = sig.results();
  if (results.length() == 0) {
    MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case wasm::ValType::I32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
        MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
        break;
      case wasm::ValType::I64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
        MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
        break;
      case wasm::ValType::F32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
        break;
      case wasm::ValType::F64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // The wasm stubs layer unboxes anything that needs to be unboxed
        // and leaves it in a Value. A FuncRef/EqRef we could in principle
        // leave it as a raw object pointer but for now it complicates the
        // API to do so.
        MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
        break;
    }
  }

  WasmInstanceObject* instObj = lir->mir()->instanceObject();

  Register scratch = ToRegister(lir->temp());

  uint32_t callOffset;
  ensureOsiSpace();
  GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
                            scratch, &callOffset);

  // Add the instance object to the constant pool, so it is transferred to
  // the owning IonScript and so that it gets traced as long as the IonScript
  // lives.

  uint32_t unused;
  masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));

  markSafepointAt(callOffset, lir);
}
// No-result variant: delegates to the shared Ion-to-wasm call emitter.
void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  emitIonToWasmCallBase(lir);
}
// Value-result variant: delegates to the shared Ion-to-wasm call emitter.
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  emitIonToWasmCallBase(lir);
}
// Int64-result variant: delegates to the shared Ion-to-wasm call emitter.
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  emitIonToWasmCallBase(lir);
}
21633 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
21634 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
// Emit a full memory barrier for a wasm fence; only reachable when
// compiling wasm code.
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  masm.memoryBarrier(MembarFull);
}
21642 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
21643 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
21644 Register output = ToRegister(lir->output());
21645 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
21647 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
21648 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
21649 lir, ArgList(input), StoreRegisterTo(output));
21650 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
21651 masm.bind(oolBoxValue->rejoin());
21654 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
21655 Register input = ToRegister(lir->input());
21656 Register output = ToRegister(lir->output());
21657 masm.convertObjectToWasmAnyRef(input, output);
21660 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
21661 Register input = ToRegister(lir->input());
21662 Register output = ToRegister(lir->output());
21663 masm.convertStringToWasmAnyRef(input, output);
21666 void CodeGenerator::visitWasmAnyRefIsJSString(LWasmAnyRefIsJSString* lir) {
21667 Register input = ToRegister(lir->input());
21668 Register output = ToRegister(lir->output());
21669 Register temp = ToRegister(lir->temp0());
21670 Label fallthrough;
21671 Label isJSString;
21672 masm.branchWasmAnyRefIsJSString(true, input, temp, &isJSString);
21673 masm.move32(Imm32(0), output);
21674 masm.jump(&fallthrough);
21675 masm.bind(&isJSString);
21676 masm.move32(Imm32(1), output);
21677 masm.bind(&fallthrough);
21680 void CodeGenerator::visitWasmTrapIfAnyRefIsNotJSString(
21681 LWasmTrapIfAnyRefIsNotJSString* lir) {
21682 Register input = ToRegister(lir->input());
21683 Register temp = ToRegister(lir->temp0());
21684 Label isJSString;
21685 masm.branchWasmAnyRefIsJSString(true, input, temp, &isJSString);
21686 masm.wasmTrap(lir->mir()->trap(), lir->mir()->bytecodeOffset());
21687 masm.bind(&isJSString);
21690 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
21691 if (lir->value()->isConstant()) {
21692 // i31ref are often created with constants. If that's the case we will
21693 // do the operation statically here. This is similar to what is done
21694 // in masm.truncate32ToWasmI31Ref.
21695 Register output = ToRegister(lir->output());
21696 uint32_t value =
21697 static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
21698 uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
21699 masm.movePtr(ImmWord(ptr), output);
21700 } else {
21701 Register value = ToRegister(lir->value());
21702 Register output = ToRegister(lir->output());
21703 masm.truncate32ToWasmI31Ref(value, output);
21707 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
21708 Register value = ToRegister(lir->value());
21709 Register output = ToRegister(lir->output());
21710 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
21711 masm.convertWasmI31RefTo32Signed(value, output);
21712 } else {
21713 masm.convertWasmI31RefTo32Unsigned(value, output);
21717 #ifdef FUZZING_JS_FUZZILLI
21718 void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
21719 Register output) {
21720 using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
21721 OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
21722 lir, ArgList(obj), StoreRegisterTo(output));
21724 masm.jump(ool->entry());
21725 masm.bind(ool->rejoin());
21728 void CodeGenerator::emitFuzzilliHashBigInt(LInstruction* lir, Register bigInt,
21729 Register output) {
21730 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
21731 volatileRegs.takeUnchecked(output);
21733 masm.PushRegsInMask(volatileRegs);
21735 using Fn = uint32_t (*)(BigInt* bigInt);
21736 masm.setupUnalignedABICall(output);
21737 masm.passABIArg(bigInt);
21738 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
21739 masm.storeCallInt32Result(output);
21741 masm.PopRegsInMask(volatileRegs);
// Compute the Fuzzilli hash of a boxed Value. Number-like tags (int32,
// double, null, undefined, boolean) are canonicalized to a double in
// |scratchFloat| and hashed via fuzzilliHashDouble; BigInt and Object use
// dedicated helper paths; Symbol and String hash to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  ValueOperand value = ToValue(ins, 0);

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  Label hashDouble, done;

  Label isInt32, isDouble, isNull, isUndefined, isBoolean, isBigInt, isObject;
  {
    // Scope the tag scratch register: it must be released before the unbox
    // operations below.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestNull(Assembler::Equal, tag, &isNull);
    masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
    masm.branchTestBoolean(Assembler::Equal, tag, &isBoolean);
    masm.branchTestBigInt(Assembler::Equal, tag, &isBigInt);
    masm.branchTestObject(Assembler::Equal, tag, &isObject);

    // Symbol or String.
    masm.move32(Imm32(0), output);
    masm.jump(&done);
  }

  {
    masm.bind(&isInt32);

    masm.unboxInt32(value, scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    masm.jump(&hashDouble);
  }

  {
    masm.bind(&isDouble);

    masm.unboxDouble(value, scratchFloat);
    masm.jump(&hashDouble);
  }

  {
    masm.bind(&isNull);

    // null hashes as the double 1.0.
    masm.loadConstantDouble(1.0, scratchFloat);
    masm.jump(&hashDouble);
  }

  {
    masm.bind(&isUndefined);

    // undefined hashes as the double 2.0.
    masm.loadConstantDouble(2.0, scratchFloat);
    masm.jump(&hashDouble);
  }

  {
    masm.bind(&isBoolean);

    // Booleans hash as 3.0 (false) or 4.0 (true).
    masm.unboxBoolean(value, scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    masm.jump(&hashDouble);
  }

  {
    masm.bind(&isBigInt);

    masm.unboxBigInt(value, scratch);
    emitFuzzilliHashBigInt(ins, scratch, output);
    masm.jump(&done);
  }

  {
    masm.bind(&isObject);

    masm.unboxObject(value, scratch);
    emitFuzzilliHashObject(ins, scratch, output);
    masm.jump(&done);
  }

  // Shared tail: hash the canonical double in |scratchFloat|.
  masm.bind(&hashDouble);
  masm.fuzzilliHashDouble(scratchFloat, output, scratch);

  masm.bind(&done);
}
// Compute the Fuzzilli hash for a statically-typed (unboxed) input. Mirrors
// the per-tag cases of visitFuzzilliHashV, but the MIR type is known at
// compile time so exactly one case is emitted.
void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
  const LAllocation* value = ins->value();
  MIRType mirType = ins->mir()->getOperand(0)->type();

  // Temps may be invalid for types that don't need them (BigInt/Object).
  Register scratch = ToTempRegisterOrInvalid(ins->getTemp(0));
  FloatRegister scratchFloat = ToTempFloatRegisterOrInvalid(ins->getTemp(1));

  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  switch (mirType) {
    case MIRType::Undefined: {
      // undefined hashes as the double 2.0 (same as visitFuzzilliHashV).
      masm.loadConstantDouble(2.0, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Null: {
      // null hashes as the double 1.0.
      masm.loadConstantDouble(1.0, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Int32: {
      masm.move32(ToRegister(value), scratch);
      masm.convertInt32ToDouble(scratch, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Double: {
      masm.moveDouble(ToFloatRegister(value), scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Float32: {
      masm.convertFloat32ToDouble(ToFloatRegister(value), scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Boolean: {
      // Booleans hash as 3.0 (false) or 4.0 (true).
      masm.move32(ToRegister(value), scratch);
      masm.add32(Imm32(3), scratch);
      masm.convertInt32ToDouble(scratch, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::BigInt: {
      emitFuzzilliHashBigInt(ins, ToRegister(value), output);
      break;
    }

    case MIRType::Object: {
      emitFuzzilliHashObject(ins, ToRegister(value), output);
      break;
    }

    default:
      MOZ_CRASH("unexpected type");
  }
}
21890 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
21891 Register value = ToRegister(ins->value());
21892 Register temp0 = ToRegister(ins->getTemp(0));
21893 Register temp1 = ToRegister(ins->getTemp(1));
21895 masm.fuzzilliStoreHash(value, temp0, temp1);
21897 #endif
21899 static_assert(!std::is_polymorphic_v<CodeGenerator>,
21900 "CodeGenerator should not have any virtual methods");
21902 } // namespace jit
21903 } // namespace js