Bug 1842773 - Part 19: Add guard instruction for fixed-length typed arrays. r=sfink...
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob9748a257666d9d0667ead1143b20462d0a1869e8
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::DebugOnly;
103 using mozilla::FloatingPoint;
104 using mozilla::Maybe;
105 using mozilla::NegativeInfinity;
106 using mozilla::PositiveInfinity;
108 using JS::ExpandoAndGeneration;
110 namespace js {
111 namespace jit {
113 #ifdef CHECK_OSIPOINT_REGISTERS
114 template <class Op>
115 static void HandleRegisterDump(Op op, MacroAssembler& masm,
116 LiveRegisterSet liveRegs, Register activation,
117 Register scratch) {
118 const size_t baseOffset = JitActivation::offsetOfRegs();
120 // Handle live GPRs.
121 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
122 Register reg = *iter;
123 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
125 if (reg == activation) {
126 // To use the original value of the activation register (that's
127 // now on top of the stack), we need the scratch register.
128 masm.push(scratch);
129 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
130 op(scratch, dump);
131 masm.pop(scratch);
132 } else {
133 op(reg, dump);
137 // Handle live FPRs.
138 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
139 FloatRegister reg = *iter;
140 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
141 op(reg, dump);
145 class StoreOp {
146 MacroAssembler& masm;
148 public:
149 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
151 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
152 void operator()(FloatRegister reg, Address dump) {
153 if (reg.isDouble()) {
154 masm.storeDouble(reg, dump);
155 } else if (reg.isSingle()) {
156 masm.storeFloat32(reg, dump);
157 } else if (reg.isSimd128()) {
158 MOZ_CRASH("Unexpected case for SIMD");
159 } else {
160 MOZ_CRASH("Unexpected register type.");
165 class VerifyOp {
166 MacroAssembler& masm;
167 Label* failure_;
169 public:
170 VerifyOp(MacroAssembler& masm, Label* failure)
171 : masm(masm), failure_(failure) {}
173 void operator()(Register reg, Address dump) {
174 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
176 void operator()(FloatRegister reg, Address dump) {
177 if (reg.isDouble()) {
178 ScratchDoubleScope scratch(masm);
179 masm.loadDouble(dump, scratch);
180 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
181 } else if (reg.isSingle()) {
182 ScratchFloat32Scope scratch(masm);
183 masm.loadFloat32(dump, scratch);
184 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
185 } else if (reg.isSimd128()) {
186 MOZ_CRASH("Unexpected case for SIMD");
187 } else {
188 MOZ_CRASH("Unexpected register type.");
193 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
194 // Ensure the live registers stored by callVM did not change between
195 // the call and this OsiPoint. Try-catch relies on this invariant.
197 // Load pointer to the JitActivation in a scratch register.
198 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
199 Register scratch = allRegs.takeAny();
200 masm.push(scratch);
201 masm.loadJitActivation(scratch);
203 // If we should not check registers (because the instruction did not call
204 // into the VM, or a GC happened), we're done.
205 Label failure, done;
206 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
207 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
209 // Having more than one VM function call made in one visit function at
210 // runtime is a sec-ciritcal error, because if we conservatively assume that
211 // one of the function call can re-enter Ion, then the invalidation process
212 // will potentially add a call at a random location, by patching the code
213 // before the return address.
214 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
216 // Set checkRegs to 0, so that we don't try to verify registers after we
217 // return from this script to the caller.
218 masm.store32(Imm32(0), checkRegs);
220 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
221 // temps after calling into the VM. This is fine because no other
222 // instructions (including this OsiPoint) will depend on them. Also
223 // backtracking can also use the same register for an input and an output.
224 // These are marked as clobbered and shouldn't get checked.
225 LiveRegisterSet liveRegs;
226 liveRegs.set() = RegisterSet::Intersect(
227 safepoint->liveRegs().set(),
228 RegisterSet::Not(safepoint->clobberedRegs().set()));
230 VerifyOp op(masm, &failure);
231 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
233 masm.jump(&done);
235 // Do not profile the callWithABI that occurs below. This is to avoid a
236 // rare corner case that occurs when profiling interacts with itself:
238 // When slow profiling assertions are turned on, FunctionBoundary ops
239 // (which update the profiler pseudo-stack) may emit a callVM, which
240 // forces them to have an osi point associated with them. The
241 // FunctionBoundary for inline function entry is added to the caller's
242 // graph with a PC from the caller's code, but during codegen it modifies
243 // Gecko Profiler instrumentation to add the callee as the current top-most
244 // script. When codegen gets to the OSIPoint, and the callWithABI below is
245 // emitted, the codegen thinks that the current frame is the callee, but
246 // the PC it's using from the OSIPoint refers to the caller. This causes
247 // the profiler instrumentation of the callWithABI below to ASSERT, since
248 // the script and pc are mismatched. To avoid this, we simply omit
249 // instrumentation for these callWithABIs.
251 // Any live register captured by a safepoint (other than temp registers)
252 // must remain unchanged between the call and the OsiPoint instruction.
253 masm.bind(&failure);
254 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
256 masm.bind(&done);
257 masm.pop(scratch);
260 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
261 if (!checkOsiPointRegisters) {
262 return false;
265 if (safepoint->liveRegs().emptyGeneral() &&
266 safepoint->liveRegs().emptyFloat()) {
267 return false; // No registers to check.
270 return true;
273 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
274 if (!shouldVerifyOsiPointRegs(safepoint)) {
275 return;
278 // Set checkRegs to 0. If we perform a VM call, the instruction
279 // will set it to 1.
280 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
281 Register scratch = allRegs.takeAny();
282 masm.push(scratch);
283 masm.loadJitActivation(scratch);
284 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
285 masm.store32(Imm32(0), checkRegs);
286 masm.pop(scratch);
289 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
290 // Store a copy of all live registers before performing the call.
291 // When we reach the OsiPoint, we can use this to check nothing
292 // modified them in the meantime.
294 // Load pointer to the JitActivation in a scratch register.
295 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
296 Register scratch = allRegs.takeAny();
297 masm.push(scratch);
298 masm.loadJitActivation(scratch);
300 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
301 masm.add32(Imm32(1), checkRegs);
303 StoreOp op(masm);
304 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
306 masm.pop(scratch);
308 #endif // CHECK_OSIPOINT_REGISTERS
310 // Before doing any call to Cpp, you should ensure that volatile
311 // registers are evicted by the register allocator.
312 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
313 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
314 const VMFunctionData& fun = GetVMFunction(id);
316 // Stack is:
317 // ... frame ...
318 // [args]
319 #ifdef DEBUG
320 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
321 pushedArgs_ = 0;
322 #endif
324 #ifdef CHECK_OSIPOINT_REGISTERS
325 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
326 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
328 #endif
330 #ifdef DEBUG
331 if (ins->mirRaw()) {
332 MOZ_ASSERT(ins->mirRaw()->isInstruction());
333 MInstruction* mir = ins->mirRaw()->toInstruction();
334 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
336 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
337 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
338 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
339 // interrupt callbacks can call JS (chrome JS or shell testing functions).
340 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
341 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
342 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
343 masm.move32(Imm32(1), ReturnReg);
344 masm.store32(ReturnReg, AbsoluteAddress(addr));
347 #endif
349 // Push an exit frame descriptor.
350 masm.PushFrameDescriptor(FrameType::IonJS);
352 // Call the wrapper function. The wrapper is in charge to unwind the stack
353 // when returning from the call. Failures are handled with exceptions based
354 // on the return value of the C functions. To guard the outcome of the
355 // returned value, use another LIR instruction.
356 ensureOsiSpace();
357 uint32_t callOffset = masm.callJit(code);
358 markSafepointAt(callOffset, ins);
360 #ifdef DEBUG
361 // Reset the disallowArbitraryCode flag after the call.
363 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
364 masm.push(ReturnReg);
365 masm.move32(Imm32(0), ReturnReg);
366 masm.store32(ReturnReg, AbsoluteAddress(addr));
367 masm.pop(ReturnReg);
369 #endif
371 // Pop rest of the exit frame and the arguments left on the stack.
372 int framePop =
373 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
374 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
376 // Stack is:
377 // ... frame ...
380 template <typename Fn, Fn fn>
381 void CodeGenerator::callVM(LInstruction* ins) {
382 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
383 callVMInternal(id, ins);
386 // ArgSeq store arguments for OutOfLineCallVM.
388 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
389 // this function is an instance of a class which provides a "generate" in charge
390 // of pushing the argument, with "pushArg", for a VMFunction.
392 // Such list of arguments can be created by using the "ArgList" function which
393 // creates one instance of "ArgSeq", where the type of the arguments are
394 // inferred from the type of the arguments.
396 // The list of arguments must be written in the same order as if you were
397 // calling the function in C++.
399 // Example:
400 // ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
402 template <typename... ArgTypes>
403 class ArgSeq {
404 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
406 template <std::size_t... ISeq>
407 inline void generate(CodeGenerator* codegen,
408 std::index_sequence<ISeq...>) const {
409 // Arguments are pushed in reverse order, from last argument to first
410 // argument.
411 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
414 public:
415 explicit ArgSeq(ArgTypes&&... args)
416 : args_(std::forward<ArgTypes>(args)...) {}
418 inline void generate(CodeGenerator* codegen) const {
419 generate(codegen, std::index_sequence_for<ArgTypes...>{});
422 #ifdef DEBUG
423 static constexpr size_t numArgs = sizeof...(ArgTypes);
424 #endif
427 template <typename... ArgTypes>
428 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
429 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
432 // Store wrappers, to generate the right move of data after the VM call.
434 struct StoreNothing {
435 inline void generate(CodeGenerator* codegen) const {}
436 inline LiveRegisterSet clobbered() const {
437 return LiveRegisterSet(); // No register gets clobbered
441 class StoreRegisterTo {
442 private:
443 Register out_;
445 public:
446 explicit StoreRegisterTo(Register out) : out_(out) {}
448 inline void generate(CodeGenerator* codegen) const {
449 // It's okay to use storePointerResultTo here - the VMFunction wrapper
450 // ensures the upper bytes are zero for bool/int32 return values.
451 codegen->storePointerResultTo(out_);
453 inline LiveRegisterSet clobbered() const {
454 LiveRegisterSet set;
455 set.add(out_);
456 return set;
460 class StoreFloatRegisterTo {
461 private:
462 FloatRegister out_;
464 public:
465 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
467 inline void generate(CodeGenerator* codegen) const {
468 codegen->storeFloatResultTo(out_);
470 inline LiveRegisterSet clobbered() const {
471 LiveRegisterSet set;
472 set.add(out_);
473 return set;
477 template <typename Output>
478 class StoreValueTo_ {
479 private:
480 Output out_;
482 public:
483 explicit StoreValueTo_(const Output& out) : out_(out) {}
485 inline void generate(CodeGenerator* codegen) const {
486 codegen->storeResultValueTo(out_);
488 inline LiveRegisterSet clobbered() const {
489 LiveRegisterSet set;
490 set.add(out_);
491 return set;
495 template <typename Output>
496 StoreValueTo_<Output> StoreValueTo(const Output& out) {
497 return StoreValueTo_<Output>(out);
500 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
501 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
502 private:
503 LInstruction* lir_;
504 ArgSeq args_;
505 StoreOutputTo out_;
507 public:
508 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
509 const StoreOutputTo& out)
510 : lir_(lir), args_(args), out_(out) {}
512 void accept(CodeGenerator* codegen) override {
513 codegen->visitOutOfLineCallVM(this);
516 LInstruction* lir() const { return lir_; }
517 const ArgSeq& args() const { return args_; }
518 const StoreOutputTo& out() const { return out_; }
521 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
522 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
523 const StoreOutputTo& out) {
524 MOZ_ASSERT(lir->mirRaw());
525 MOZ_ASSERT(lir->mirRaw()->isInstruction());
527 #ifdef DEBUG
528 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
529 const VMFunctionData& fun = GetVMFunction(id);
530 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
531 MOZ_ASSERT(fun.returnsData() !=
532 (std::is_same_v<StoreOutputTo, StoreNothing>));
533 #endif
535 OutOfLineCode* ool = new (alloc())
536 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
537 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
538 return ool;
541 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
542 void CodeGenerator::visitOutOfLineCallVM(
543 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
544 LInstruction* lir = ool->lir();
546 #ifdef JS_JITSPEW
547 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
548 lir->opName());
549 if (const char* extra = lir->getExtraName()) {
550 JitSpewCont(JitSpew_Codegen, ":%s", extra);
552 JitSpewFin(JitSpew_Codegen);
553 #endif
554 perfSpewer_.recordInstruction(masm, lir);
555 saveLive(lir);
556 ool->args().generate(this);
557 callVM<Fn, fn>(lir);
558 ool->out().generate(this);
559 restoreLiveIgnore(lir, ool->out().clobbered());
560 masm.jump(ool->rejoin());
563 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
564 private:
565 LInstruction* lir_;
566 size_t cacheIndex_;
567 size_t cacheInfoIndex_;
569 public:
570 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
571 size_t cacheInfoIndex)
572 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
574 void bind(MacroAssembler* masm) override {
575 // The binding of the initial jump is done in
576 // CodeGenerator::visitOutOfLineICFallback.
579 size_t cacheIndex() const { return cacheIndex_; }
580 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
581 LInstruction* lir() const { return lir_; }
583 void accept(CodeGenerator* codegen) override {
584 codegen->visitOutOfLineICFallback(this);
588 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
589 if (cacheIndex == SIZE_MAX) {
590 masm.setOOM();
591 return;
594 DataPtr<IonIC> cache(this, cacheIndex);
595 MInstruction* mir = lir->mirRaw()->toInstruction();
596 cache->setScriptedLocation(mir->block()->info().script(),
597 mir->resumePoint()->pc());
599 Register temp = cache->scratchRegisterForEntryJump();
600 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
601 masm.jump(Address(temp, 0));
603 MOZ_ASSERT(!icInfo_.empty());
605 OutOfLineICFallback* ool =
606 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
607 addOutOfLineCode(ool, mir);
609 masm.bind(ool->rejoin());
610 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
613 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
614 LInstruction* lir = ool->lir();
615 size_t cacheIndex = ool->cacheIndex();
616 size_t cacheInfoIndex = ool->cacheInfoIndex();
618 DataPtr<IonIC> ic(this, cacheIndex);
620 // Register the location of the OOL path in the IC.
621 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
623 switch (ic->kind()) {
624 case CacheKind::GetProp:
625 case CacheKind::GetElem: {
626 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
628 saveLive(lir);
630 pushArg(getPropIC->id());
631 pushArg(getPropIC->value());
632 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
633 pushArg(ImmGCPtr(gen->outerInfo().script()));
635 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
636 HandleValue, HandleValue, MutableHandleValue);
637 callVM<Fn, IonGetPropertyIC::update>(lir);
639 StoreValueTo(getPropIC->output()).generate(this);
640 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
642 masm.jump(ool->rejoin());
643 return;
645 case CacheKind::GetPropSuper:
646 case CacheKind::GetElemSuper: {
647 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
649 saveLive(lir);
651 pushArg(getPropSuperIC->id());
652 pushArg(getPropSuperIC->receiver());
653 pushArg(getPropSuperIC->object());
654 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
655 pushArg(ImmGCPtr(gen->outerInfo().script()));
657 using Fn =
658 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
659 HandleValue, HandleValue, MutableHandleValue);
660 callVM<Fn, IonGetPropSuperIC::update>(lir);
662 StoreValueTo(getPropSuperIC->output()).generate(this);
663 restoreLiveIgnore(lir,
664 StoreValueTo(getPropSuperIC->output()).clobbered());
666 masm.jump(ool->rejoin());
667 return;
669 case CacheKind::SetProp:
670 case CacheKind::SetElem: {
671 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
673 saveLive(lir);
675 pushArg(setPropIC->rhs());
676 pushArg(setPropIC->id());
677 pushArg(setPropIC->object());
678 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
679 pushArg(ImmGCPtr(gen->outerInfo().script()));
681 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
682 HandleObject, HandleValue, HandleValue);
683 callVM<Fn, IonSetPropertyIC::update>(lir);
685 restoreLive(lir);
687 masm.jump(ool->rejoin());
688 return;
690 case CacheKind::GetName: {
691 IonGetNameIC* getNameIC = ic->asGetNameIC();
693 saveLive(lir);
695 pushArg(getNameIC->environment());
696 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
697 pushArg(ImmGCPtr(gen->outerInfo().script()));
699 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
700 MutableHandleValue);
701 callVM<Fn, IonGetNameIC::update>(lir);
703 StoreValueTo(getNameIC->output()).generate(this);
704 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
706 masm.jump(ool->rejoin());
707 return;
709 case CacheKind::BindName: {
710 IonBindNameIC* bindNameIC = ic->asBindNameIC();
712 saveLive(lir);
714 pushArg(bindNameIC->environment());
715 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
716 pushArg(ImmGCPtr(gen->outerInfo().script()));
718 using Fn =
719 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
720 callVM<Fn, IonBindNameIC::update>(lir);
722 StoreRegisterTo(bindNameIC->output()).generate(this);
723 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
725 masm.jump(ool->rejoin());
726 return;
728 case CacheKind::GetIterator: {
729 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
731 saveLive(lir);
733 pushArg(getIteratorIC->value());
734 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
735 pushArg(ImmGCPtr(gen->outerInfo().script()));
737 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
738 HandleValue);
739 callVM<Fn, IonGetIteratorIC::update>(lir);
741 StoreRegisterTo(getIteratorIC->output()).generate(this);
742 restoreLiveIgnore(lir,
743 StoreRegisterTo(getIteratorIC->output()).clobbered());
745 masm.jump(ool->rejoin());
746 return;
748 case CacheKind::OptimizeSpreadCall: {
749 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
751 saveLive(lir);
753 pushArg(optimizeSpreadCallIC->value());
754 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
755 pushArg(ImmGCPtr(gen->outerInfo().script()));
757 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
758 HandleValue, MutableHandleValue);
759 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
761 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
762 restoreLiveIgnore(
763 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
765 masm.jump(ool->rejoin());
766 return;
768 case CacheKind::In: {
769 IonInIC* inIC = ic->asInIC();
771 saveLive(lir);
773 pushArg(inIC->object());
774 pushArg(inIC->key());
775 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
776 pushArg(ImmGCPtr(gen->outerInfo().script()));
778 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
779 HandleObject, bool*);
780 callVM<Fn, IonInIC::update>(lir);
782 StoreRegisterTo(inIC->output()).generate(this);
783 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
785 masm.jump(ool->rejoin());
786 return;
788 case CacheKind::HasOwn: {
789 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
791 saveLive(lir);
793 pushArg(hasOwnIC->id());
794 pushArg(hasOwnIC->value());
795 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
796 pushArg(ImmGCPtr(gen->outerInfo().script()));
798 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
799 HandleValue, int32_t*);
800 callVM<Fn, IonHasOwnIC::update>(lir);
802 StoreRegisterTo(hasOwnIC->output()).generate(this);
803 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
805 masm.jump(ool->rejoin());
806 return;
808 case CacheKind::CheckPrivateField: {
809 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
811 saveLive(lir);
813 pushArg(checkPrivateFieldIC->id());
814 pushArg(checkPrivateFieldIC->value());
816 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
817 pushArg(ImmGCPtr(gen->outerInfo().script()));
819 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
820 HandleValue, HandleValue, bool*);
821 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
823 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
824 restoreLiveIgnore(
825 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
827 masm.jump(ool->rejoin());
828 return;
830 case CacheKind::InstanceOf: {
831 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
833 saveLive(lir);
835 pushArg(hasInstanceOfIC->rhs());
836 pushArg(hasInstanceOfIC->lhs());
837 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
838 pushArg(ImmGCPtr(gen->outerInfo().script()));
840 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
841 HandleValue lhs, HandleObject rhs, bool* res);
842 callVM<Fn, IonInstanceOfIC::update>(lir);
844 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
845 restoreLiveIgnore(lir,
846 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
848 masm.jump(ool->rejoin());
849 return;
851 case CacheKind::UnaryArith: {
852 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
854 saveLive(lir);
856 pushArg(unaryArithIC->input());
857 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
858 pushArg(ImmGCPtr(gen->outerInfo().script()));
860 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
861 IonUnaryArithIC* stub, HandleValue val,
862 MutableHandleValue res);
863 callVM<Fn, IonUnaryArithIC::update>(lir);
865 StoreValueTo(unaryArithIC->output()).generate(this);
866 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
868 masm.jump(ool->rejoin());
869 return;
871 case CacheKind::ToPropertyKey: {
872 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
874 saveLive(lir);
876 pushArg(toPropertyKeyIC->input());
877 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
878 pushArg(ImmGCPtr(gen->outerInfo().script()));
880 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
881 IonToPropertyKeyIC* ic, HandleValue val,
882 MutableHandleValue res);
883 callVM<Fn, IonToPropertyKeyIC::update>(lir);
885 StoreValueTo(toPropertyKeyIC->output()).generate(this);
886 restoreLiveIgnore(lir,
887 StoreValueTo(toPropertyKeyIC->output()).clobbered());
889 masm.jump(ool->rejoin());
890 return;
892 case CacheKind::BinaryArith: {
893 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
895 saveLive(lir);
897 pushArg(binaryArithIC->rhs());
898 pushArg(binaryArithIC->lhs());
899 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
900 pushArg(ImmGCPtr(gen->outerInfo().script()));
902 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
903 IonBinaryArithIC* stub, HandleValue lhs,
904 HandleValue rhs, MutableHandleValue res);
905 callVM<Fn, IonBinaryArithIC::update>(lir);
907 StoreValueTo(binaryArithIC->output()).generate(this);
908 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
910 masm.jump(ool->rejoin());
911 return;
913 case CacheKind::Compare: {
914 IonCompareIC* compareIC = ic->asCompareIC();
916 saveLive(lir);
918 pushArg(compareIC->rhs());
919 pushArg(compareIC->lhs());
920 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
921 pushArg(ImmGCPtr(gen->outerInfo().script()));
923 using Fn =
924 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
925 HandleValue lhs, HandleValue rhs, bool* res);
926 callVM<Fn, IonCompareIC::update>(lir);
928 StoreRegisterTo(compareIC->output()).generate(this);
929 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
931 masm.jump(ool->rejoin());
932 return;
934 case CacheKind::CloseIter: {
935 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
937 saveLive(lir);
939 pushArg(closeIterIC->iter());
940 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
941 pushArg(ImmGCPtr(gen->outerInfo().script()));
943 using Fn =
944 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
945 callVM<Fn, IonCloseIterIC::update>(lir);
947 restoreLive(lir);
949 masm.jump(ool->rejoin());
950 return;
952 case CacheKind::OptimizeGetIterator: {
953 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
955 saveLive(lir);
957 pushArg(optimizeGetIteratorIC->value());
958 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
959 pushArg(ImmGCPtr(gen->outerInfo().script()));
961 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
962 HandleValue, bool* res);
963 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
965 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
966 restoreLiveIgnore(
967 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
969 masm.jump(ool->rejoin());
970 return;
972 case CacheKind::Call:
973 case CacheKind::TypeOf:
974 case CacheKind::ToBool:
975 case CacheKind::GetIntrinsic:
976 case CacheKind::NewArray:
977 case CacheKind::NewObject:
978 MOZ_CRASH("Unsupported IC");
980 MOZ_CRASH();
983 StringObject* MNewStringObject::templateObj() const {
984 return &templateObj_->as<StringObject>();
// Construct the code generator for one compilation. The label vectors are
// backed by the MIR generator's allocator; scriptCounts_ is heap-allocated
// lazily (when profiling counts are requested) and freed in the destructor.
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm)
    : CodeGeneratorSpecific(gen, graph, masm),
      ionScriptLabels_(gen->alloc()),
      ionNurseryObjectLabels_(gen->alloc()),
      scriptCounts_(nullptr),
      zoneStubsToReadBarrier_(0) {}
995 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
// Convert a boxed Value to an int32. In TRUNCATE mode this implements the
// ToInt32 truncation used by bitwise operators (doubles and strings are
// handled out-of-line); in NORMAL mode only int32-valued inputs are accepted
// and anything else bails out.
void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
  ValueOperand operand = ToValue(lir, LValueToInt32::Input);
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->tempFloat());

  Label fails;
  if (lir->mode() == LValueToInt32::TRUNCATE) {
    // Out-of-line path for truncating a double that doesn't fit an int32.
    OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

    // We can only handle strings in truncation contexts, like bitwise
    // operations.
    Register stringReg = ToRegister(lir->temp());
    using Fn = bool (*)(JSContext*, JSString*, double*);
    auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
                                                    StoreFloatRegisterTo(temp));
    Label* stringEntry = oolString->entry();
    Label* stringRejoin = oolString->rejoin();

    // Emits the inline tag dispatch; jumps to the OOL paths for strings and
    // non-trivial doubles, and to |fails| for inputs that can't be truncated.
    masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
                              oolDouble->entry(), stringReg, temp, output,
                              &fails);
    masm.bind(oolDouble->rejoin());
  } else {
    MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
    // NORMAL conversion: only values that convert exactly are allowed;
    // negative zero optionally triggers a bailout.
    masm.convertValueToInt32(operand, temp, output, &fails,
                             lir->mirNormal()->needsNegativeZeroCheck(),
                             lir->mirNormal()->conversion());
  }

  bailoutFrom(&fails, lir->snapshot());
}
// Convert a boxed Value to a double. Doubles and int32s are always handled
// inline; booleans, undefined, and null are additionally allowed when the MIR
// conversion mode permits non-string primitives. All other types bail out.
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
  ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Scope the scratch tag register to the dispatch sequence only.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No branch matched: unsupported type, deoptimize.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) == 0.
    masm.bind(&isNull);
    masm.loadConstantDouble(0.0, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) == NaN.
    masm.bind(&isUndefined);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(true) == 1, ToNumber(false) == 0.
    masm.bind(&isBool);
    masm.boolValueToDouble(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToDouble(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  masm.unboxDouble(operand, output);
  masm.bind(&done);
}
// Convert a boxed Value to a float32. Mirrors visitValueToDouble, with an
// extra double->float32 rounding step for double-tagged inputs.
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
  ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
  FloatRegister output = ToFloatRegister(lir->output());

  // Set if we can handle other primitives beside strings, as long as they're
  // guaranteed to never throw. This rules out symbols and BigInts, but allows
  // booleans, undefined, and null.
  bool hasNonStringPrimitives =
      lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;

  {
    // Scope the scratch tag register to the dispatch sequence only.
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (hasNonStringPrimitives) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &isNull);
    }
  }

  // No branch matched: unsupported type, deoptimize.
  bailout(lir->snapshot());

  if (hasNonStringPrimitives) {
    // ToNumber(null) == 0.
    masm.bind(&isNull);
    masm.loadConstantFloat32(0.0f, output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(undefined) == NaN.
    masm.bind(&isUndefined);
    masm.loadConstantFloat32(float(GenericNaN()), output);
    masm.jump(&done);
  }

  if (hasNonStringPrimitives) {
    // ToNumber(true) == 1, ToNumber(false) == 0.
    masm.bind(&isBool);
    masm.boolValueToFloat32(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToFloat32(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  // ARM and MIPS may not have a double register available if we've
  // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
  ScratchDoubleScope fpscratch(masm);
  masm.unboxDouble(operand, fpscratch);
  masm.convertDoubleToFloat32(fpscratch, output);
#else
  masm.unboxDouble(operand, output);
  masm.convertDoubleToFloat32(output, output);
#endif
  masm.bind(&done);
}
// Convert a boxed Value to a BigInt. BigInt-tagged inputs are unboxed inline;
// booleans and strings go to the out-of-line ToBigInt VM call; everything
// else bails out (objects may have side effects via valueOf/toPrimitive, and
// the remaining types throw a TypeError per spec).
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
  ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = BigInt* (*)(JSContext*, HandleValue);
  auto* ool =
      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));

  Register tag = masm.extractTag(operand, output);

  Label notBigInt, done;
  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
  masm.unboxBigInt(operand, output);
  masm.jump(&done);
  masm.bind(&notBigInt);

  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
  masm.branchTestString(Assembler::Equal, tag, ool->entry());

  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
  bailout(lir->snapshot());

  masm.bind(ool->rejoin());
  masm.bind(&done);
}
1174 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1175 masm.convertInt32ToDouble(ToRegister(lir->input()),
1176 ToFloatRegister(lir->output()));
1179 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1180 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1181 ToFloatRegister(lir->output()));
1184 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1185 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1186 ToFloatRegister(lir->output()));
1189 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1190 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1191 ToFloatRegister(lir->output()));
1194 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1195 Label fail;
1196 FloatRegister input = ToFloatRegister(lir->input());
1197 Register output = ToRegister(lir->output());
1198 masm.convertDoubleToInt32(input, output, &fail,
1199 lir->mir()->needsNegativeZeroCheck());
1200 bailoutFrom(&fail, lir->snapshot());
1203 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1204 Label fail;
1205 FloatRegister input = ToFloatRegister(lir->input());
1206 Register output = ToRegister(lir->output());
1207 masm.convertFloat32ToInt32(input, output, &fail,
1208 lir->mir()->needsNegativeZeroCheck());
1209 bailoutFrom(&fail, lir->snapshot());
1212 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1213 #ifdef JS_64BIT
1214 // This LIR instruction is only used if the input can be negative.
1215 MOZ_ASSERT(lir->mir()->canBeNegative());
1217 Register output = ToRegister(lir->output());
1218 const LAllocation* input = lir->input();
1219 if (input->isRegister()) {
1220 masm.move32SignExtendToPtr(ToRegister(input), output);
1221 } else {
1222 masm.load32SignExtendToPtr(ToAddress(input), output);
1224 #else
1225 MOZ_CRASH("Not used on 32-bit platforms");
1226 #endif
1229 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1230 LNonNegativeIntPtrToInt32* lir) {
1231 #ifdef JS_64BIT
1232 Register output = ToRegister(lir->output());
1233 MOZ_ASSERT(ToRegister(lir->input()) == output);
1235 Label bail;
1236 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1237 bailoutFrom(&bail, lir->snapshot());
1238 #else
1239 MOZ_CRASH("Not used on 32-bit platforms");
1240 #endif
1243 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1244 Register input = ToRegister(lir->input());
1245 FloatRegister output = ToFloatRegister(lir->output());
1246 masm.convertIntPtrToDouble(input, output);
// Adjust a DataView byte length to the number of valid starting offsets for a
// read/write of |byteSize| bytes: length - (byteSize - 1). Bails out when the
// subtraction goes negative, i.e. when the view is too short for even one
// element.
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
  Register output = ToRegister(lir->output());
  // The adjustment is performed in place on the output register.
  MOZ_ASSERT(ToRegister(lir->input()) == output);

  uint32_t byteSize = lir->mir()->byteSize();

#ifdef DEBUG
  Label ok;
  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
  masm.bind(&ok);
#endif

  Label bail;
  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line slow path for object truthiness: call js::EmulatesUndefined in
// C++ and branch on its boolean result. Volatile registers are saved around
// the ABI call; |scratch| receives the call result.
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
1283 // Base out-of-line code generator for all tests of the truthiness of an
1284 // object, where the object might not be truthy. (Recall that per spec all
1285 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1286 // flag to permit objects to look like |undefined| in certain contexts,
1287 // including in object truthiness testing.) We check truthiness inline except
1288 // when we're testing it on a proxy, in which case out-of-line code will call
1289 // EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  Register objreg_;   // Object under test.
  Register scratch_;  // Scratch for the OOL call result.

  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  // setInputAndTargets must be called exactly once before code generation.
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  // Emit the slow path: an ABI call to js::EmulatesUndefined.
  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};
1325 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1326 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1327 // code. The user should bind these labels in inline code, and specify them as
1328 // targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  // Labels owned by this OOL object, so their lifetime matches the OOL code.
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() = default;

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
// Shared kernel for the object-emulates-undefined test: wire up the OOL
// object and emit the inline fast path. Callers decide how the fallthrough
// (doesn't-emulate-undefined) edge is handled.
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy. Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
// Variant where the doesn't-emulate-undefined case falls through: the label
// is bound immediately after the test, so it must not be bound already.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
// Variant with an explicit jump to the doesn't-emulate-undefined target, for
// callers whose target label is bound elsewhere.
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
// Emit the truthiness test for one JSValueType. When |skipTypeTest| is true
// this type is the only one remaining, so no tag check is emitted (asserted
// in debug builds). Falls through when the value is truthy and this was the
// last test; otherwise branches to ifTruthy/ifFalsy.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      // Objects are truthy unless they emulate undefined; proxies go through
      // the out-of-line path.
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      {
        // Release the scratch tag register while unboxing and testing.
        ScratchTagScopeRelease _(&tag);
        Register objreg = masm.extractObject(value, tempToUnbox);
        testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      }
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy. We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
// Emit a full truthiness test over all possible value types. Types observed
// by the baseline profile are tested first (most frequent first); the
// remaining types follow in a fixed order, with the type check elided for the
// final one. Falls through when the value is truthy and the last test was
// emitted inline.
void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    Register tempToUnbox, Register temp,
                                    FloatRegister floatTemp,
                                    const TypeDataList& observedTypes,
                                    Label* ifTruthy, Label* ifFalsy,
                                    OutOfLineTestObject* ool) {
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
      JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
      JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate tests for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : observedTypes) {
    JSValueType type = observed.type();
    remaining -= type;

    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
  }

  // Generate tests for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    // We don't need a type test for the last possible type.
    bool skipTypeTest = remaining.isEmpty();
    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
                           ifTruthy, ifFalsy, ool, skipTypeTest);
  }
  MOZ_ASSERT(remaining.isEmpty());

  // We fall through if the final test is truthy.
}
// Branch on the truthiness of a BigInt: a BigInt is truthy iff it is
// non-zero. The branch direction is chosen so that whichever successor block
// is emitted next becomes the fallthrough path.
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
  Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
  Register input = ToRegister(lir->input());

  if (isNextBlock(lir->ifFalse()->lir())) {
    // False block is next: branch to true, fall through to false.
    masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    // True block is next: branch to false, fall through to true.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
  } else {
    // Neither is next: branch to false, jump to true.
    masm.branchIfBigIntIsZero(input, ifFalseLabel);
    jumpToBlock(lir->ifTrue());
  }
}
1525 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1526 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1527 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1528 Register input = ToRegister(lir->input());
1530 auto* ool = new (alloc()) OutOfLineTestObject();
1531 addOutOfLineCode(ool, lir->mir());
1533 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1534 ool);
// Branch on the truthiness of an arbitrary boxed Value. Delegates to
// testValueTruthy, which falls through on the truthy last case, hence the
// trailing jump to the truthy target.
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->mir());

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  Register temp = ToRegister(lir->temp2());
  FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
                  falsy, ool);
  masm.jump(truthy);
}
// Convert a boolean to the interned "true"/"false" atom. Pure register
// selection, no VM call or allocation.
void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  const JSAtomState& names = gen->runtime->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, input, input, &true_);
  masm.movePtr(ImmGCPtr(names.false_), output);
  masm.jump(&done);

  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), output);

  masm.bind(&done);
}
// Convert an int32 to a string. Small integers are looked up in the static
// strings table inline; other values call Int32ToString out-of-line.
void CodeGenerator::visitIntToString(LIntToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert a double to a string. Doubles that are exactly small integers use
// the static strings table; anything else (fractional, out of range) calls
// NumberToString out-of-line.
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
  masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
                             ool->entry());

  masm.bind(ool->rejoin());
}
// Convert an arbitrary boxed Value to a string, with inline fast paths per
// tag and an out-of-line ToStringSlow VM call for the remaining cases.
// Objects and symbols either go to the VM or bail out depending on whether
// side effects are permitted by the MIR node.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::InputIndex);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
                               ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Signature of the C++ store-buffer add/remove helpers called below.
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);

// Emit an ABI call to |fun| with (buffer, &holder[offset]) as arguments,
// preserving |liveVolatiles| across the call. Used by the string post-write
// barrier to add or remove a cell address from the store buffer.
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    StoreBufferMutationFn fun) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  // Pick a free volatile register for the cell address argument.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

  // If no volatile register remains for the ABI-call scratch, temporarily
  // repurpose |holder| (its value was already consumed by the address
  // computation above) and restore it afterwards.
  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}
// Emit a post-write barrier for storing the string |next| over |prev| at
// holder[offset]: if |next| is nursery-allocated, record the slot address in
// its store buffer (unless |prev| was already nursery-allocated, in which
// case the slot is already recorded); if |next| is tenured but |prev| was in
// the nursery, remove the stale entry instead.
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
// Materialize a RegExp literal by cloning its source object. When the MIR
// node knows the RegExpShared, allocate the clone inline from the template
// object; otherwise (or on allocation failure) call CloneRegExpObject in
// the VM.
void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}
1804 static constexpr int32_t RegExpPairsVectorStartOffset(
1805 int32_t inputOutputDataStartOffset) {
1806 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1807 int32_t(sizeof(MatchPairs));
1810 static Address RegExpPairCountAddress(MacroAssembler& masm,
1811 int32_t inputOutputDataStartOffset) {
1812 return Address(FramePointer, inputOutputDataStartOffset +
1813 int32_t(InputOutputDataSize) +
1814 MatchPairs::offsetOfPairCount());
// Update the lazy RegExpStatics after a successful match: record the pending
// and matches input strings (with GC barriers), the last index, and the lazy
// source/flags taken from the RegExpShared, and mark lazy evaluation as
// pending so the full statics are only computed if actually observed.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());

  // Pre-barrier the string slots we are about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // Strings are tenured-allocated in this configuration, so no post-write
    // barrier is needed.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Load the RegExpShared from the regexp object's fixed slot and copy its
  // source and flags into the lazy statics.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
1874 // Prepare an InputOutputData and optional MatchPairs which space has been
// Prepare the InputOutputData and MatchPairs the caller has already
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough.
// Otherwise, jump to notFound or failure.
//
// inputOutputDataStartOffset is the offset relative to the frame pointer
// register. This offset is negative for the RegExpExecTest stub.
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2,
                                    Register temp3,
                                    int32_t inputOutputDataStartOffset,
                                    gc::Heap initialStringHeap, Label* notFound,
                                    Label* failure) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  using irregexp::InputOutputData;

  /*
   * [SMDOC] Stack layout for PrepareAndExecuteRegExp
   *
   * Before this function is called, the caller is responsible for
   * allocating enough stack space for the following data:
   *
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *          inputStartAddress +---------->  inputStart|
   *            inputEndAddress +---------->    inputEnd|
   *          startIndexAddress +---------->  startIndex|
   *             matchesAddress +---------->     matches|-----+
   *                                    +---------------+     |
   * matchPairs(Address|Offset) +-----> +---------------+ <---+
   *                                    |  MatchPairs   |
   *           pairCountAddress +---------->     count  |
   *        pairsPointerAddress +---------->     pairs  |-----+
   *                                    +---------------+     |
   * pairsArray(Address|Offset) +-----> +---------------+ <---+
   *                                    |   MatchPair   |
   *     firstMatchStartAddress +---------->     start  |  <--+
   *                                    |         limit |     |
   *                                    +---------------+     |
   *                                           .              |
   *                                           .  Reserved space for
   *                                           .  RegExpObject::MaxPairCount
   *                                           .  MatchPair objects
   *                                           .              |
   *                                    +---------------+     |
   *                                    |   MatchPair   |     |
   *                                    |         start |     |
   *                                    |         limit |  <--+
   *                                    +---------------+
   */

  int32_t ioOffset = inputOutputDataStartOffset;
  int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
  int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));

  Address inputStartAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfInputStart());
  Address inputEndAddress(FramePointer,
                          ioOffset + InputOutputData::offsetOfInputEnd());
  Address startIndexAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfStartIndex());
  Address matchesAddress(FramePointer,
                         ioOffset + InputOutputData::offsetOfMatches());

  Address matchPairsAddress(FramePointer, matchPairsOffset);
  Address pairCountAddress(FramePointer,
                           matchPairsOffset + MatchPairs::offsetOfPairCount());
  Address pairsPointerAddress(FramePointer,
                              matchPairsOffset + MatchPairs::offsetOfPairs());

  Address pairsArrayAddress(FramePointer, pairsArrayOffset);
  Address firstMatchStartAddress(FramePointer,
                                 pairsArrayOffset + MatchPair::offsetOfStart());

  // First, fill in a skeletal MatchPairs instance on the stack. This will be
  // passed to the OOL stub in the caller if we aren't able to execute the
  // RegExp inline, and that stub needs to be able to determine whether the
  // execution finished successfully.

  // Initialize MatchPairs::pairCount to 1. The correct value can only
  // be determined after loading the RegExpShared. If the RegExpShared
  // has Kind::Atom, this is the correct pairCount.
  masm.store32(Imm32(1), pairCountAddress);

  // Initialize MatchPairs::pairs pointer
  masm.computeEffectiveAddress(pairsArrayAddress, temp1);
  masm.storePtr(temp1, pairsPointerAddress);

  // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
  masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);

  // Determine the set of volatile inputs to save when calling into C++ or
  // regexp code.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

  // Ensure the input string is not a rope.
  Label isLinear;
  masm.branchIfNotRope(input, &isLinear);
  {
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallPointerResult(temp1);
    masm.PopRegsInMask(volatileRegs);

    // A null result means linearization failed; bail to the OOL path.
    masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
  }
  masm.bind(&isLinear);

  // Load the RegExpShared.
  Register regexpReg = temp1;
  Address sharedSlot = Address(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
  masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
  masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);

  // Handle Atom matches
  Label notAtom, checkSuccess;
  masm.branchPtr(Assembler::Equal,
                 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
                 ImmWord(0), &notAtom);
  {
    masm.computeEffectiveAddress(matchPairsAddress, temp3);

    masm.PushRegsInMask(volatileRegs);
    using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
                                   size_t start, MatchPairs* matchPairs);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(regexpReg);
    masm.passABIArg(input);
    masm.passABIArg(lastIndex);
    masm.passABIArg(temp3);
    masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallInt32Result(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.jump(&checkSuccess);
  }
  masm.bind(&notAtom);

  // Don't handle regexps with too many capture pairs.
  masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
  masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
                failure);

  // Fill in the pair count in the MatchPairs on the stack.
  masm.store32(temp2, pairCountAddress);

  // Load code pointer and length of input (in bytes).
  // Store the input start in the InputOutputData.
  Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
  Register byteLength = temp3;
  {
    Label isLatin1, done;
    masm.loadStringLength(input, byteLength);

    masm.branchLatin1String(input, &isLatin1);

    // Two-byte input
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
        codePointer);
    // Two bytes per char: length << 1 gives the byte length.
    masm.lshiftPtr(Imm32(1), byteLength);
    masm.jump(&done);

    // Latin1 input
    masm.bind(&isLatin1);
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
        codePointer);

    masm.bind(&done);

    // Store end pointer
    masm.addPtr(byteLength, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Guard that the RegExpShared has been compiled for this type of input.
  // If it has not been compiled, we fall back to the OOL case, which will
  // do a VM call into the interpreter.
  // TODO: add an interpreter trampoline?
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack
  masm.computeEffectiveAddress(matchPairsAddress, temp2);
  masm.storePtr(temp2, matchesAddress);
  masm.storePtr(lastIndex, startIndexAddress);

  // Execute the RegExp.
  masm.computeEffectiveAddress(
      Address(FramePointer, inputOutputDataStartOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.storeCallInt32Result(temp1);
  masm.PopRegsInMask(volatileRegs);

  // temp1 holds the RegExpRunStatus for both the atom and the jitcode path.
  masm.bind(&checkSuccess);
  masm.branch32(Assembler::Equal, temp1,
                Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
  masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
                failure);

  // Lazily update the RegExpStatics.
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfRegExpStatics();
  masm.loadGlobalObjectData(temp1);
  masm.loadPtr(Address(temp1, offset), temp1);
  UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
                      initialStringHeap, volatileRegs);

  return true;
}
// Forward declaration: copy |len| characters in |encoding| from |from| to
// |to|, clobbering |byteOpScratch|, copying at most |maximumLength| chars.
// (Defined later in this file.)
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding,
                            size_t maximumLength = SIZE_MAX);
// Helper that emits code to create a string for one capture-group range of a
// base string: either the empty string, the base itself, a static string, an
// inline copy, or a dependent string, depending on the range.
class CreateDependentString {
  CharEncoding encoding_;
  Register string_;  // Output register holding the new string.
  Register temp1_;
  Register temp2_;
  Label* failure_;  // Jumped to if even the fallback allocation fails.

  // The three allocation strategies generate() can pick.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  // Per-kind out-of-line allocation entry points and their rejoin labels.
  mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
      joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a string of the given kind, binding the per-kind fallback and
  // rejoin labels, and store its flags word.
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length (limit - start) into temp1_; start is in temp2_.
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.lookupStaticString(temp1_, string_, runtime->staticStrings());

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Free up |string_| and |base| as extra scratch registers; they are
    // restored after the character copy below.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
                      &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}
// Emit the out-of-line allocation paths for each FallbackKind: call into the
// C++ allocator, then rejoin the corresponding inline path on success.
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext* cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    // A null result means allocation failed; report to the caller's label.
    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
//
// Returns nullptr on failure. On success the stub returns (in JSReturnOperand)
// the match-result array, null for no-match, or undefined to request the
// out-of-line VM fallback.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
                                              gc::Heap initialStringHeap,
                                              bool isExecMatch) {
  if (isExecMatch) {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
  } else {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
  }

  // |initialStringHeap| could be stale after a GC.
  JS::AutoCheckCannotGC nogc(cx);

  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register maybeTemp4 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp4 = regs.takeAny();
  }
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  Label notFoundZeroLastIndex;
  if (isExecMatch) {
    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
  }

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // If a regexp has named captures, fall back to the OOL stub, which
  // will end up calling CreateRegExpMatchResults.
  Register shared = temp2;
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.branchPtr(Assembler::NotEqual,
                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
                 ImmWord(0), &oolEntry);

  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
  masm.branchTest32(Assembler::NonZero,
                    Address(shared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  // Construct the result.
  Register object = temp1;
  {
    // In most cases, the array will have just 1-2 elements, so we optimize for
    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
    // because two slots are used for the elements header).

    // Load the array length in temp2 and the shape in temp3.
    Label allocated;
    masm.load32(pairCountAddress, temp2);
    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                    RegExpRealm::offsetOfNormalMatchResultShape();
    masm.loadGlobalObjectData(temp3);
    masm.loadPtr(Address(temp3, offset), temp3);

    auto emitAllocObject = [&](size_t elementCapacity) {
      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
      MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
      kind = ForegroundToBackgroundAllocKind(kind);

#ifdef DEBUG
      // Assert all of the available slots are used for |elementCapacity|
      // elements.
      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif

      constexpr size_t numUsedDynamicSlots =
          RegExpRealm::MatchResultObjectSlotSpan;
      constexpr size_t numDynamicSlots =
          RegExpRealm::MatchResultObjectNumDynamicSlots;
      constexpr size_t arrayLength = 1;
      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
                                        arrayLength, elementCapacity,
                                        numUsedDynamicSlots, numDynamicSlots,
                                        kind, gc::Heap::Default, &oolEntry);
    };

    Label moreThan2;
    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
    emitAllocObject(2);
    masm.jump(&allocated);

    Label moreThan6;
    masm.bind(&moreThan2);
    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
    emitAllocObject(6);
    masm.jump(&allocated);

    masm.bind(&moreThan6);
    static_assert(RegExpObject::MaxPairCount == 14);
    emitAllocObject(RegExpObject::MaxPairCount);

    masm.bind(&allocated);
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpMatcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *          pairsCountAddress +----------->  count    |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | `count` objects will be
   *                                    +---------------+          | initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |       start   |  <-------+
   *                                    |       limit   |
   *                                    +---------------+
   */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                "and the end offset of the match");

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfStart());
  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfLimit());

  Label* depStrFailure = &oolEntry;
  Label restoreRegExpAndLastIndex;

  Register temp4;
  if (maybeTemp4 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fourth temporary. Reuse |regexp|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(regexp);
    temp4 = regexp;
  } else {
    temp4 = maybeTemp4;
  }

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(lastIndex);
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  auto maybeRestoreRegExpAndLastIndex = [&]() {
    if (maybeTemp5 == InvalidReg) {
      masm.pop(lastIndex);
    }
    if (maybeTemp4 == InvalidReg) {
      masm.pop(regexp);
    }
  };

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        initialStringHeap);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  maybeRestoreRegExpAndLastIndex();

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  Address firstMatchPairStartAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address firstMatchPairLimitAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());

  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within nursery object.)
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // For the ExecMatch stub, if the regular expression is global or sticky, we
  // have to update its .lastIndex slot.
  if (isExecMatch) {
    MOZ_ASSERT(object != lastIndex);
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.load32(firstMatchPairLimitAddress, lastIndex);
    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&notFound);
  if (isExecMatch) {
    // A failed global/sticky match resets .lastIndex to zero.
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.bind(&notFoundZeroLastIndex);
    masm.storeValue(Int32Value(0), lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }
  masm.moveValue(NullValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fall-through to the ool entry after restoring the registers.
  masm.bind(&restoreRegExpAndLastIndex);
  maybeRestoreRegExpAndLastIndex();

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
  CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, name);
#endif

  return code;
}
2744 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2745 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2746 /* isExecMatch = */ false);
2749 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2750 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2751 /* isExecMatch = */ true);
2754 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2755 LRegExpMatcher* lir_;
2757 public:
2758 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2760 void accept(CodeGenerator* codegen) override {
2761 codegen->visitOutOfLineRegExpMatcher(this);
2764 LRegExpMatcher* lir() const { return lir_; }
// Slow path for LRegExpMatcher: call RegExpMatcherRaw in the VM, passing the
// MatchPairs that live on the stack just above the InputOutputData.
void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // temp <- address of the MatchPairs (right after the InputOutputData).
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse order of the VM function's signature.
  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and the live
  // registers are already saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, HandleObject regexp, HandleString input,
               int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the fast path for LRegExpMatcher: call the per-zone RegExpMatcher stub
// and fall back to the OOL VM call if the stub returns |undefined|.
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The stub clobbers its fixed input registers; they must not alias the
  // return value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Space for the InputOutputData / MatchPairs the stub expects.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpMatcherStub =
      jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
  // |undefined| signals that the stub could not handle this case inline.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
2831 class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
2832 LRegExpExecMatch* lir_;
2834 public:
2835 explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}
2837 void accept(CodeGenerator* codegen) override {
2838 codegen->visitOutOfLineRegExpExecMatch(this);
2841 LRegExpExecMatch* lir() const { return lir_; }
// Slow path for LRegExpExecMatch: call RegExpBuiltinExecMatchFromJit in the
// VM, passing the MatchPairs that live on the stack just above the
// InputOutputData.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // temp <- address of the MatchPairs (right after the InputOutputData).
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  // Arguments are pushed in reverse order of the VM function's signature.
  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
2871 void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
2872 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2873 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2874 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
2876 #if defined(JS_NUNBOX32)
2877 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2878 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2879 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2880 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2881 #elif defined(JS_PUNBOX64)
2882 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2883 static_assert(RegExpMatcherStringReg != JSReturnReg);
2884 #endif
2886 masm.reserveStack(RegExpReservedStack);
2888 auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
2889 addOutOfLineCode(ool, lir->mir());
2891 const JitZone* jitZone = gen->realm->zone()->jitZone();
2892 JitCode* regExpExecMatchStub =
2893 jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
2894 masm.call(regExpExecMatchStub);
2895 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2897 masm.bind(ool->rejoin());
2898 masm.freeStack(RegExpReservedStack);
2901 JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
2902 JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
2904 Register regexp = RegExpSearcherRegExpReg;
2905 Register input = RegExpSearcherStringReg;
2906 Register lastIndex = RegExpSearcherLastIndexReg;
2907 Register result = ReturnReg;
2909 // We are free to clobber all registers, as LRegExpSearcher is a call
2910 // instruction.
2911 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2912 regs.take(input);
2913 regs.take(regexp);
2914 regs.take(lastIndex);
2916 Register temp1 = regs.takeAny();
2917 Register temp2 = regs.takeAny();
2918 Register temp3 = regs.takeAny();
2920 TempAllocator temp(&cx->tempLifoAlloc());
2921 JitContext jcx(cx);
2922 StackMacroAssembler masm(cx, temp);
2923 AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");
2925 #ifdef JS_USE_LINK_REGISTER
2926 masm.pushReturnAddress();
2927 #endif
2928 masm.push(FramePointer);
2929 masm.moveStackPtrTo(FramePointer);
2931 #ifdef DEBUG
2932 // Store sentinel value to cx->regExpSearcherLastLimit.
2933 // See comment in RegExpSearcherImpl.
2934 masm.loadJSContext(temp1);
2935 masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
2936 Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
2937 #endif
2939 // The InputOutputData is placed above the frame pointer and return address on
2940 // the stack.
2941 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2943 Label notFound, oolEntry;
2944 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2945 temp3, inputOutputDataStartOffset,
2946 initialStringHeap, &notFound, &oolEntry)) {
2947 return nullptr;
2950 // clang-format off
2952 * [SMDOC] Stack layout for the RegExpSearcher stub
2954 * +---------------+
2955 * FramePointer +-----> |Caller-FramePtr|
2956 * +---------------+
2957 * |Return-Address |
2958 * +---------------+
2959 * inputOutputDataStartOffset +-----> +---------------+
2960 * |InputOutputData|
2961 * +---------------+
2962 * +---------------+
2963 * | MatchPairs |
2964 * | count |
2965 * | pairs |
2966 * | |
2967 * +---------------+
2968 * pairsVectorStartOffset +-----> +---------------+
2969 * | MatchPair |
2970 * matchPairStart +------------> start | <-------+
2971 * matchPairLimit +------------> limit | | Reserved space for
2972 * +---------------+ | `RegExpObject::MaxPairCount`
2973 * . | MatchPair objects.
2974 * . |
2975 * . | Only a single object will
2976 * +---------------+ | be initialized and can be
2977 * | MatchPair | | accessed below.
2978 * | start | <-------+
2979 * | limit |
2980 * +---------------+
2982 // clang-format on
2984 int32_t pairsVectorStartOffset =
2985 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2986 Address matchPairStart(FramePointer,
2987 pairsVectorStartOffset + MatchPair::offsetOfStart());
2988 Address matchPairLimit(FramePointer,
2989 pairsVectorStartOffset + MatchPair::offsetOfLimit());
2991 // Store match limit to cx->regExpSearcherLastLimit and return the index.
2992 masm.load32(matchPairLimit, result);
2993 masm.loadJSContext(input);
2994 masm.store32(result,
2995 Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
2996 masm.load32(matchPairStart, result);
2997 masm.pop(FramePointer);
2998 masm.ret();
3000 masm.bind(&notFound);
3001 masm.move32(Imm32(RegExpSearcherResultNotFound), result);
3002 masm.pop(FramePointer);
3003 masm.ret();
3005 masm.bind(&oolEntry);
3006 masm.move32(Imm32(RegExpSearcherResultFailed), result);
3007 masm.pop(FramePointer);
3008 masm.ret();
3010 Linker linker(masm);
3011 JitCode* code = linker.newCode(cx, CodeKind::Other);
3012 if (!code) {
3013 return nullptr;
3016 CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
3017 #ifdef MOZ_VTUNE
3018 vtune::MarkStub(code, "RegExpSearcherStub");
3019 #endif
3021 return code;
3024 class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
3025 LRegExpSearcher* lir_;
3027 public:
3028 explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}
3030 void accept(CodeGenerator* codegen) override {
3031 codegen->visitOutOfLineRegExpSearcher(this);
3034 LRegExpSearcher* lir() const { return lir_; }
3037 void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
3038 LRegExpSearcher* lir = ool->lir();
3039 Register lastIndex = ToRegister(lir->lastIndex());
3040 Register input = ToRegister(lir->string());
3041 Register regexp = ToRegister(lir->regexp());
3043 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3044 regs.take(lastIndex);
3045 regs.take(input);
3046 regs.take(regexp);
3047 Register temp = regs.takeAny();
3049 masm.computeEffectiveAddress(
3050 Address(masm.getStackPointer(), InputOutputDataSize), temp);
3052 pushArg(temp);
3053 pushArg(lastIndex);
3054 pushArg(input);
3055 pushArg(regexp);
3057 // We are not using oolCallVM because we are in a Call, and that live
3058 // registers are already saved by the the register allocator.
3059 using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
3060 int32_t lastIndex, MatchPairs* pairs, int32_t* result);
3061 callVM<Fn, RegExpSearcherRaw>(lir);
3063 masm.jump(ool->rejoin());
3066 void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
3067 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
3068 MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
3069 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
3070 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3072 static_assert(RegExpSearcherRegExpReg != ReturnReg);
3073 static_assert(RegExpSearcherStringReg != ReturnReg);
3074 static_assert(RegExpSearcherLastIndexReg != ReturnReg);
3076 masm.reserveStack(RegExpReservedStack);
3078 OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
3079 addOutOfLineCode(ool, lir->mir());
3081 const JitZone* jitZone = gen->realm->zone()->jitZone();
3082 JitCode* regExpSearcherStub =
3083 jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
3084 masm.call(regExpSearcherStub);
3085 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
3086 ool->entry());
3087 masm.bind(ool->rejoin());
3089 masm.freeStack(RegExpReservedStack);
3092 void CodeGenerator::visitRegExpSearcherLastLimit(
3093 LRegExpSearcherLastLimit* lir) {
3094 Register result = ToRegister(lir->output());
3095 Register scratch = ToRegister(lir->temp0());
3097 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
3100 JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
3101 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");
3103 Register regexp = RegExpExecTestRegExpReg;
3104 Register input = RegExpExecTestStringReg;
3105 Register result = ReturnReg;
3107 TempAllocator temp(&cx->tempLifoAlloc());
3108 JitContext jcx(cx);
3109 StackMacroAssembler masm(cx, temp);
3110 AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");
3112 #ifdef JS_USE_LINK_REGISTER
3113 masm.pushReturnAddress();
3114 #endif
3115 masm.push(FramePointer);
3116 masm.moveStackPtrTo(FramePointer);
3118 // We are free to clobber all registers, as LRegExpExecTest is a call
3119 // instruction.
3120 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3121 regs.take(input);
3122 regs.take(regexp);
3124 // Ensure lastIndex != result.
3125 regs.take(result);
3126 Register lastIndex = regs.takeAny();
3127 regs.add(result);
3128 Register temp1 = regs.takeAny();
3129 Register temp2 = regs.takeAny();
3130 Register temp3 = regs.takeAny();
3132 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
3133 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
3135 masm.reserveStack(RegExpReservedStack);
3137 // Load lastIndex and skip RegExp execution if needed.
3138 Label notFoundZeroLastIndex;
3139 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
3141 // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
3142 // before calling the stub. For RegExpExecTest we call the stub before
3143 // reserving stack space, so the offset of the InputOutputData relative to the
3144 // frame pointer is negative.
3145 constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);
3147 // On ARM64, load/store instructions can encode an immediate offset in the
3148 // range [-256, 4095]. If we ever fail this assertion, it would be more
3149 // efficient to store the data above the frame pointer similar to
3150 // RegExpMatcher and RegExpSearcher.
3151 static_assert(inputOutputDataStartOffset >= -256);
3153 Label notFound, oolEntry;
3154 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
3155 temp3, inputOutputDataStartOffset,
3156 initialStringHeap, &notFound, &oolEntry)) {
3157 return nullptr;
3160 // Set `result` to true/false to indicate found/not-found, or to
3161 // RegExpExecTestResultFailed if we have to retry in C++. If the regular
3162 // expression is global or sticky, we also have to update its .lastIndex slot.
3164 Label done;
3165 int32_t pairsVectorStartOffset =
3166 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
3167 Address matchPairLimit(FramePointer,
3168 pairsVectorStartOffset + MatchPair::offsetOfLimit());
3170 masm.move32(Imm32(1), result);
3171 masm.branchTest32(Assembler::Zero, flagsSlot,
3172 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3173 &done);
3174 masm.load32(matchPairLimit, lastIndex);
3175 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
3176 masm.jump(&done);
3178 masm.bind(&notFound);
3179 masm.move32(Imm32(0), result);
3180 masm.branchTest32(Assembler::Zero, flagsSlot,
3181 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3182 &done);
3183 masm.storeValue(Int32Value(0), lastIndexSlot);
3184 masm.jump(&done);
3186 masm.bind(&notFoundZeroLastIndex);
3187 masm.move32(Imm32(0), result);
3188 masm.storeValue(Int32Value(0), lastIndexSlot);
3189 masm.jump(&done);
3191 masm.bind(&oolEntry);
3192 masm.move32(Imm32(RegExpExecTestResultFailed), result);
3194 masm.bind(&done);
3195 masm.freeStack(RegExpReservedStack);
3196 masm.pop(FramePointer);
3197 masm.ret();
3199 Linker linker(masm);
3200 JitCode* code = linker.newCode(cx, CodeKind::Other);
3201 if (!code) {
3202 return nullptr;
3205 CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
3206 #ifdef MOZ_VTUNE
3207 vtune::MarkStub(code, "RegExpExecTestStub");
3208 #endif
3210 return code;
3213 class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
3214 LRegExpExecTest* lir_;
3216 public:
3217 explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}
3219 void accept(CodeGenerator* codegen) override {
3220 codegen->visitOutOfLineRegExpExecTest(this);
3223 LRegExpExecTest* lir() const { return lir_; }
3226 void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
3227 LRegExpExecTest* lir = ool->lir();
3228 Register input = ToRegister(lir->string());
3229 Register regexp = ToRegister(lir->regexp());
3231 pushArg(input);
3232 pushArg(regexp);
3234 // We are not using oolCallVM because we are in a Call and live registers have
3235 // already been saved by the register allocator.
3236 using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
3237 HandleString input, bool* result);
3238 callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);
3240 masm.jump(ool->rejoin());
3243 void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
3244 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
3245 MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
3246 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3248 static_assert(RegExpExecTestRegExpReg != ReturnReg);
3249 static_assert(RegExpExecTestStringReg != ReturnReg);
3251 auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
3252 addOutOfLineCode(ool, lir->mir());
3254 const JitZone* jitZone = gen->realm->zone()->jitZone();
3255 JitCode* regExpExecTestStub =
3256 jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
3257 masm.call(regExpExecTestStub);
3259 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
3260 ool->entry());
3262 masm.bind(ool->rejoin());
3265 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3266 Register regexp = ToRegister(ins->regexp());
3267 Register input = ToRegister(ins->input());
3268 Register output = ToRegister(ins->output());
3270 using Fn =
3271 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3272 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3273 ins, ArgList(regexp, input), StoreRegisterTo(output));
3275 // Load RegExpShared in |output|.
3276 Label vmCall;
3277 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3279 // Return true iff pairCount > 1.
3280 Label returnTrue;
3281 masm.branch32(Assembler::Above,
3282 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3283 &returnTrue);
3284 masm.move32(Imm32(0), output);
3285 masm.jump(ool->rejoin());
3287 masm.bind(&returnTrue);
3288 masm.move32(Imm32(1), output);
3290 masm.bind(ool->rejoin());
3293 class OutOfLineRegExpPrototypeOptimizable
3294 : public OutOfLineCodeBase<CodeGenerator> {
3295 LRegExpPrototypeOptimizable* ins_;
3297 public:
3298 explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
3299 : ins_(ins) {}
3301 void accept(CodeGenerator* codegen) override {
3302 codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
3304 LRegExpPrototypeOptimizable* ins() const { return ins_; }
3307 void CodeGenerator::visitRegExpPrototypeOptimizable(
3308 LRegExpPrototypeOptimizable* ins) {
3309 Register object = ToRegister(ins->object());
3310 Register output = ToRegister(ins->output());
3311 Register temp = ToRegister(ins->temp0());
3313 OutOfLineRegExpPrototypeOptimizable* ool =
3314 new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
3315 addOutOfLineCode(ool, ins->mir());
3317 const GlobalObject* global = gen->realm->maybeGlobal();
3318 MOZ_ASSERT(global);
3319 masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
3320 ool->entry());
3321 masm.move32(Imm32(0x1), output);
3323 masm.bind(ool->rejoin());
3326 void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
3327 OutOfLineRegExpPrototypeOptimizable* ool) {
3328 LRegExpPrototypeOptimizable* ins = ool->ins();
3329 Register object = ToRegister(ins->object());
3330 Register output = ToRegister(ins->output());
3332 saveVolatile(output);
3334 using Fn = bool (*)(JSContext* cx, JSObject* proto);
3335 masm.setupAlignedABICall();
3336 masm.loadJSContext(output);
3337 masm.passABIArg(output);
3338 masm.passABIArg(object);
3339 masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
3340 masm.storeCallBoolResult(output);
3342 restoreVolatile(output);
3344 masm.jump(ool->rejoin());
3347 class OutOfLineRegExpInstanceOptimizable
3348 : public OutOfLineCodeBase<CodeGenerator> {
3349 LRegExpInstanceOptimizable* ins_;
3351 public:
3352 explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
3353 : ins_(ins) {}
3355 void accept(CodeGenerator* codegen) override {
3356 codegen->visitOutOfLineRegExpInstanceOptimizable(this);
3358 LRegExpInstanceOptimizable* ins() const { return ins_; }
3361 void CodeGenerator::visitRegExpInstanceOptimizable(
3362 LRegExpInstanceOptimizable* ins) {
3363 Register object = ToRegister(ins->object());
3364 Register output = ToRegister(ins->output());
3365 Register temp = ToRegister(ins->temp0());
3367 OutOfLineRegExpInstanceOptimizable* ool =
3368 new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
3369 addOutOfLineCode(ool, ins->mir());
3371 const GlobalObject* global = gen->realm->maybeGlobal();
3372 MOZ_ASSERT(global);
3373 masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
3374 masm.move32(Imm32(0x1), output);
3376 masm.bind(ool->rejoin());
3379 void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
3380 OutOfLineRegExpInstanceOptimizable* ool) {
3381 LRegExpInstanceOptimizable* ins = ool->ins();
3382 Register object = ToRegister(ins->object());
3383 Register proto = ToRegister(ins->proto());
3384 Register output = ToRegister(ins->output());
3386 saveVolatile(output);
3388 using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
3389 masm.setupAlignedABICall();
3390 masm.loadJSContext(output);
3391 masm.passABIArg(output);
3392 masm.passABIArg(object);
3393 masm.passABIArg(proto);
3394 masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
3395 masm.storeCallBoolResult(output);
3397 restoreVolatile(output);
3399 masm.jump(ool->rejoin());
3402 static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
3403 Register len, Register temp0, Register temp1,
3404 Register output, CharEncoding encoding) {
3405 #ifdef DEBUG
3406 Label ok;
3407 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
3408 masm.assumeUnreachable("Length should be greater than 0.");
3409 masm.bind(&ok);
3410 #endif
3412 Register chars = temp0;
3413 masm.loadStringChars(str, chars, encoding);
3415 masm.move32(Imm32(0), output);
3417 Label start, done;
3418 masm.bind(&start);
3420 Register currentChar = temp1;
3421 masm.loadChar(chars, output, currentChar, encoding);
3422 masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
3424 masm.add32(Imm32(1), output);
3425 masm.branch32(Assembler::NotEqual, output, len, &start);
3427 masm.move32(Imm32(-1), output);
3429 masm.bind(&done);
3432 void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
3433 Register str = ToRegister(ins->str());
3434 Register output = ToRegister(ins->output());
3435 Register temp0 = ToRegister(ins->temp0());
3436 Register temp1 = ToRegister(ins->temp1());
3437 Register len = ToRegister(ins->temp2());
3439 using Fn = bool (*)(JSContext*, JSString*, int32_t*);
3440 OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
3441 ins, ArgList(str), StoreRegisterTo(output));
3443 masm.branchIfRope(str, ool->entry());
3444 masm.loadStringLength(str, len);
3446 Label isLatin1, done;
3447 masm.branchLatin1String(str, &isLatin1);
3449 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3450 CharEncoding::TwoByte);
3451 masm.jump(&done);
3453 masm.bind(&isLatin1);
3455 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3456 CharEncoding::Latin1);
3458 masm.bind(&done);
3459 masm.bind(ool->rejoin());
3462 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
3463 if (lir->replacement()->isConstant()) {
3464 pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
3465 } else {
3466 pushArg(ToRegister(lir->replacement()));
3469 if (lir->pattern()->isConstant()) {
3470 pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
3471 } else {
3472 pushArg(ToRegister(lir->pattern()));
3475 if (lir->string()->isConstant()) {
3476 pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
3477 } else {
3478 pushArg(ToRegister(lir->string()));
3481 using Fn =
3482 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
3483 if (lir->mir()->isFlatReplacement()) {
3484 callVM<Fn, StringFlatReplaceString>(lir);
3485 } else {
3486 callVM<Fn, StringReplace>(lir);
3490 void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
3491 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3492 TypedOrValueRegister lhs =
3493 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
3494 TypedOrValueRegister rhs =
3495 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
3496 ValueOperand output = ToOutValue(lir);
3498 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3500 switch (jsop) {
3501 case JSOp::Add:
3502 case JSOp::Sub:
3503 case JSOp::Mul:
3504 case JSOp::Div:
3505 case JSOp::Mod:
3506 case JSOp::Pow:
3507 case JSOp::BitAnd:
3508 case JSOp::BitOr:
3509 case JSOp::BitXor:
3510 case JSOp::Lsh:
3511 case JSOp::Rsh:
3512 case JSOp::Ursh: {
3513 IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
3514 addIC(lir, allocateIC(ic));
3515 return;
3517 default:
3518 MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
3522 void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
3523 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3524 TypedOrValueRegister lhs =
3525 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
3526 TypedOrValueRegister rhs =
3527 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
3528 Register output = ToRegister(lir->output());
3530 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3532 switch (jsop) {
3533 case JSOp::Lt:
3534 case JSOp::Le:
3535 case JSOp::Gt:
3536 case JSOp::Ge:
3537 case JSOp::Eq:
3538 case JSOp::Ne:
3539 case JSOp::StrictEq:
3540 case JSOp::StrictNe: {
3541 IonCompareIC ic(liveRegs, lhs, rhs, output);
3542 addIC(lir, allocateIC(ic));
3543 return;
3545 default:
3546 MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
3550 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
3551 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3552 TypedOrValueRegister input =
3553 TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
3554 ValueOperand output = ToOutValue(lir);
3556 IonUnaryArithIC ic(liveRegs, input, output);
3557 addIC(lir, allocateIC(ic));
3560 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
3561 pushArg(ImmPtr(lir->mir()->module()));
3563 using Fn = JSObject* (*)(JSContext*, HandleObject);
3564 callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
3567 void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
3568 pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
3569 pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
3570 pushArg(ImmGCPtr(current->mir()->info().script()));
3572 using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
3573 callVM<Fn, js::StartDynamicModuleImport>(lir);
3576 void CodeGenerator::visitLambda(LLambda* lir) {
3577 Register envChain = ToRegister(lir->environmentChain());
3578 Register output = ToRegister(lir->output());
3579 Register tempReg = ToRegister(lir->temp0());
3581 JSFunction* fun = lir->mir()->templateFunction();
3583 using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
3584 OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
3585 lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));
3587 TemplateObject templateObject(fun);
3588 masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
3589 ool->entry());
3591 masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
3592 Address(output, JSFunction::offsetOfEnvironment()));
3593 // No post barrier needed because output is guaranteed to be allocated in
3594 // the nursery.
3596 masm.bind(ool->rejoin());
3599 void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
3600 Register envChain = ToRegister(lir->envChain());
3601 Register prototype = ToRegister(lir->prototype());
3603 pushArg(prototype);
3604 pushArg(envChain);
3605 pushArg(ImmGCPtr(lir->mir()->function()));
3607 using Fn =
3608 JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
3609 callVM<Fn, js::FunWithProtoOperation>(lir);
3612 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
3613 pushArg(Imm32(lir->mir()->prefixKind()));
3614 pushArg(ToValue(lir, LSetFunName::NameIndex));
3615 pushArg(ToRegister(lir->fun()));
3617 using Fn =
3618 bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
3619 callVM<Fn, js::SetFunctionName>(lir);
3622 void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
3623 // Note: markOsiPoint ensures enough space exists between the last
3624 // LOsiPoint and this one to patch adjacent call instructions.
3626 MOZ_ASSERT(masm.framePushed() == frameSize());
3628 uint32_t osiCallPointOffset = markOsiPoint(lir);
3630 LSafepoint* safepoint = lir->associatedSafepoint();
3631 MOZ_ASSERT(!safepoint->osiCallPointOffset());
3632 safepoint->setOsiCallPointOffset(osiCallPointOffset);
3634 #ifdef DEBUG
3635 // There should be no movegroups or other instructions between
3636 // an instruction and its OsiPoint. This is necessary because
3637 // we use the OsiPoint's snapshot from within VM calls.
3638 for (LInstructionReverseIterator iter(current->rbegin(lir));
3639 iter != current->rend(); iter++) {
3640 if (*iter == lir) {
3641 continue;
3643 MOZ_ASSERT(!iter->isMoveGroup());
3644 MOZ_ASSERT(iter->safepoint() == safepoint);
3645 break;
3647 #endif
3649 #ifdef CHECK_OSIPOINT_REGISTERS
3650 if (shouldVerifyOsiPointRegs(safepoint)) {
3651 verifyOsiPointRegs(safepoint);
3653 #endif
3656 void CodeGenerator::visitPhi(LPhi* lir) {
3657 MOZ_CRASH("Unexpected LPhi in CodeGenerator");
3660 void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
3662 void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
3663 MTableSwitch* mir = ins->mir();
3664 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3665 const LAllocation* temp;
3667 if (mir->getOperand(0)->type() != MIRType::Int32) {
3668 temp = ins->tempInt()->output();
3670 // The input is a double, so try and convert it to an integer.
3671 // If it does not fit in an integer, take the default case.
3672 masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
3673 defaultcase, false);
3674 } else {
3675 temp = ins->index();
3678 emitTableSwitchDispatch(mir, ToRegister(temp),
3679 ToRegisterOrInvalid(ins->tempPointer()));
3682 void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
3683 MTableSwitch* mir = ins->mir();
3684 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3686 Register index = ToRegister(ins->tempInt());
3687 ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
3688 Register tag = masm.extractTag(value, index);
3689 masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
3691 Label unboxInt, isInt;
3692 masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
3694 FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
3695 masm.unboxDouble(value, floatIndex);
3696 masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
3697 masm.jump(&isInt);
3700 masm.bind(&unboxInt);
3701 masm.unboxInt32(value, index);
3703 masm.bind(&isInt);
3705 emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
3708 void CodeGenerator::visitParameter(LParameter* lir) {}
3710 void CodeGenerator::visitCallee(LCallee* lir) {
3711 Register callee = ToRegister(lir->output());
3712 Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3714 masm.loadFunctionFromCalleeToken(ptr, callee);
3717 void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
3718 Register output = ToRegister(lir->output());
3719 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3720 masm.loadPtr(calleeToken, output);
3722 // We must be inside a function.
3723 MOZ_ASSERT(current->mir()->info().script()->function());
3725 // The low bit indicates whether this call is constructing, just clear the
3726 // other bits.
3727 static_assert(CalleeToken_Function == 0x0,
3728 "CalleeTokenTag value should match");
3729 static_assert(CalleeToken_FunctionConstructing == 0x1,
3730 "CalleeTokenTag value should match");
3731 masm.andPtr(Imm32(0x1), output);
3734 void CodeGenerator::visitReturn(LReturn* lir) {
3735 #if defined(JS_NUNBOX32)
3736 DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
3737 DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
3738 MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
3739 MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
3740 #elif defined(JS_PUNBOX64)
3741 DebugOnly<LAllocation*> result = lir->getOperand(0);
3742 MOZ_ASSERT(ToRegister(result) == JSReturnReg);
3743 #endif
3744 // Don't emit a jump to the return label if this is the last block, as
3745 // it'll fall through to the epilogue.
3747 // This is -not- true however for a Generator-return, which may appear in the
3748 // middle of the last block, so we should always emit the jump there.
3749 if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
3750 masm.jump(&returnLabel_);
3754 void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
3755 Register temp = ToRegister(lir->temp());
3757 // Remember the OSR entry offset into the code buffer.
3758 masm.flushBuffer();
3759 setOsrEntryOffset(masm.size());
3761 // Allocate the full frame for this function
3762 // Note we have a new entry here. So we reset MacroAssembler::framePushed()
3763 // to 0, before reserving the stack.
3764 MOZ_ASSERT(masm.framePushed() == frameSize());
3765 masm.setFramePushed(0);
3767 // The Baseline code ensured both the frame pointer and stack pointer point to
3768 // the JitFrameLayout on the stack.
3770 // If profiling, save the current frame pointer to a per-thread global field.
3771 if (isProfilerInstrumentationEnabled()) {
3772 masm.profilerEnterFrame(FramePointer, temp);
3775 masm.reserveStack(frameSize());
3776 MOZ_ASSERT(masm.framePushed() == frameSize());
3778 // Ensure that the Ion frames is properly aligned.
3779 masm.assertStackAlignment(JitStackAlignment, 0);
3782 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3783 const LAllocation* frame = lir->getOperand(0);
3784 const LDefinition* object = lir->getDef(0);
3786 const ptrdiff_t frameOffset =
3787 BaselineFrame::reverseOffsetOfEnvironmentChain();
3789 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3792 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3793 const LAllocation* frame = lir->getOperand(0);
3794 const LDefinition* object = lir->getDef(0);
3796 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3798 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3801 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3802 const LAllocation* frame = value->getOperand(0);
3803 const ValueOperand out = ToOutValue(value);
3805 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3807 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
3810 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3811 const LAllocation* frame = lir->getOperand(0);
3812 const ValueOperand out = ToOutValue(lir);
3814 Address flags =
3815 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3816 Address retval =
3817 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
3819 masm.moveValue(UndefinedValue(), out);
3821 Label done;
3822 masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3823 &done);
3824 masm.loadValue(retval, out);
3825 masm.bind(&done);
3828 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3829 const LAllocation* arg = lir->arg();
3830 MIRType argType = lir->type();
3831 uint32_t argslot = lir->argslot();
3832 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3834 Address dest = AddressOfPassedArg(argslot);
3836 if (arg->isFloatReg()) {
3837 masm.boxDouble(ToFloatRegister(arg), dest);
3838 } else if (arg->isRegister()) {
3839 masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3840 } else {
3841 masm.storeValue(arg->toConstant()->toJSValue(), dest);
3845 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3846 ValueOperand val = ToValue(lir, 0);
3847 uint32_t argslot = lir->argslot();
3848 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3850 masm.storeValue(val, AddressOfPassedArg(argslot));
3853 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3854 if (!group->numMoves()) {
3855 return;
3858 MoveResolver& resolver = masm.moveResolver();
3860 for (size_t i = 0; i < group->numMoves(); i++) {
3861 const LMove& move = group->getMove(i);
3863 LAllocation from = move.from();
3864 LAllocation to = move.to();
3865 LDefinition::Type type = move.type();
3867 // No bogus moves.
3868 MOZ_ASSERT(from != to);
3869 MOZ_ASSERT(!from.isConstant());
3870 MoveOp::Type moveType;
3871 switch (type) {
3872 case LDefinition::OBJECT:
3873 case LDefinition::SLOTS:
3874 case LDefinition::WASM_ANYREF:
3875 #ifdef JS_NUNBOX32
3876 case LDefinition::TYPE:
3877 case LDefinition::PAYLOAD:
3878 #else
3879 case LDefinition::BOX:
3880 #endif
3881 case LDefinition::GENERAL:
3882 case LDefinition::STACKRESULTS:
3883 moveType = MoveOp::GENERAL;
3884 break;
3885 case LDefinition::INT32:
3886 moveType = MoveOp::INT32;
3887 break;
3888 case LDefinition::FLOAT32:
3889 moveType = MoveOp::FLOAT32;
3890 break;
3891 case LDefinition::DOUBLE:
3892 moveType = MoveOp::DOUBLE;
3893 break;
3894 case LDefinition::SIMD128:
3895 moveType = MoveOp::SIMD128;
3896 break;
3897 default:
3898 MOZ_CRASH("Unexpected move type");
3901 masm.propagateOOM(
3902 resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3905 masm.propagateOOM(resolver.resolve());
3906 if (masm.oom()) {
3907 return;
3910 MoveEmitter emitter(masm);
3912 #ifdef JS_CODEGEN_X86
3913 if (group->maybeScratchRegister().isGeneralReg()) {
3914 emitter.setScratchRegister(
3915 group->maybeScratchRegister().toGeneralReg()->reg());
3916 } else {
3917 resolver.sortMemoryToMemoryMoves();
3919 #endif
3921 emitter.emit(resolver);
3922 emitter.finish();
3925 void CodeGenerator::visitInteger(LInteger* lir) {
3926 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
3929 void CodeGenerator::visitInteger64(LInteger64* lir) {
3930 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
3933 void CodeGenerator::visitPointer(LPointer* lir) {
3934 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
3937 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3938 Register output = ToRegister(lir->output());
3939 uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3941 // Load a pointer to the entry in IonScript's nursery objects list.
3942 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3943 masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
3945 // Load the JSObject*.
3946 masm.loadPtr(Address(output, 0), output);
3949 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3950 // No-op.
3953 void CodeGenerator::visitDebugEnterGCUnsafeRegion(
3954 LDebugEnterGCUnsafeRegion* lir) {
3955 Register temp = ToRegister(lir->temp0());
3957 masm.loadJSContext(temp);
3959 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3960 masm.add32(Imm32(1), inUnsafeRegion);
3962 Label ok;
3963 masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
3964 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
3965 masm.bind(&ok);
3968 void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
3969 LDebugLeaveGCUnsafeRegion* lir) {
3970 Register temp = ToRegister(lir->temp0());
3972 masm.loadJSContext(temp);
3974 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3975 masm.add32(Imm32(-1), inUnsafeRegion);
3977 Label ok;
3978 masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
3979 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
3980 masm.bind(&ok);
3983 void CodeGenerator::visitSlots(LSlots* lir) {
3984 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
3985 masm.loadPtr(slots, ToRegister(lir->output()));
3988 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
3989 ValueOperand dest = ToOutValue(lir);
3990 Register base = ToRegister(lir->input());
3991 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
3993 masm.loadValue(Address(base, offset), dest);
3996 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
3997 MIRType valueType) {
3998 if (value->isConstant()) {
3999 return ConstantOrRegister(value->toConstant()->toJSValue());
4001 return TypedOrValueRegister(valueType, ToAnyRegister(value));
4004 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
4005 Register base = ToRegister(lir->slots());
4006 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4007 Address dest(base, offset);
4009 if (lir->mir()->needsBarrier()) {
4010 emitPreBarrier(dest);
4013 MIRType valueType = lir->mir()->value()->type();
4014 ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
4015 masm.storeUnboxedValue(value, valueType, dest);
4018 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
4019 Register base = ToRegister(lir->slots());
4020 int32_t offset = lir->mir()->slot() * sizeof(Value);
4022 const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
4024 if (lir->mir()->needsBarrier()) {
4025 emitPreBarrier(Address(base, offset));
4028 masm.storeValue(value, Address(base, offset));
4031 void CodeGenerator::visitElements(LElements* lir) {
4032 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4033 masm.loadPtr(elements, ToRegister(lir->output()));
4036 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4037 Address environment(ToRegister(lir->function()),
4038 JSFunction::offsetOfEnvironment());
4039 masm.unboxObject(environment, ToRegister(lir->output()));
4042 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
4043 Register func = ToRegister(lir->function());
4044 Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
4046 masm.assertFunctionIsExtended(func);
4047 #ifdef DEBUG
4048 Label isObject;
4049 masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
4050 masm.assumeUnreachable("[[HomeObject]] must be Object");
4051 masm.bind(&isObject);
4052 #endif
4054 masm.unboxObject(homeObject, ToRegister(lir->output()));
4057 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
4058 Register homeObject = ToRegister(lir->homeObject());
4059 ValueOperand output = ToOutValue(lir);
4060 Register temp = output.scratchReg();
4062 masm.loadObjProto(homeObject, temp);
4064 #ifdef DEBUG
4065 // We won't encounter a lazy proto, because the prototype is guaranteed to
4066 // either be a JSFunction or a PlainObject, and only proxy objects can have a
4067 // lazy proto.
4068 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4070 Label proxyCheckDone;
4071 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
4072 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4073 masm.bind(&proxyCheckDone);
4074 #endif
4076 Label nullProto, done;
4077 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
4079 // Box prototype and return
4080 masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
4081 masm.jump(&done);
4083 masm.bind(&nullProto);
4084 masm.moveValue(NullValue(), output);
4086 masm.bind(&done);
4089 template <class T>
4090 static T* ToConstantObject(MDefinition* def) {
4091 MOZ_ASSERT(def->isConstant());
4092 return &def->toConstant()->toObject().as<T>();
4095 void CodeGenerator::visitNewLexicalEnvironmentObject(
4096 LNewLexicalEnvironmentObject* lir) {
4097 Register output = ToRegister(lir->output());
4098 Register temp = ToRegister(lir->temp0());
4100 auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
4101 lir->mir()->templateObj());
4102 auto* scope = &templateObj->scope();
4103 gc::Heap initialHeap = gc::Heap::Default;
4105 using Fn =
4106 BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
4107 auto* ool =
4108 oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
4109 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4111 TemplateObject templateObject(templateObj);
4112 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4114 masm.bind(ool->rejoin());
4117 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4118 LNewClassBodyEnvironmentObject* lir) {
4119 Register output = ToRegister(lir->output());
4120 Register temp = ToRegister(lir->temp0());
4122 auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
4123 lir->mir()->templateObj());
4124 auto* scope = &templateObj->scope();
4125 gc::Heap initialHeap = gc::Heap::Default;
4127 using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4128 Handle<ClassBodyScope*>);
4129 auto* ool =
4130 oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
4131 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4133 TemplateObject templateObject(templateObj);
4134 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4136 masm.bind(ool->rejoin());
4139 void CodeGenerator::visitNewVarEnvironmentObject(
4140 LNewVarEnvironmentObject* lir) {
4141 Register output = ToRegister(lir->output());
4142 Register temp = ToRegister(lir->temp0());
4144 auto* templateObj =
4145 ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
4146 auto* scope = &templateObj->scope().as<VarScope>();
4147 gc::Heap initialHeap = gc::Heap::Default;
4149 using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
4150 auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
4151 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4153 TemplateObject templateObject(templateObj);
4154 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4156 masm.bind(ool->rejoin());
4159 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4160 Register obj = ToRegister(guard->input());
4161 Register temp = ToTempRegisterOrInvalid(guard->temp0());
4162 Label bail;
4163 masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4164 obj, &bail);
4165 bailoutFrom(&bail, guard->snapshot());
4168 void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
4169 Register temp = ToRegister(guard->temp0());
4170 Label bail;
4172 // Bake specific fuse address for Ion code, because we won't share this code
4173 // across realms.
4174 GuardFuse* fuse =
4175 mirGen().realm->realmFuses().getFuseByIndex(guard->mir()->fuseIndex());
4176 masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
4177 masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
4179 bailoutFrom(&bail, guard->snapshot());
4182 void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
4183 Register obj = ToRegister(guard->object());
4184 Register shapeList = ToRegister(guard->shapeList());
4185 Register temp = ToRegister(guard->temp0());
4186 Register temp2 = ToRegister(guard->temp1());
4187 Register temp3 = ToRegister(guard->temp2());
4188 Register spectre = ToTempRegisterOrInvalid(guard->temp3());
4190 Label bail;
4191 masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
4192 masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
4193 spectre, &bail);
4194 bailoutFrom(&bail, guard->snapshot());
4197 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4198 Register obj = ToRegister(guard->object());
4199 Register expected = ToRegister(guard->expected());
4200 Register temp = ToRegister(guard->temp0());
4202 masm.loadObjProto(obj, temp);
4204 Label bail;
4205 masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4206 bailoutFrom(&bail, guard->snapshot());
4209 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4210 Register obj = ToRegister(guard->input());
4211 Register temp = ToRegister(guard->temp0());
4213 masm.loadObjProto(obj, temp);
4215 Label bail;
4216 masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4217 bailoutFrom(&bail, guard->snapshot());
4220 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4221 Register obj = ToRegister(guard->input());
4222 Register temp = ToRegister(guard->temp0());
4224 Label bail;
4225 masm.branchIfNonNativeObj(obj, temp, &bail);
4226 bailoutFrom(&bail, guard->snapshot());
4229 void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
4230 Register temp = ToRegister(guard->temp0());
4231 Label bail;
4233 masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
4234 masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
4235 &bail);
4236 bailoutFrom(&bail, guard->snapshot());
4239 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4240 Register obj = ToRegister(guard->input());
4241 Register temp = ToRegister(guard->temp0());
4243 Label bail;
4244 masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4245 bailoutFrom(&bail, guard->snapshot());
4248 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4249 Register obj = ToRegister(guard->input());
4250 Register temp = ToRegister(guard->temp0());
4252 Label bail;
4253 masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4254 bailoutFrom(&bail, guard->snapshot());
4257 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4258 Register proxy = ToRegister(guard->proxy());
4259 Register temp = ToRegister(guard->temp0());
4261 Label bail;
4262 masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4263 GetDOMProxyHandlerFamily(), &bail);
4264 bailoutFrom(&bail, guard->snapshot());
4267 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4268 Register proxy = ToRegister(lir->proxy());
4269 Register temp = ToRegister(lir->temp0());
4271 pushArg(lir->mir()->id(), temp);
4272 pushArg(proxy);
4274 using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4275 callVM<Fn, ProxyGetProperty>(lir);
4278 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4279 Register proxy = ToRegister(lir->proxy());
4280 ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4282 pushArg(idVal);
4283 pushArg(proxy);
4285 using Fn =
4286 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4287 callVM<Fn, ProxyGetPropertyByValue>(lir);
4290 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4291 Register proxy = ToRegister(lir->proxy());
4292 ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4294 pushArg(idVal);
4295 pushArg(proxy);
4297 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4298 if (lir->mir()->hasOwn()) {
4299 callVM<Fn, ProxyHasOwn>(lir);
4300 } else {
4301 callVM<Fn, ProxyHas>(lir);
4305 void CodeGenerator::visitProxySet(LProxySet* lir) {
4306 Register proxy = ToRegister(lir->proxy());
4307 ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4308 Register temp = ToRegister(lir->temp0());
4310 pushArg(Imm32(lir->mir()->strict()));
4311 pushArg(rhs);
4312 pushArg(lir->mir()->id(), temp);
4313 pushArg(proxy);
4315 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4316 callVM<Fn, ProxySetProperty>(lir);
4319 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4320 Register proxy = ToRegister(lir->proxy());
4321 ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4322 ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4324 pushArg(Imm32(lir->mir()->strict()));
4325 pushArg(rhs);
4326 pushArg(idVal);
4327 pushArg(proxy);
4329 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4330 callVM<Fn, ProxySetPropertyByValue>(lir);
4333 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4334 Register obj = ToRegister(lir->obj());
4335 ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4337 pushArg(Imm32(lir->mir()->strict()));
4338 pushArg(rhs);
4339 pushArg(obj);
4341 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4342 callVM<Fn, jit::SetArrayLength>(lir);
4345 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4346 Register obj = ToRegister(lir->object());
4347 Register temp0 = ToRegister(lir->temp0());
4348 Register temp1 = ToRegister(lir->temp1());
4349 Register temp2 = ToRegister(lir->temp2());
4350 Register temp3 = ToRegister(lir->temp3());
4351 ValueOperand output = ToOutValue(lir);
4353 Label bail, cacheHit;
4354 masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
4355 output, &cacheHit);
4357 masm.branchIfNonNativeObj(obj, temp0, &bail);
4359 masm.Push(UndefinedValue());
4360 masm.moveStackPtrTo(temp3);
4362 using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
4363 MegamorphicCache::Entry* cacheEntry, Value* vp);
4364 masm.setupAlignedABICall();
4365 masm.loadJSContext(temp0);
4366 masm.passABIArg(temp0);
4367 masm.passABIArg(obj);
4368 masm.movePropertyKey(lir->mir()->name(), temp1);
4369 masm.passABIArg(temp1);
4370 masm.passABIArg(temp2);
4371 masm.passABIArg(temp3);
4373 masm.callWithABI<Fn, GetNativeDataPropertyPure>();
4375 MOZ_ASSERT(!output.aliases(ReturnReg));
4376 masm.Pop(output);
4378 masm.branchIfFalseBool(ReturnReg, &bail);
4380 masm.bind(&cacheHit);
4381 bailoutFrom(&bail, lir->snapshot());
4384 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4385 LMegamorphicLoadSlotByValue* lir) {
4386 Register obj = ToRegister(lir->object());
4387 ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4388 Register temp0 = ToRegister(lir->temp0());
4389 Register temp1 = ToRegister(lir->temp1());
4390 Register temp2 = ToRegister(lir->temp2());
4391 ValueOperand output = ToOutValue(lir);
4393 Label bail, cacheHit;
4394 masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
4395 output, &cacheHit);
4397 masm.branchIfNonNativeObj(obj, temp0, &bail);
4399 // idVal will be in vp[0], result will be stored in vp[1].
4400 masm.reserveStack(sizeof(Value));
4401 masm.Push(idVal);
4402 masm.moveStackPtrTo(temp0);
4404 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4405 MegamorphicCache::Entry* cacheEntry, Value* vp);
4406 masm.setupAlignedABICall();
4407 masm.loadJSContext(temp1);
4408 masm.passABIArg(temp1);
4409 masm.passABIArg(obj);
4410 masm.passABIArg(temp2);
4411 masm.passABIArg(temp0);
4412 masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
4414 MOZ_ASSERT(!idVal.aliases(temp0));
4415 masm.storeCallPointerResult(temp0);
4416 masm.Pop(idVal);
4418 uint32_t framePushed = masm.framePushed();
4419 Label ok;
4420 masm.branchIfTrueBool(temp0, &ok);
4421 masm.freeStack(sizeof(Value)); // Discard result Value.
4422 masm.jump(&bail);
4424 masm.bind(&ok);
4425 masm.setFramePushed(framePushed);
4426 masm.Pop(output);
4428 masm.bind(&cacheHit);
4429 bailoutFrom(&bail, lir->snapshot());
4432 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4433 Register obj = ToRegister(lir->object());
4434 ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4436 Register temp0 = ToRegister(lir->temp0());
4437 #ifndef JS_CODEGEN_X86
4438 Register temp1 = ToRegister(lir->temp1());
4439 Register temp2 = ToRegister(lir->temp2());
4440 #endif
4442 Label cacheHit, done;
4443 #ifdef JS_CODEGEN_X86
4444 masm.emitMegamorphicCachedSetSlot(
4445 lir->mir()->name(), obj, temp0, value, &cacheHit,
4446 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4447 EmitPreBarrier(masm, addr, mirType);
4449 #else
4450 masm.emitMegamorphicCachedSetSlot(
4451 lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
4452 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4453 EmitPreBarrier(masm, addr, mirType);
4455 #endif
4457 pushArg(Imm32(lir->mir()->strict()));
4458 pushArg(value);
4459 pushArg(lir->mir()->name(), temp0);
4460 pushArg(obj);
4462 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4463 callVM<Fn, SetPropertyMegamorphic<true>>(lir);
4465 masm.jump(&done);
4466 masm.bind(&cacheHit);
4468 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
4469 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
4471 saveVolatile(temp0);
4472 emitPostWriteBarrier(obj);
4473 restoreVolatile(temp0);
4475 masm.bind(&done);
4478 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4479 Register obj = ToRegister(lir->object());
4480 ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4481 Register temp0 = ToRegister(lir->temp0());
4482 Register temp1 = ToRegister(lir->temp1());
4483 Register temp2 = ToRegister(lir->temp2());
4484 Register output = ToRegister(lir->output());
4486 Label bail, cacheHit;
4487 masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
4488 &cacheHit, lir->mir()->hasOwn());
4490 masm.branchIfNonNativeObj(obj, temp0, &bail);
4492 // idVal will be in vp[0], result will be stored in vp[1].
4493 masm.reserveStack(sizeof(Value));
4494 masm.Push(idVal);
4495 masm.moveStackPtrTo(temp0);
4497 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4498 MegamorphicCache::Entry* cacheEntry, Value* vp);
4499 masm.setupAlignedABICall();
4500 masm.loadJSContext(temp1);
4501 masm.passABIArg(temp1);
4502 masm.passABIArg(obj);
4503 masm.passABIArg(temp2);
4504 masm.passABIArg(temp0);
4505 if (lir->mir()->hasOwn()) {
4506 masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4507 } else {
4508 masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
4511 MOZ_ASSERT(!idVal.aliases(temp0));
4512 masm.storeCallPointerResult(temp0);
4513 masm.Pop(idVal);
4515 uint32_t framePushed = masm.framePushed();
4516 Label ok;
4517 masm.branchIfTrueBool(temp0, &ok);
4518 masm.freeStack(sizeof(Value)); // Discard result Value.
4519 masm.jump(&bail);
4521 masm.bind(&ok);
4522 masm.setFramePushed(framePushed);
4523 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4524 masm.freeStack(sizeof(Value));
4525 masm.bind(&cacheHit);
4527 bailoutFrom(&bail, lir->snapshot());
4530 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4531 LGuardIsNotArrayBufferMaybeShared* guard) {
4532 Register obj = ToRegister(guard->input());
4533 Register temp = ToRegister(guard->temp0());
4535 Label bail;
4536 masm.loadObjClassUnsafe(obj, temp);
4537 masm.branchPtr(Assembler::Equal, temp,
4538 ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
4539 masm.branchPtr(Assembler::Equal, temp,
4540 ImmPtr(&SharedArrayBufferObject::class_), &bail);
4541 masm.branchPtr(Assembler::Equal, temp,
4542 ImmPtr(&ResizableArrayBufferObject::class_), &bail);
4543 bailoutFrom(&bail, guard->snapshot());
4546 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4547 Register obj = ToRegister(guard->input());
4548 Register temp = ToRegister(guard->temp0());
4550 Label bail;
4551 masm.loadObjClassUnsafe(obj, temp);
4552 masm.branchIfClassIsNotTypedArray(temp, &bail);
4553 bailoutFrom(&bail, guard->snapshot());
4556 void CodeGenerator::visitGuardIsFixedLengthTypedArray(
4557 LGuardIsFixedLengthTypedArray* guard) {
4558 Register obj = ToRegister(guard->input());
4559 Register temp = ToRegister(guard->temp0());
4561 Label bail;
4562 masm.loadObjClassUnsafe(obj, temp);
4563 masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
4564 bailoutFrom(&bail, guard->snapshot());
4567 void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
4568 Register obj = ToRegister(guard->input());
4570 Label bail;
4572 Address handlerAddr(obj, ProxyObject::offsetOfHandler());
4573 masm.branchPtr(Assembler::NotEqual, handlerAddr,
4574 ImmPtr(guard->mir()->handler()), &bail);
4576 bailoutFrom(&bail, guard->snapshot());
4579 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4580 Register input = ToRegister(guard->input());
4581 Register expected = ToRegister(guard->expected());
4583 Assembler::Condition cond =
4584 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4585 bailoutCmpPtr(cond, input, expected, guard->snapshot());
4588 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4589 Register input = ToRegister(guard->input());
4590 Register expected = ToRegister(guard->expected());
4592 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
4595 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4596 Register str = ToRegister(guard->str());
4597 Register scratch = ToRegister(guard->temp0());
4599 LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4600 volatileRegs.takeUnchecked(scratch);
4602 Label bail;
4603 masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4604 &bail);
4605 bailoutFrom(&bail, guard->snapshot());
4608 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4609 Register symbol = ToRegister(guard->symbol());
4611 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4612 guard->snapshot());
4615 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4616 Register num = ToRegister(guard->num());
4618 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4619 guard->snapshot());
4622 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4623 Register str = ToRegister(lir->string());
4624 Register output = ToRegister(lir->output());
4626 Label vmCall, done;
4627 masm.loadStringIndexValue(str, output, &vmCall);
4628 masm.jump(&done);
4631 masm.bind(&vmCall);
4633 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4634 volatileRegs.takeUnchecked(output);
4635 masm.PushRegsInMask(volatileRegs);
4637 using Fn = int32_t (*)(JSString* str);
4638 masm.setupAlignedABICall();
4639 masm.passABIArg(str);
4640 masm.callWithABI<Fn, GetIndexFromString>();
4641 masm.storeCallInt32Result(output);
4643 masm.PopRegsInMask(volatileRegs);
4645 // GetIndexFromString returns a negative value on failure.
4646 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
4649 masm.bind(&done);
4652 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4653 Register str = ToRegister(lir->string());
4654 Register output = ToRegister(lir->output());
4655 Register temp = ToRegister(lir->temp0());
4657 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4659 Label bail;
4660 masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4661 bailoutFrom(&bail, lir->snapshot());
4664 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4665 Register str = ToRegister(lir->string());
4666 FloatRegister output = ToFloatRegister(lir->output());
4667 Register temp0 = ToRegister(lir->temp0());
4668 Register temp1 = ToRegister(lir->temp1());
4670 Label vmCall, done;
4671 // Use indexed value as fast path if possible.
4672 masm.loadStringIndexValue(str, temp0, &vmCall);
4673 masm.convertInt32ToDouble(temp0, output);
4674 masm.jump(&done);
4676 masm.bind(&vmCall);
4678 // Reserve stack for holding the result value of the call.
4679 masm.reserveStack(sizeof(double));
4680 masm.moveStackPtrTo(temp0);
4682 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4683 volatileRegs.takeUnchecked(temp0);
4684 volatileRegs.takeUnchecked(temp1);
4685 masm.PushRegsInMask(volatileRegs);
4687 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
4688 masm.setupAlignedABICall();
4689 masm.loadJSContext(temp1);
4690 masm.passABIArg(temp1);
4691 masm.passABIArg(str);
4692 masm.passABIArg(temp0);
4693 masm.callWithABI<Fn, StringToNumberPure>();
4694 masm.storeCallPointerResult(temp0);
4696 masm.PopRegsInMask(volatileRegs);
4698 Label ok;
4699 masm.branchIfTrueBool(temp0, &ok);
4701 // OOM path, recovered by StringToNumberPure.
4703 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4704 // flow-insensitively, and using it here would confuse the stack height
4705 // tracking.
4706 masm.addToStackPtr(Imm32(sizeof(double)));
4707 bailout(lir->snapshot());
4709 masm.bind(&ok);
4710 masm.Pop(output);
4712 masm.bind(&done);
4715 void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
4716 Register obj = ToRegister(guard->input());
4717 Register temp = ToRegister(guard->temp0());
4719 // Load obj->elements.
4720 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
4722 // Make sure there are no dense elements.
4723 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
4724 bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
4727 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4728 Register input = ToRegister(lir->input());
4729 Register64 output = ToOutRegister64(lir);
4731 masm.move32To64ZeroExtend(input, output);
4734 void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
4735 Register64 output) {
4736 Register temp = output.scratchReg();
4738 saveLive(lir);
4740 masm.reserveStack(sizeof(uint64_t));
4741 masm.moveStackPtrTo(temp);
4742 pushArg(temp);
4743 pushArg(input);
4745 using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
4746 callVM<Fn, DoStringToInt64>(lir);
4748 masm.load64(Address(masm.getStackPointer(), 0), output);
4749 masm.freeStack(sizeof(uint64_t));
4751 restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
4754 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4755 Register input = ToRegister(lir->input());
4756 Register64 output = ToOutRegister64(lir);
4758 emitStringToInt64(lir, input, output);
4761 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4762 ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
4763 Register temp = ToRegister(lir->temp0());
4764 Register64 output = ToOutRegister64(lir);
4766 int checks = 3;
4768 Label fail, done;
4769 // Jump to fail if this is the last check and we fail it,
4770 // otherwise to the next test.
4771 auto emitTestAndUnbox = [&](auto testAndUnbox) {
4772 MOZ_ASSERT(checks > 0);
4774 checks--;
4775 Label notType;
4776 Label* target = checks ? &notType : &fail;
4778 testAndUnbox(target);
4780 if (checks) {
4781 masm.jump(&done);
4782 masm.bind(&notType);
4786 Register tag = masm.extractTag(input, temp);
4788 // BigInt.
4789 emitTestAndUnbox([&](Label* target) {
4790 masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4791 masm.unboxBigInt(input, temp);
4792 masm.loadBigInt64(temp, output);
4795 // Boolean
4796 emitTestAndUnbox([&](Label* target) {
4797 masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4798 masm.unboxBoolean(input, temp);
4799 masm.move32To64ZeroExtend(temp, output);
4802 // String
4803 emitTestAndUnbox([&](Label* target) {
4804 masm.branchTestString(Assembler::NotEqual, tag, target);
4805 masm.unboxString(input, temp);
4806 emitStringToInt64(lir, temp, output);
4809 MOZ_ASSERT(checks == 0);
4811 bailoutFrom(&fail, lir->snapshot());
4812 masm.bind(&done);
4815 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4816 Register operand = ToRegister(lir->input());
4817 Register64 output = ToOutRegister64(lir);
4819 masm.loadBigInt64(operand, output);
// Create the out-of-line VM-call fallback used when inline BigInt allocation
// fails. On 32-bit targets the int64 input is passed as two 32-bit halves.
// |type| selects the signed (BigInt64) or unsigned (BigUint64) VM function.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }
  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
// Allocate a BigInt in |output| and initialize it from the int64 |input|.
// Attempts inline nursery allocation first and falls back to a VM call
// (created by createBigIntOutOfLine) if that fails. If no temp register was
// allocated by the LIR (|maybeTemp| == InvalidReg), a scratch register is
// borrowed via push/pop around the allocation.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    // Preserve the borrowed register across the allocation attempt.
    masm.push(temp);

    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    // Must restore |temp| before jumping to the OOL path as well, so the
    // stack is balanced on both paths.
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input);
  masm.bind(ool->rejoin());
}
// Box an int64 value as a freshly-allocated BigInt object.
void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
  Register64 input = ToRegister64(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
}
// Guard that the input Value is bit-identical to the expected constant;
// bail out of the compiled code otherwise.
void CodeGenerator::visitGuardValue(LGuardValue* lir) {
  ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
  Value expected = lir->mir()->expected();
  Label bail;
  masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the input Value is null or undefined; bail out otherwise.
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
  ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);

  ScratchTagScope tag(masm, input);
  masm.splitTagForTest(input, tag);

  Label done;
  // Null passes immediately; otherwise the tag must be undefined.
  masm.branchTestNull(Assembler::Equal, tag, &done);

  Label bail;
  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
  bailoutFrom(&bail, lir->snapshot());

  masm.bind(&done);
}
// Guard that the input Value is not an object; bail out if it is.
void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
  ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);

  Label bail;
  masm.branchTestObject(Assembler::Equal, input, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on a function's flag word: all |expectedFlags| must be set and none
// of |unexpectedFlags| may be set; bail out otherwise. Either mask may be
// zero, in which case its test is skipped entirely.
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
  Register function = ToRegister(lir->function());

  Label bail;
  if (uint16_t flags = lir->mir()->expectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
  }
  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the function is a constructor that is not a built-in;
// bail out otherwise.
void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
    LGuardFunctionIsNonBuiltinCtor* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard on a function's kind. Depending on bailOnEquality(), bail when the
// kind matches the expected kind, or when it does not.
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
  Register function = ToRegister(lir->function());
  Register temp = ToRegister(lir->temp0());

  Assembler::Condition cond =
      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;

  Label bail;
  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Guard that the function's script slot holds the expected BaseScript;
// bail out otherwise.
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
  Register function = ToRegister(lir->function());

  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  // Instruction whose live volatile registers must be preserved.
  LInstruction* lir_;
  // The object being written into (register or constant allocation).
  const LAllocation* object_;

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};
// For a compile-time-known tenured cell, consult the arena's buffered-cells
// bitmap directly: jump to |exit| if the cell is already in the store buffer,
// to |callVM| if the arena still has the shared sentinel set (needs a real
// ArenaCellSet allocated by the VM); otherwise set the bit inline and exit.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the word/bit position of this cell in the arena's cell bitmap
  // at compile time; only the bit test is emitted.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  // Return the borrowed register to the caller's pool.
  regs.add(temp);
}
// Emit a post-write barrier for a write into |objreg|. Tries cheap inline
// checks (constant-cell bitmap, or the one-element whole-cell cache) before
// falling back to an ABI call into the VM. |maybeConstant| is non-null when
// the object is a compile-time constant; |isGlobal| selects the dedicated
// global-object barrier VM function.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
// Emit a post-write barrier for an object given as an LAllocation, which may
// be a constant (materialized into a scratch register) or a register.
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
5070 // Returns true if `def` might be allocated in the nursery.
5071 static bool ValueNeedsPostBarrier(MDefinition* def) {
5072 if (def->isBox()) {
5073 def = def->toBox()->input();
5075 if (def->type() == MIRType::Value) {
5076 return true;
5078 return NeedsPostBarrier(def->type());
// Out-of-line path that calls the element post-write barrier, capturing the
// registers and index information needed by the slow path.
class OutOfLineElementPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  // Volatile registers live across the barrier call; saved/restored there.
  LiveRegisterSet liveVolatileRegs_;
  // Element index (constant or register).
  const LAllocation* index_;
  // Constant adjustment added to the index before the VM call.
  int32_t indexDiff_;
  Register obj_;
  Register scratch_;

 public:
  OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
                                   Register obj, const LAllocation* index,
                                   Register scratch, int32_t indexDiff)
      : liveVolatileRegs_(liveVolatileRegs),
        index_(index),
        indexDiff_(indexDiff),
        obj_(obj),
        scratch_(scratch) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineElementPostWriteBarrier(this);
  }

  const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
  const LAllocation* index() const { return index_; }
  int32_t indexDiff() const { return indexDiff_; }

  Register object() const { return obj_; }
  Register scratch() const { return scratch_; }
};
// Emit an element post-write barrier for storing |val| into obj[index].
// Statically elides the barrier for constants and typed values that can
// never be nursery cells; otherwise emits inline nursery checks with an
// out-of-line VM call for the tenured-object/nursery-value case.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    const LAllocation* index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constant GC things must be tenured, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
      liveVolatileRegs, obj, index, scratch, indexDiff);
  addOutOfLineCode(ool, mir);

  // If the object itself is in the nursery, no barrier is needed.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  // Only take the slow path if the stored value is a nursery cell.
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Slow path for emitElementPostWriteBarrier: saves live volatile registers,
// materializes the (possibly adjusted) index, and calls
// PostWriteElementBarrier in the VM via the C ABI.
void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
    OutOfLineElementPostWriteBarrier* ool) {
  Register obj = ool->object();
  Register scratch = ool->scratch();
  const LAllocation* index = ool->index();
  int32_t indexDiff = ool->indexDiff();

  masm.PushRegsInMask(ool->liveVolatileRegs());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(obj);
  regs.takeUnchecked(scratch);

  Register indexReg;
  if (index->isConstant()) {
    indexReg = regs.takeAny();
    masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
  } else {
    indexReg = ToRegister(index);
    regs.takeUnchecked(indexReg);
    if (indexDiff != 0) {
      // Clobbers indexReg; see the assertion below for why that's safe.
      masm.add32(Imm32(indexDiff), indexReg);
    }
  }

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(gen->runtime), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(indexReg);
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  // We don't need a sub32 here because indexReg must be in liveVolatileRegs
  // if indexDiff is not zero, so it will be restored below.
  MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));

  masm.PopRegsInMask(ool->liveVolatileRegs());

  masm.jump(ool->rejoin());
}
// Emit a post-write barrier for an object already held in a register.
void CodeGenerator::emitPostWriteBarrier(Register objreg) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(objreg);
  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}
// Slow path: preserve live volatile registers around the barrier emission.
void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
    OutOfLineCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  const LAllocation* obj = ool->object();
  emitPostWriteBarrier(obj);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  // Non-constant allocations can't be proven to be the global at compile
  // time, so no fast path is emitted for them.
  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  // Non-zero flag means the global was already put in the store buffer.
  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}
// Shared implementation for typed post-write barriers (Object/String/BigInt
// values): skip the barrier if the object is in the nursery or is the
// already-barriered global, and take the OOL path only when the written
// value is a nursery cell.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Debug-check that the LIR value type matches the instantiated barrier.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
// Shared implementation for boxed-Value post-write barriers; like
// visitPostWriteBarrierCommon but tests the whole Value for being a
// nursery cell.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
5283 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5284 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5285 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
5288 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5289 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5290 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
5293 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5294 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5295 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
5298 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5299 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5300 visitPostWriteBarrierCommonV(lir, ool);
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  // Instruction whose live volatile registers must be preserved.
  LInstruction* lir_;
  // The object written into and the element index (allocations).
  const LAllocation* object_;
  const LAllocation* index_;

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};
// Slow path for the element post-write barriers: saves live volatile
// registers and calls PostWriteElementBarrier(rt, obj, index) via the C ABI.
// A constant object is materialized into a scratch register first.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
5361 void CodeGenerator::visitPostWriteElementBarrierO(
5362 LPostWriteElementBarrierO* lir) {
5363 auto ool = new (alloc())
5364 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5365 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5366 ool);
5369 void CodeGenerator::visitPostWriteElementBarrierS(
5370 LPostWriteElementBarrierS* lir) {
5371 auto ool = new (alloc())
5372 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5373 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5374 ool);
5377 void CodeGenerator::visitPostWriteElementBarrierBI(
5378 LPostWriteElementBarrierBI* lir) {
5379 auto ool = new (alloc())
5380 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5381 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5382 ool);
5385 void CodeGenerator::visitPostWriteElementBarrierV(
5386 LPostWriteElementBarrierV* lir) {
5387 auto ool = new (alloc())
5388 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5389 visitPostWriteBarrierCommonV(lir, ool);
// Debug assertion that an elided post-write barrier was indeed unnecessary:
// OK if the object is in the nursery, or the value is not a nursery cell;
// otherwise the compiled code hits assumeUnreachable.
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
    LAssertCanElidePostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  ValueOperand value =
      ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
  Register temp = ToRegister(lir->temp0());

  Label ok;
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);

  masm.assumeUnreachable("Unexpected missing post write barrier");

  masm.bind(&ok);
}
// Emit a call to a native (C++) function for LCallNative or LCallClassHook.
// Sets up the native calling convention (cx, argc, vp), builds a fake exit
// frame so the stack can be walked, performs the ABI call, and unboxes the
// result from vp[0] into JSReturnOperand. Statement order here mirrors the
// NativeExitFrameLayout and must not be changed.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  MCallBase* mir = call->mir();

  uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    // Class hooks only know the callee at runtime, in a register.
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    // LCallNative has a single known target baked in at compile time.
    WrappedFunction* target = call->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      mir->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Call a known native function. If the result is unused and the target
// provides a dedicated ignores-return-value entry point in its JitInfo,
// call that instead.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }
  emitCallNative(call, native);
}
5535 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5536 emitCallNative(call, call->mir()->target());
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // Proxies keep their reserved slots out-of-line; slot 0 holds the
      // private value.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
// Call a DOM method through its JSJitInfo entry point. Builds an
// IonDOMMethod exit frame and passes (cx, obj, private, args) per the DOM
// method ABI. The stack-layout comments below track the StackPointer through
// each push; the ordering is mandated by IonDOMMethodExitFrameLayout.
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments. Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    // Infallible methods can't fail, so skip the ReturnReg check.
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
// Fetch a self-hosting intrinsic value by name via a VM call.
void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));

  using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
  callVM<Fn, GetIntrinsicValue>(lir);
}
// Perform a function call through the generic jit::InvokeFunction VM path.
// The pushArg order must match the InvokeFunction VM function's signature
// (arguments are pushed last-to-first).
void CodeGenerator::emitCallInvokeFunction(
    LInstruction* call, Register calleereg, bool constructing,
    bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
  // Nestle %esp up to the argument vector.
  // Each path must account for framePushed_ separately, for callVM to be valid.
  masm.freeStack(unusedStack);

  pushArg(masm.getStackPointer());  // argv.
  pushArg(Imm32(argc));             // argc.
  pushArg(Imm32(ignoresReturnValue));
  pushArg(Imm32(constructing));  // constructing.
  pushArg(calleereg);            // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(call);

  // Un-nestle %esp from the argument vector. No prefix was pushed.
  masm.reserveStack(unusedStack);
}
// Call an unknown callee through the shared Ion generic-call trampoline.
void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
  // The callee is passed straight through to the trampoline.
  MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);

  Register argcReg = ToRegister(call->getArgc());
  uint32_t unusedStack =
      UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Known-target case is handled by LCallKnown.
  MOZ_ASSERT(!call->hasSingleTarget());

  masm.checkStackAlignment();

  masm.move32(Imm32(call->numActualArgs()), argcReg);

  // Nestle the StackPointer up to the argument vector.
  masm.freeStack(unusedStack);
  ensureOsiSpace();

  auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
                                            : IonGenericCallKind::Call;

  TrampolinePtr genericCallStub =
      gen->jitRuntime()->getIonGenericCallStub(kind);
  uint32_t callOffset = masm.callJit(genericCallStub);
  markSafepointAt(callOffset, call);

  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Restore stack pointer.
  masm.setFramePushed(frameSize());
  emitRestoreStackPointerFromFP();

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), unusedStack),
                   JSReturnOperand);
#ifdef DEBUG
    // The replacement value itself must be an object.
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif
    masm.bind(&notPrimitive);
  }
}
// Emit a loop that shifts the |argc| Values currently on top of the stack
// down (towards higher addresses) by one word (8 bytes), overwriting the
// bottom-most value. Used by the fun_call and bound-function paths to drop
// or realign stack slots without repushing everything.
//
//   argc    - number of Values to shift (read-only here).
//   curr    - clobbered; loop cursor, starts at the stack pointer.
//   end     - clobbered; address one-past the last word to copy.
//   scratch - clobbered; holds the word being copied.
//   done    - jumped to when the copy is complete (loop exit).
5779 void JitRuntime::generateIonGenericCallArgumentsShift(
5780 MacroAssembler& masm, Register argc, Register curr, Register end,
5781 Register scratch, Label* done) {
5782 static_assert(sizeof(Value) == 8);
5783 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5784 // overwriting the first value.
5786 // Initialize `curr` to the destination of the first copy, and `end` to the
5787 // final value of curr.
5788 masm.moveStackPtrTo(curr);
5789 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
// Copy one pointer-sized word per iteration, low addresses first.
5791 Label loop;
5792 masm.bind(&loop);
5793 masm.branchPtr(Assembler::Equal, curr, end, done);
5794 masm.loadPtr(Address(curr, 8), scratch);
5795 masm.storePtr(scratch, Address(curr, 0));
5796 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5797 masm.jump(&loop);
// Generate the shared trampoline used by LCallGeneric (see visitCallGeneric).
// One stub is generated per IonGenericCallKind (Call / Construct). The stub
// guards on the callee, enters its realm, and tail-calls the callee's jit
// entry (via the arguments rectifier if too few args were passed). Natives,
// fun_call, bound functions, and all failure cases are handled by the
// dedicated paths below; the last resort is a VM call to InvokeFunction.
5800 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5801 IonGenericCallKind kind) {
5802 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5803 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5805 // This code is tightly coupled with visitCallGeneric.
5807 // Upon entry:
5808 // IonGenericCallCalleeReg contains a pointer to the callee object.
5809 // IonGenericCallArgcReg contains the number of actual args.
5810 // The arguments have been pushed onto the stack:
5811 // [newTarget] (iff isConstructing)
5812 // [argN]
5813 // ...
5814 // [arg1]
5815 // [arg0]
5816 // [this]
5817 // <return address> (if not JS_USE_LINK_REGISTER)
5819 // This trampoline is responsible for entering the callee's realm,
5820 // massaging the stack into the right shape, and then performing a
5821 // tail call. We will return directly to the Ion code from the
5822 // callee.
5824 // To do a tail call, we keep the return address in a register, even
5825 // on platforms that don't normally use a link register, and push it
5826 // just before jumping to the callee, after we are done setting up
5827 // the stack.
5829 // The caller is responsible for switching back to the caller's
5830 // realm and cleaning up the stack.
5832 Register calleeReg = IonGenericCallCalleeReg;
5833 Register argcReg = IonGenericCallArgcReg;
5834 Register scratch = IonGenericCallScratch;
5835 Register scratch2 = IonGenericCallScratch2;
5837 #ifndef JS_USE_LINK_REGISTER
5838 Register returnAddrReg = IonGenericCallReturnAddrReg;
5839 masm.pop(returnAddrReg);
5840 #endif
5842 #ifdef JS_CODEGEN_ARM
5843 // The default second scratch register on arm is lr, which we need
5844 // preserved for tail calls.
5845 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5846 #endif
5848 bool isConstructing = kind == IonGenericCallKind::Construct;
// |entry| is also the re-entry point used by the fun_call and bound-function
// paths after they have rewritten the callee/arguments.
5850 Label entry, notFunction, noJitEntry, vmCall;
5851 masm.bind(&entry);
5853 // Guard that the callee is actually a function.
5854 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5855 calleeReg, &notFunction);
5857 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5858 // If these tests fail, we will call into the VM to throw an exception.
5859 if (isConstructing) {
5860 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5861 Assembler::Zero, &vmCall);
5862 } else {
5863 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5864 calleeReg, scratch, &vmCall);
5867 if (isConstructing) {
5868 // Use the slow path if CreateThis was unable to create the |this| object.
5869 Address thisAddr(masm.getStackPointer(), 0);
5870 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5873 masm.switchToObjectRealm(calleeReg, scratch);
5875 // Load jitCodeRaw for callee if it exists.
5876 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5878 // ****************************
5879 // * Functions with jit entry *
5880 // ****************************
5881 masm.loadJitCodeRaw(calleeReg, scratch2);
5883 // Construct the JitFrameLayout.
5884 masm.PushCalleeToken(calleeReg, isConstructing);
5885 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5886 #ifndef JS_USE_LINK_REGISTER
5887 masm.push(returnAddrReg);
5888 #endif
5890 // Check whether we need a rectifier frame.
5891 Label noRectifier;
5892 masm.loadFunctionArgCount(calleeReg, scratch);
5893 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5895 // Tail-call the arguments rectifier.
5896 // Because all trampolines are created at the same time,
5897 // we can't create a TrampolinePtr for the arguments rectifier,
5898 // because it hasn't been linked yet. We can, however, directly
5899 // encode its offset.
5900 Label rectifier;
5901 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5903 masm.jump(&rectifier);
5906 // Tail call the jit entry.
5907 masm.bind(&noRectifier);
5908 masm.jump(scratch2);
5910 // ********************
5911 // * Native functions *
5912 // ********************
5913 masm.bind(&noJitEntry);
5914 if (!isConstructing) {
5915 generateIonGenericCallFunCall(masm, &entry, &vmCall);
5917 generateIonGenericCallNativeFunction(masm, isConstructing);
5919 // *******************
5920 // * Bound functions *
5921 // *******************
5922 // TODO: support class hooks?
5923 masm.bind(&notFunction);
5924 if (!isConstructing) {
5925 // TODO: support generic bound constructors?
5926 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
5929 // ********************
5930 // * Fallback VM call *
5931 // ********************
5932 masm.bind(&vmCall);
// Push InvokeFunction's arguments in reverse order of the Fn signature.
5934 masm.push(masm.getStackPointer()); // argv
5935 masm.push(argcReg); // argc
5936 masm.push(Imm32(false)); // ignores return value
5937 masm.push(Imm32(isConstructing)); // constructing
5938 masm.push(calleeReg); // callee
5940 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5941 MutableHandleValue);
5942 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
// Like the rectifier above, the VM wrapper can't be referenced via a
// TrampolinePtr yet, so jump to it by offset.
5943 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
5944 Label invokeFunctionVMEntry;
5945 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
5947 masm.pushFrameDescriptor(FrameType::IonJS);
5948 #ifndef JS_USE_LINK_REGISTER
5949 masm.push(returnAddrReg);
5950 #endif
5951 masm.jump(&invokeFunctionVMEntry);
// Emit the native-function path of the generic call stub: build argv
// (callee/this/args already on the stack), enter a fake native exit frame,
// and call the native through the C++ ABI. Unlike the jit-entry paths this
// is a regular call/ret, not a tail call. On failure (native returned
// false), control transfers to the masm exception label.
5954 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
5955 bool isConstructing) {
5956 Register calleeReg = IonGenericCallCalleeReg;
5957 Register argcReg = IonGenericCallArgcReg;
5958 Register scratch = IonGenericCallScratch;
5959 Register scratch2 = IonGenericCallScratch2;
5960 Register contextReg = IonGenericCallScratch3;
5961 #ifndef JS_USE_LINK_REGISTER
5962 Register returnAddrReg = IonGenericCallReturnAddrReg;
5963 #endif
5965 // Push a value containing the callee, which will become argv[0].
5966 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
5968 // Load the callee address into calleeReg.
5969 #ifdef JS_SIMULATOR
// Under the simulator all natives are called through one redirected stub.
5970 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
5971 #else
5972 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
5973 calleeReg);
5974 #endif
5976 // Load argv into scratch2.
5977 masm.moveStackPtrTo(scratch2);
5979 // Push argc.
5980 masm.push(argcReg);
5982 masm.loadJSContext(contextReg);
5984 // Construct native exit frame. Note that unlike other cases in this
5985 // trampoline, this code does not use a tail call.
5986 masm.pushFrameDescriptor(FrameType::IonJS);
5987 #ifdef JS_USE_LINK_REGISTER
5988 masm.pushReturnAddress();
5989 #else
5990 masm.push(returnAddrReg);
5991 #endif
5993 masm.push(FramePointer);
5994 masm.moveStackPtrTo(FramePointer);
5995 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
// Native signature: bool (*)(JSContext* cx, unsigned argc, Value* vp).
5997 masm.setupUnalignedABICall(scratch);
5998 masm.passABIArg(contextReg); // cx
5999 masm.passABIArg(argcReg); // argc
6000 masm.passABIArg(scratch2); // argv
6002 masm.callWithABI(calleeReg);
6004 // Test for failure.
6005 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// On success the native stored its result in the exit frame's result slot.
6007 masm.loadValue(
6008 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
6009 JSReturnOperand);
6011 // Leave the exit frame.
6012 masm.moveToStackPtr(FramePointer);
6013 masm.pop(FramePointer);
6015 // Return.
6016 masm.ret();
// Emit the Function.prototype.call fast path of the generic call stub.
// If the callee's native is js::fun_call, rewrite the stack so that the
// original |this| becomes the callee and the arguments shift down one slot,
// then loop back to |entry| to dispatch the inner call. Falls through to
// the code following this function when the callee is some other native;
// bails to |vmCall| when |this| is not an object.
6019 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6020 Label* entry, Label* vmCall) {
6021 Register calleeReg = IonGenericCallCalleeReg;
6022 Register argcReg = IonGenericCallArgcReg;
6023 Register scratch = IonGenericCallScratch;
6024 Register scratch2 = IonGenericCallScratch2;
6025 Register scratch3 = IonGenericCallScratch3;
6027 Label notFunCall;
6028 masm.branchPtr(Assembler::NotEqual,
6029 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6030 ImmPtr(js::fun_call), &notFunCall);
6032 // In general, we can implement fun_call by replacing calleeReg with
6033 // |this|, sliding all the other arguments down, and decrementing argc.
6035 // *BEFORE* *AFTER*
6036 // [argN] argc = N+1 <padding>
6037 // ... [argN] argc = N
6038 // [arg1] ...
6039 // [arg0] [arg1] <- now arg0
6040 // [this] <- top of stack (aligned) [arg0] <- now this
6042 // The only exception is when argc is already 0, in which case instead
6043 // of shifting arguments down we replace [this] with UndefinedValue():
6045 // *BEFORE* *AFTER*
6046 // [this] argc = 0 [undef] argc = 0
6048 // After making this transformation, we can jump back to the beginning
6049 // of this trampoline to handle the inner call.
6051 // Guard that |this| is an object. If it is, replace calleeReg.
6052 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6053 masm.movePtr(scratch, calleeReg);
6055 Label hasArgs;
6056 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6058 // No arguments. Replace |this| with |undefined| and start from the top.
6059 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6060 masm.jump(entry);
6062 masm.bind(&hasArgs);
6064 Label doneSliding;
6065 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6066 scratch3, &doneSliding);
6067 masm.bind(&doneSliding);
// The old |this| slot was consumed by the shift; one fewer actual arg.
6068 masm.sub32(Imm32(1), argcReg);
6070 masm.jump(entry);
6072 masm.bind(&notFunCall);
// Emit the bound-function path of the generic call stub (non-constructing
// only). If the callee is a BoundFunctionObject, replace |this| with the
// bound this, push the bound arguments (keeping jit-call stack alignment),
// load the bound target into the callee register, and loop back to |entry|
// to dispatch the call to the target. Bails to |vmCall| for non-bound
// non-function callees or when the combined argument count would exceed
// JIT_ARGS_LENGTH_MAX.
6075 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6076 Label* entry,
6077 Label* vmCall) {
6078 Register calleeReg = IonGenericCallCalleeReg;
6079 Register argcReg = IonGenericCallArgcReg;
6080 Register scratch = IonGenericCallScratch;
6081 Register scratch2 = IonGenericCallScratch2;
6082 Register scratch3 = IonGenericCallScratch3;
6084 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6085 &BoundFunctionObject::class_, scratch, calleeReg,
6086 vmCall);
6088 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6089 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6090 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6091 Address firstInlineArgSlot(
6092 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6094 // Check that we won't be pushing too many arguments.
// The number of bound args is stored in the upper bits of the flags word.
6095 masm.load32(flagsSlot, scratch);
6096 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6097 masm.add32(argcReg, scratch);
6098 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6100 // The stack is currently correctly aligned for a jit call. We will
6101 // be updating the `this` value and potentially adding additional
6102 // arguments. On platforms with 16-byte alignment, if the number of
6103 // bound arguments is odd, we have to move the arguments that are
6104 // currently on the stack. For example, with one bound argument:
6106 // *BEFORE* *AFTER*
6107 // [argN] <padding>
6108 // ... [argN] |
6109 // [arg1] ... | These arguments have been
6110 // [arg0] [arg1] | shifted down 8 bytes.
6111 // [this] <- top of stack (aligned) [arg0] v
6112 // [bound0] <- one bound argument (odd)
6113 // [boundThis] <- top of stack (aligned)
6115 Label poppedThis;
6116 if (JitStackValueAlignment > 1) {
6117 Label alreadyAligned;
// Testing the lowest bit of the bound-arg count checks odd/even.
6118 masm.branchTest32(Assembler::Zero, flagsSlot,
6119 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6120 &alreadyAligned);
6122 // We have an odd number of bound arguments. Shift the existing arguments
6123 // down by 8 bytes.
6124 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6125 scratch3, &poppedThis);
6126 masm.bind(&alreadyAligned);
6129 // Pop the current `this`. It will be replaced with the bound `this`.
6130 masm.freeStack(sizeof(Value));
6131 masm.bind(&poppedThis);
6133 // Load the number of bound arguments in scratch
6134 masm.load32(flagsSlot, scratch);
6135 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6137 Label donePushingBoundArguments;
6138 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6139 &donePushingBoundArguments);
6141 // Update argc to include bound arguments.
6142 masm.add32(scratch, argcReg);
6144 // Load &boundArgs[0] in scratch2.
// Few bound args are stored inline in the object; more than
// MaxInlineBoundArgs spill to an out-of-line array object's elements.
6145 Label outOfLineBoundArguments, haveBoundArguments;
6146 masm.branch32(Assembler::Above, scratch,
6147 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6148 &outOfLineBoundArguments);
6149 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6150 masm.jump(&haveBoundArguments);
6152 masm.bind(&outOfLineBoundArguments);
6153 masm.unboxObject(firstInlineArgSlot, scratch2);
6154 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6156 masm.bind(&haveBoundArguments);
6158 // Load &boundArgs[numBoundArgs] in scratch.
6159 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6160 masm.computeEffectiveAddress(lastBoundArg, scratch);
6162 // Push the bound arguments, starting with the last one.
6163 // Copying pre-decrements scratch until scratch2 is reached.
6164 Label boundArgumentsLoop;
6165 masm.bind(&boundArgumentsLoop);
6166 masm.subPtr(Imm32(sizeof(Value)), scratch);
6167 masm.pushValue(Address(scratch, 0));
6168 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6169 masm.bind(&donePushingBoundArguments);
6171 // Push the bound `this`.
6172 masm.pushValue(thisSlot);
6174 // Load the target in calleeReg.
6175 masm.unboxObject(targetSlot, calleeReg);
6177 // At this point, all preconditions for entering the trampoline are met:
6178 // - calleeReg contains a pointer to the callee object
6179 // - argcReg contains the number of actual args (now including bound args)
6180 // - the arguments are on the stack with the correct alignment.
6181 // Instead of generating more code, we can jump back to the entry point
6182 // of the trampoline to call the bound target.
6183 masm.jump(entry);
// Generate code for a call whose single target function is statically known
// and has a jit entry. Emits an inline JitFrameLayout and a direct jit call,
// with no inline guards: WarpBuilder guarantees enough stack args, and the
// only slow case (class constructor called without |new|) is diverted to
// emitCallInvokeFunction.
6186 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6187 Register calleereg = ToRegister(call->getFunction());
6188 Register objreg = ToRegister(call->getTempObject());
6189 uint32_t unusedStack =
6190 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6191 WrappedFunction* target = call->getSingleTarget();
6193 // Native single targets (except wasm) are handled by LCallNative.
6194 MOZ_ASSERT(target->hasJitEntry());
6196 // Missing arguments must have been explicitly appended by WarpBuilder.
6197 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6198 MOZ_ASSERT(target->nargs() <=
6199 call->mir()->numStackArgs() - numNonArgsOnStack);
6201 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6203 masm.checkStackAlignment();
// Calling a class constructor without |new| must throw; take the VM path.
6205 if (target->isClassConstructor() && !call->isConstructing()) {
6206 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6207 call->ignoresReturnValue(), call->numActualArgs(),
6208 unusedStack);
6209 return;
6212 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6214 MOZ_ASSERT(!call->mir()->needsThisCheck());
6216 if (call->mir()->maybeCrossRealm()) {
6217 masm.switchToObjectRealm(calleereg, objreg);
6220 masm.loadJitCodeRaw(calleereg, objreg);
6222 // Nestle the StackPointer up to the argument vector.
6223 masm.freeStack(unusedStack);
6225 // Construct the JitFrameLayout.
6226 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6227 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6229 // Finally call the function in objreg.
6230 ensureOsiSpace();
6231 uint32_t callOffset = masm.callJit(objreg);
6232 markSafepointAt(callOffset, call);
6234 if (call->mir()->maybeCrossRealm()) {
6235 static_assert(!JSReturnOperand.aliases(ReturnReg),
6236 "ReturnReg available as scratch after scripted calls");
6237 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6240 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6241 // and undo the earlier |freeStack(unusedStack)|.
6242 int prefixGarbage =
6243 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6244 masm.adjustStack(prefixGarbage - unusedStack);
6246 // If the return value of the constructing function is Primitive,
6247 // replace the return value with the Object from CreateThis.
6248 if (call->mir()->isConstructing()) {
6249 Label notPrimitive;
6250 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6251 &notPrimitive);
6252 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6253 JSReturnOperand);
6254 #ifdef DEBUG
6255 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6256 &notPrimitive);
6257 masm.assumeUnreachable("CreateThis creates an object");
6258 #endif
6259 masm.bind(&notPrimitive);
// Emit a VM call to jit::InvokeFunction for an apply-style LIR instruction
// (T is one of the LApply*/LConstruct* classes). Unlike the non-template
// overload above, argc is dynamic (in a register) and the arguments have
// already been pushed, so argv is simply the current stack pointer.
6263 template <typename T>
6264 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6265 Register objreg = ToRegister(apply->getTempObject());
6267 // Push the space used by the arguments.
6268 masm.moveStackPtrTo(objreg);
6270 pushArg(objreg); // argv.
6271 pushArg(ToRegister(apply->getArgc())); // argc.
6272 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6273 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6274 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6276 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6277 MutableHandleValue);
6278 callVM<Fn, jit::InvokeFunction>(apply);
6281 // Do not bailout after the execution of this function since the stack no longer
6282 // correspond to what is expected by the snapshots.
// Reserve |argcreg| Value slots (plus alignment padding) on the stack for an
// apply-style call. |scratch| is clobbered; |argcreg| is preserved. In DEBUG
// builds the padding slot is poisoned so stale data is never read as an
// argument.
6283 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6284 Register scratch) {
6285 // Use scratch register to calculate stack space (including padding).
6286 masm.movePtr(argcreg, scratch);
6288 // Align the JitFrameLayout on the JitStackAlignment.
6289 if (JitStackValueAlignment > 1) {
6290 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6291 "Stack padding assumes that the frameSize is correct");
6292 MOZ_ASSERT(JitStackValueAlignment == 2);
6293 Label noPaddingNeeded;
6294 // if the number of arguments is odd, then we do not need any padding.
// NOTE: |this| is pushed later, so an odd argc yields an even total.
6295 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6296 masm.addPtr(Imm32(1), scratch);
6297 masm.bind(&noPaddingNeeded);
6300 // Reserve space for copying the arguments.
6301 NativeObject::elementsSizeMustNotOverflow();
6302 masm.lshiftPtr(Imm32(ValueShift), scratch);
6303 masm.subFromStackPtr(scratch);
6305 #ifdef DEBUG
6306 // Put a magic value in the space reserved for padding. Note, this code
6307 // cannot be merged with the previous test, as not all architectures can
6308 // write below their stack pointers.
6309 if (JitStackValueAlignment > 1) {
6310 MOZ_ASSERT(JitStackValueAlignment == 2);
6311 Label noPaddingNeeded;
6312 // if the number of arguments is odd, then we do not need any padding.
6313 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6314 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6315 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6316 masm.bind(&noPaddingNeeded);
6318 #endif
6321 // Do not bailout after the execution of this function since the stack no longer
6322 // correspond to what is expected by the snapshots.
// Construct-variant of emitAllocateSpaceForApply: additionally pushes
// |new.target| (between the padding and the arguments). After this call,
// |newTargetAndScratch| no longer holds new.target — it has been repurposed
// as a scratch register. |argcreg| is preserved.
6323 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6324 Register argcreg, Register newTargetAndScratch) {
6325 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6326 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6327 // we can't write to |newTargetAndScratch| before |new.target| has
6328 // been pushed onto the stack.
6329 if (JitStackValueAlignment > 1) {
6330 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6331 "Stack padding assumes that the frameSize is correct");
6332 MOZ_ASSERT(JitStackValueAlignment == 2);
6334 Label noPaddingNeeded;
6335 // If the number of arguments is even, then we do not need any padding.
// Parity differs from the apply case because new.target adds one extra
// pushed Value alongside |this|.
6336 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6337 masm.pushValue(MagicValue(JS_ARG_POISON));
6338 masm.bind(&noPaddingNeeded);
6341 // Push |new.target| after the padding value, but before any arguments.
6342 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6344 // Use newTargetAndScratch to calculate stack space (including padding).
6345 masm.movePtr(argcreg, newTargetAndScratch);
6347 // Reserve space for copying the arguments.
6348 NativeObject::elementsSizeMustNotOverflow();
6349 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6350 masm.subFromStackPtr(newTargetAndScratch);
6353 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from argvSrcBase+argvSrcOffset to the stack at
// argvDstOffset, one pointer-sized word at a time, iterating from the last
// value down to the first. |argvIndex| enters holding the number of Values
// to copy and counts down to zero.
6354 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6355 Register argvIndex, Register copyreg,
6356 size_t argvSrcOffset,
6357 size_t argvDstOffset) {
6358 Label loop;
6359 masm.bind(&loop);
6361 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6362 // to loop back, we have to substract the size of the word which are
6363 // copied.
6364 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6365 int32_t(argvSrcOffset) - sizeof(void*));
6366 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6367 int32_t(argvDstOffset) - sizeof(void*));
6368 masm.loadPtr(srcPtr, copyreg);
6369 masm.storePtr(copyreg, dstPtr);
6371 // Handle 32 bits architectures.
// On 32-bit targets a Value is two words, so copy the low word too.
6372 if (sizeof(Value) == 2 * sizeof(void*)) {
6373 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6374 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6375 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6376 int32_t(argvDstOffset) - 2 * sizeof(void*));
6377 masm.loadPtr(srcPtrLow, copyreg);
6378 masm.storePtr(copyreg, dstPtrLow);
6381 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
// Recompute the stack pointer as FramePointer - frameSize(), discarding any
// dynamically-sized argument area pushed for the call.
6384 void CodeGenerator::emitRestoreStackPointerFromFP() {
6385 // This is used to restore the stack pointer after a call with a dynamic
6386 // number of arguments.
6388 MOZ_ASSERT(masm.framePushed() == frameSize());
6390 int32_t offset = -int32_t(frameSize());
6391 masm.computeEffectiveAddress(Address(FramePointer, offset),
6392 masm.getStackPointer());
// Copy |argcreg| actual arguments from the current frame's JitFrameLayout
// (skipping |extraFormals| leading formals) into the argument space already
// reserved at the top of the stack. |scratch| and |copyreg| are clobbered;
// |argcreg| is preserved. No-op when argc is zero.
6395 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6396 Register copyreg, uint32_t extraFormals) {
6397 Label end;
6399 // Skip the copy of arguments if there are none.
6400 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6402 // clang-format off
6404 // We are making a copy of the arguments which are above the JitFrameLayout
6405 // of the current Ion frame.
6407 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6409 // clang-format on
6411 // Compute the source and destination offsets into the stack.
6412 Register argvSrcBase = FramePointer;
6413 size_t argvSrcOffset =
6414 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6415 size_t argvDstOffset = 0;
6417 Register argvIndex = scratch;
6418 masm.move32(argcreg, argvIndex);
6420 // Copy arguments.
6421 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6422 argvDstOffset);
6424 // Join with all arguments copied and the extra stack usage computed.
6425 masm.bind(&end);
// Push the arguments for |fun.apply(thisArg, arguments)| when the argument
// count is already in a register: reserve space, copy the caller's actual
// arguments, then push |this|.
6428 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6429 Register scratch) {
6430 // Holds the function nargs. Initially the number of args to the caller.
6431 Register argcreg = ToRegister(apply->getArgc());
6432 Register copyreg = ToRegister(apply->getTempObject());
6433 uint32_t extraFormals = apply->numExtraFormals();
6435 emitAllocateSpaceForApply(argcreg, scratch);
6437 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6439 // Push |this|.
6440 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// Push the arguments for |fun.apply(thisArg, argsObj)| where argsObj is an
// ArgumentsObject: read argc from its length slot, reserve space, copy the
// values out of its ArgumentsData, then push |this|. The argsObj register is
// repurposed to hold argc afterwards (it aliases the argc register).
6443 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6444 // argc and argsObj are mapped to the same calltemp register.
6445 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6447 Register tmpArgc = ToRegister(apply->getTempObject());
6448 Register argsObj = ToRegister(apply->getArgsObj());
6450 // Load argc into tmpArgc.
// The length slot packs flags in the low bits; shift them off.
6451 Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
6452 masm.unboxInt32(lengthAddr, tmpArgc);
6453 masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
6455 // Allocate space on the stack for arguments. This modifies scratch.
6456 emitAllocateSpaceForApply(tmpArgc, scratch);
6458 // Load arguments data
6459 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6460 argsObj);
6461 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6463 // This is the end of the lifetime of argsObj.
6464 // After this call, the argsObj register holds the argument count instead.
6465 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6467 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc|+|argvSrcOffset| into the
// already-reserved argument space on the stack. On return, |srcBaseAndArgc|
// holds the argument count (the base pointer is dead), and |tmpArgc| and
// |scratch| are clobbered.
6470 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6471 Register srcBaseAndArgc,
6472 Register scratch,
6473 size_t argvSrcOffset) {
6474 // Preconditions:
6475 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6476 // the stack to hold arguments.
6477 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6479 // Postconditions:
6480 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6481 // the allocated space.
6482 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6484 // |scratch| is used as a temp register within this function and clobbered.
6486 Label noCopy, epilogue;
6488 // Skip the copy of arguments if there are none.
6489 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6491 // Copy the values. This code is skipped entirely if there are
6492 // no values.
6493 size_t argvDstOffset = 0;
6495 Register argvSrcBase = srcBaseAndArgc;
6496 Register copyreg = scratch;
// Save argc across the copy loop (which consumes tmpArgc as the index);
// the dst offset is bumped to account for the extra pushed word.
6498 masm.push(tmpArgc);
6499 Register argvIndex = tmpArgc;
6500 argvDstOffset += sizeof(void*);
6502 // Copy
6503 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6504 argvDstOffset);
6506 // Restore.
6507 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6508 masm.jump(&epilogue);
6510 // Clear argc if we skipped the copy step.
6511 masm.bind(&noCopy);
6512 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6514 // Join with all arguments copied and the extra stack usage computed.
6515 // Note, "srcBase" has become "argc".
6516 masm.bind(&epilogue);
// Push the arguments for |fun.apply(thisArg, array)|: read argc from the
// array's elements header, reserve space, copy the dense elements, then push
// |this|. The elements register is repurposed to hold argc afterwards.
6519 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6520 Register scratch) {
6521 Register tmpArgc = ToRegister(apply->getTempObject());
6522 Register elementsAndArgc = ToRegister(apply->getElements());
6524 // Invariants guarded in the caller:
6525 // - the array is not too long
6526 // - the array length equals its initialized length
6528 // The array length is our argc for the purposes of allocating space.
6529 Address length(ToRegister(apply->getElements()),
6530 ObjectElements::offsetOfLength());
6531 masm.load32(length, tmpArgc);
6533 // Allocate space for the values.
6534 emitAllocateSpaceForApply(tmpArgc, scratch);
6536 // After this call "elements" has become "argc".
6537 size_t elementsOffset = 0;
6538 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6540 // Push |this|.
6541 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Push the arguments for a spread/apply-style construct call with argc in a
// register: reserve space and push new.target, copy the caller's actual
// arguments, then push |this|. The newTarget register (aliased by |scratch|)
// is consumed and becomes a scratch register.
6544 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6545 Register scratch) {
6546 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6548 // Holds the function nargs. Initially the number of args to the caller.
6549 Register argcreg = ToRegister(construct->getArgc());
6550 Register copyreg = ToRegister(construct->getTempObject());
6551 uint32_t extraFormals = construct->numExtraFormals();
6553 // Allocate space for the values.
6554 // After this call "newTarget" has become "scratch".
6555 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6557 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6559 // Push |this|.
6560 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Push the arguments for a construct call whose arguments come from a dense
// array: read argc from the elements header, reserve space and push
// new.target, copy the elements, then push |this|. Both the elements
// register (becomes argc) and the newTarget register (becomes scratch) are
// repurposed.
6563 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6564 Register scratch) {
6565 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6567 Register tmpArgc = ToRegister(construct->getTempObject());
6568 Register elementsAndArgc = ToRegister(construct->getElements());
6570 // Invariants guarded in the caller:
6571 // - the array is not too long
6572 // - the array length equals its initialized length
6574 // The array length is our argc for the purposes of allocating space.
6575 Address length(ToRegister(construct->getElements()),
6576 ObjectElements::offsetOfLength());
6577 masm.load32(length, tmpArgc);
6579 // Allocate space for the values.
6580 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6582 // After this call "elements" has become "argc" and "newTarget" has become
6583 // "scratch".
6584 size_t elementsOffset = 0;
6585 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6587 // Push |this|.
6588 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
6591 template <typename T>
6592 void CodeGenerator::emitApplyGeneric(T* apply) {
6593 // Holds the function object.
6594 Register calleereg = ToRegister(apply->getFunction());
6596 // Temporary register for modifying the function object.
6597 Register objreg = ToRegister(apply->getTempObject());
6598 Register scratch = ToRegister(apply->getTempForArgCopy());
6600 // Holds the function nargs, computed in the invoker or (for ApplyArray,
6601 // ConstructArray, or ApplyArgsObj) in the argument pusher.
6602 Register argcreg = ToRegister(apply->getArgc());
6604 // Copy the arguments of the current function.
6606 // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
6607 // compute argc. The argc register and the elements/argsObj register
6608 // are the same; argc must not be referenced before the call to
6609 // emitPushArguments() and elements/argsObj must not be referenced
6610 // after it returns.
6612 // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
6613 // with scratch; newTarget must not be referenced after this point.
6615 // objreg is dead across this call.
6616 emitPushArguments(apply, scratch);
6618 masm.checkStackAlignment();
6620 bool constructing = apply->mir()->isConstructing();
6622 // If the function is native, only emit the call to InvokeFunction.
6623 if (apply->hasSingleTarget() &&
6624 apply->getSingleTarget()->isNativeWithoutJitEntry()) {
6625 emitCallInvokeFunction(apply);
6627 #ifdef DEBUG
6628 // Native constructors are guaranteed to return an Object value, so we never
6629 // have to replace a primitive result with the previously allocated Object
6630 // from CreateThis.
6631 if (constructing) {
6632 Label notPrimitive;
6633 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6634 &notPrimitive);
6635 masm.assumeUnreachable("native constructors don't return primitives");
6636 masm.bind(&notPrimitive);
6638 #endif
6640 emitRestoreStackPointerFromFP();
6641 return;
6644 Label end, invoke;
6646 // Unless already known, guard that calleereg is actually a function object.
6647 if (!apply->hasSingleTarget()) {
6648 masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
6649 calleereg, &invoke);
6652 // Guard that calleereg is an interpreted function with a JSScript.
6653 masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
6655 // Guard that callee allows the [[Call]] or [[Construct]] operation required.
6656 if (constructing) {
6657 masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
6658 Assembler::Zero, &invoke);
6659 } else {
6660 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
6661 calleereg, objreg, &invoke);
6664 // Use the slow path if CreateThis was unable to create the |this| object.
6665 if (constructing) {
6666 Address thisAddr(masm.getStackPointer(), 0);
6667 masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
6670 // Call with an Ion frame or a rectifier frame.
6672 if (apply->mir()->maybeCrossRealm()) {
6673 masm.switchToObjectRealm(calleereg, objreg);
6676 // Knowing that calleereg is a non-native function, load jitcode.
6677 masm.loadJitCodeRaw(calleereg, objreg);
6679 masm.PushCalleeToken(calleereg, constructing);
6680 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);
6682 Label underflow, rejoin;
6684 // Check whether the provided arguments satisfy target argc.
6685 if (!apply->hasSingleTarget()) {
6686 Register nformals = scratch;
6687 masm.loadFunctionArgCount(calleereg, nformals);
6688 masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
6689 } else {
6690 masm.branch32(Assembler::Below, argcreg,
6691 Imm32(apply->getSingleTarget()->nargs()), &underflow);
6694 // Skip the construction of the rectifier frame because we have no
6695 // underflow.
6696 masm.jump(&rejoin);
6698 // Argument fixup needed. Get ready to call the argumentsRectifier.
6700 masm.bind(&underflow);
6702 // Hardcode the address of the argumentsRectifier code.
6703 TrampolinePtr argumentsRectifier =
6704 gen->jitRuntime()->getArgumentsRectifier();
6705 masm.movePtr(argumentsRectifier, objreg);
6708 masm.bind(&rejoin);
6710 // Finally call the function in objreg, as assigned by one of the paths
6711 // above.
6712 ensureOsiSpace();
6713 uint32_t callOffset = masm.callJit(objreg);
6714 markSafepointAt(callOffset, apply);
6716 if (apply->mir()->maybeCrossRealm()) {
6717 static_assert(!JSReturnOperand.aliases(ReturnReg),
6718 "ReturnReg available as scratch after scripted calls");
6719 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6722 // Discard JitFrameLayout fields still left on the stack.
6723 masm.freeStack(sizeof(JitFrameLayout) -
6724 JitFrameLayout::bytesPoppedAfterCall());
6725 masm.jump(&end);
6728 // Handle uncompiled or native functions.
6730 masm.bind(&invoke);
6731 emitCallInvokeFunction(apply);
6734 masm.bind(&end);
6736 // If the return value of the constructing function is Primitive,
6737 // replace the return value with the Object from CreateThis.
6738 if (constructing) {
6739 Label notPrimitive;
6740 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6741 &notPrimitive);
6742 masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6744 #ifdef DEBUG
6745 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6746 &notPrimitive);
6747 masm.assumeUnreachable("CreateThis creates an object");
6748 #endif
6750 masm.bind(&notPrimitive);
6753 // Pop arguments and continue.
6754 emitRestoreStackPointerFromFP();
6757 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6758 LSnapshot* snapshot = apply->snapshot();
6759 Register argcreg = ToRegister(apply->getArgc());
6761 // Ensure that we have a reasonable number of arguments.
6762 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6764 emitApplyGeneric(apply);
6767 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6768 Register argsObj = ToRegister(apply->getArgsObj());
6769 Register temp = ToRegister(apply->getTempObject());
6771 Label bail;
6772 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6773 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6774 bailoutFrom(&bail, apply->snapshot());
6776 emitApplyGeneric(apply);
6779 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6780 LSnapshot* snapshot = apply->snapshot();
6781 Register tmp = ToRegister(apply->getTempObject());
6783 Address length(ToRegister(apply->getElements()),
6784 ObjectElements::offsetOfLength());
6785 masm.load32(length, tmp);
6787 // Ensure that we have a reasonable number of arguments.
6788 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6790 // Ensure that the array does not contain an uninitialized tail.
6792 Address initializedLength(ToRegister(apply->getElements()),
6793 ObjectElements::offsetOfInitializedLength());
6794 masm.sub32(initializedLength, tmp);
6795 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6797 emitApplyGeneric(apply);
6800 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
6801 LSnapshot* snapshot = lir->snapshot();
6802 Register argcreg = ToRegister(lir->getArgc());
6804 // Ensure that we have a reasonable number of arguments.
6805 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6807 emitApplyGeneric(lir);
6810 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6811 LSnapshot* snapshot = lir->snapshot();
6812 Register tmp = ToRegister(lir->getTempObject());
6814 Address length(ToRegister(lir->getElements()),
6815 ObjectElements::offsetOfLength());
6816 masm.load32(length, tmp);
6818 // Ensure that we have a reasonable number of arguments.
6819 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6821 // Ensure that the array does not contain an uninitialized tail.
6823 Address initializedLength(ToRegister(lir->getElements()),
6824 ObjectElements::offsetOfInitializedLength());
6825 masm.sub32(initializedLength, tmp);
6826 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6828 emitApplyGeneric(lir);
6831 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
6833 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6834 masm.assumeUnreachable("end-of-block assumed unreachable");
6837 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6838 encode(lir->snapshot());
6841 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6842 masm.assumeUnreachable("must be unreachable");
6845 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6846 masm.assumeUnreachable("must be unreachable");
6849 // Out-of-line path to report over-recursed error and fail.
6850 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
6851 LInstruction* lir_;
6853 public:
6854 explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
6856 void accept(CodeGenerator* codegen) override {
6857 codegen->visitCheckOverRecursedFailure(this);
6860 LInstruction* lir() const { return lir_; }
6863 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
6864 // If we don't push anything on the stack, skip the check.
6865 if (omitOverRecursedCheck()) {
6866 return;
6869 // Ensure that this frame will not cross the stack limit.
6870 // This is a weak check, justified by Ion using the C stack: we must always
6871 // be some distance away from the actual limit, since if the limit is
6872 // crossed, an error must be thrown, which requires more frames.
6874 // It must always be possible to trespass past the stack limit.
6875 // Ion may legally place frames very close to the limit. Calling additional
6876 // C functions may then violate the limit without any checking.
6878 // Since Ion frames exist on the C stack, the stack limit may be
6879 // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
6881 CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
6882 addOutOfLineCode(ool, lir->mir());
6884 // Conditional forward (unlikely) branch to failure.
6885 const void* limitAddr = gen->runtime->addressOfJitStackLimit();
6886 masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
6887 ool->entry());
6888 masm.bind(ool->rejoin());
6891 void CodeGenerator::visitCheckOverRecursedFailure(
6892 CheckOverRecursedFailure* ool) {
6893 // The OOL path is hit if the recursion depth has been exceeded.
6894 // Throw an InternalError for over-recursion.
6896 // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
6897 // to save all live registers to avoid crashes if CheckOverRecursed triggers
6898 // a GC.
6899 saveLive(ool->lir());
6901 using Fn = bool (*)(JSContext*);
6902 callVM<Fn, CheckOverRecursed>(ool->lir());
6904 restoreLive(ool->lir());
6905 masm.jump(ool->rejoin());
6908 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
6909 // If scripts are being profiled, create a new IonScriptCounts for the
6910 // profiling data, which will be attached to the associated JSScript or
6911 // wasm module after code generation finishes.
6912 if (!gen->hasProfilingScripts()) {
6913 return nullptr;
6916 // This test inhibits IonScriptCount creation for wasm code which is
6917 // currently incompatible with wasm codegen for two reasons: (1) wasm code
6918 // must be serializable and script count codegen bakes in absolute
6919 // addresses, (2) wasm code does not have a JSScript with which to associate
6920 // code coverage data.
6921 JSScript* script = gen->outerInfo().script();
6922 if (!script) {
6923 return nullptr;
6926 auto counts = MakeUnique<IonScriptCounts>();
6927 if (!counts || !counts->init(graph.numBlocks())) {
6928 return nullptr;
6931 for (size_t i = 0; i < graph.numBlocks(); i++) {
6932 MBasicBlock* block = graph.getBlock(i)->mir();
6934 uint32_t offset = 0;
6935 char* description = nullptr;
6936 if (MResumePoint* resume = block->entryResumePoint()) {
6937 // Find a PC offset in the outermost script to use. If this
6938 // block is from an inlined script, find a location in the
6939 // outer script to associate information about the inlining
6940 // with.
6941 while (resume->caller()) {
6942 resume = resume->caller();
6944 offset = script->pcToOffset(resume->pc());
6946 if (block->entryResumePoint()->caller()) {
6947 // Get the filename and line number of the inner script.
6948 JSScript* innerScript = block->info().script();
6949 description = js_pod_calloc<char>(200);
6950 if (description) {
6951 snprintf(description, 200, "%s:%u", innerScript->filename(),
6952 innerScript->lineno());
6957 if (!counts->block(i).init(block->id(), offset, description,
6958 block->numSuccessors())) {
6959 return nullptr;
6962 for (size_t j = 0; j < block->numSuccessors(); j++) {
6963 counts->block(i).setSuccessor(
6964 j, skipTrivialBlocks(block->getSuccessor(j))->id());
6968 scriptCounts_ = counts.release();
6969 return scriptCounts_;
6972 // Structure for managing the state tracked for a block by script counters.
6973 struct ScriptCountBlockState {
6974 IonBlockCounts& block;
6975 MacroAssembler& masm;
6977 Sprinter printer;
6979 public:
6980 ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
6981 : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
6983 bool init() {
6984 if (!printer.init()) {
6985 return false;
6988 // Bump the hit count for the block at the start. This code is not
6989 // included in either the text for the block or the instruction byte
6990 // counts.
6991 masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
6993 // Collect human readable assembly for the code generated in the block.
6994 masm.setPrinter(&printer);
6996 return true;
6999 void visitInstruction(LInstruction* ins) {
7000 #ifdef JS_JITSPEW
7001 // Prefix stream of assembly instructions with their LIR instruction
7002 // name and any associated high level info.
7003 if (const char* extra = ins->getExtraName()) {
7004 printer.printf("[%s:%s]\n", ins->opName(), extra);
7005 } else {
7006 printer.printf("[%s]\n", ins->opName());
7008 #endif
7011 ~ScriptCountBlockState() {
7012 masm.setPrinter(nullptr);
7014 if (JS::UniqueChars str = printer.release()) {
7015 block.setCode(str.get());
7020 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
7021 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
7022 masm.propagateOOM(ionScriptLabels_.append(label));
7024 // If IonScript::invalidationCount_ != 0, the script has been invalidated.
7025 masm.branch32(Assembler::NotEqual,
7026 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
7027 invalidated);
7030 #ifdef DEBUG
7031 void CodeGenerator::emitAssertGCThingResult(Register input,
7032 const MDefinition* mir) {
7033 MIRType type = mir->type();
7034 MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
7035 type == MIRType::Symbol || type == MIRType::BigInt);
7037 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7038 regs.take(input);
7040 Register temp = regs.takeAny();
7041 masm.push(temp);
7043 // Don't check if the script has been invalidated. In that case invalid
7044 // types are expected (until we reach the OsiPoint and bailout).
7045 Label done;
7046 branchIfInvalidated(temp, &done);
7048 # ifndef JS_SIMULATOR
7049 // Check that we have a valid GC pointer.
7050 // Disable for wasm because we don't have a context on wasm compilation
7051 // threads and this needs a context.
7052 // Also disable for simulator builds because the C++ call is a lot slower
7053 // there than on actual hardware.
7054 if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
7055 saveVolatile();
7056 masm.setupUnalignedABICall(temp);
7057 masm.loadJSContext(temp);
7058 masm.passABIArg(temp);
7059 masm.passABIArg(input);
7061 switch (type) {
7062 case MIRType::Object: {
7063 using Fn = void (*)(JSContext* cx, JSObject* obj);
7064 masm.callWithABI<Fn, AssertValidObjectPtr>();
7065 break;
7067 case MIRType::String: {
7068 using Fn = void (*)(JSContext* cx, JSString* str);
7069 masm.callWithABI<Fn, AssertValidStringPtr>();
7070 break;
7072 case MIRType::Symbol: {
7073 using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
7074 masm.callWithABI<Fn, AssertValidSymbolPtr>();
7075 break;
7077 case MIRType::BigInt: {
7078 using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
7079 masm.callWithABI<Fn, AssertValidBigIntPtr>();
7080 break;
7082 default:
7083 MOZ_CRASH();
7086 restoreVolatile();
7088 # endif
7090 masm.bind(&done);
7091 masm.pop(temp);
7094 void CodeGenerator::emitAssertResultV(const ValueOperand input,
7095 const MDefinition* mir) {
7096 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7097 regs.take(input);
7099 Register temp1 = regs.takeAny();
7100 Register temp2 = regs.takeAny();
7101 masm.push(temp1);
7102 masm.push(temp2);
7104 // Don't check if the script has been invalidated. In that case invalid
7105 // types are expected (until we reach the OsiPoint and bailout).
7106 Label done;
7107 branchIfInvalidated(temp1, &done);
7109 // Check that we have a valid GC pointer.
7110 if (JitOptions.fullDebugChecks) {
7111 saveVolatile();
7113 masm.pushValue(input);
7114 masm.moveStackPtrTo(temp1);
7116 using Fn = void (*)(JSContext* cx, Value* v);
7117 masm.setupUnalignedABICall(temp2);
7118 masm.loadJSContext(temp2);
7119 masm.passABIArg(temp2);
7120 masm.passABIArg(temp1);
7121 masm.callWithABI<Fn, AssertValidValue>();
7122 masm.popValue(input);
7123 restoreVolatile();
7126 masm.bind(&done);
7127 masm.pop(temp2);
7128 masm.pop(temp1);
7131 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7132 MDefinition* mir) {
7133 if (lir->numDefs() == 0) {
7134 return;
7137 MOZ_ASSERT(lir->numDefs() == 1);
7138 if (lir->getDef(0)->isBogusTemp()) {
7139 return;
7142 Register output = ToRegister(lir->getDef(0));
7143 emitAssertGCThingResult(output, mir);
7146 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7147 if (lir->numDefs() == 0) {
7148 return;
7151 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7152 if (!lir->getDef(0)->output()->isRegister()) {
7153 return;
7156 ValueOperand output = ToOutValue(lir);
7158 emitAssertResultV(output, mir);
7161 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
7162 // In debug builds, check that LIR instructions return valid values.
7164 MDefinition* mir = ins->mirRaw();
7165 if (!mir) {
7166 return;
7169 switch (mir->type()) {
7170 case MIRType::Object:
7171 case MIRType::String:
7172 case MIRType::Symbol:
7173 case MIRType::BigInt:
7174 emitGCThingResultChecks(ins, mir);
7175 break;
7176 case MIRType::Value:
7177 emitValueResultChecks(ins, mir);
7178 break;
7179 default:
7180 break;
7184 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
7185 if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
7186 return;
7188 if (!lir->snapshot()) {
7189 return;
7191 if (lir->isOsiPoint()) {
7192 return;
7195 masm.comment("emitDebugForceBailing");
7196 const void* bailAfterCounterAddr =
7197 gen->runtime->addressOfIonBailAfterCounter();
7199 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
7201 Label done, notBail;
7202 masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
7203 Imm32(0), &done);
7205 Register temp = regs.takeAny();
7207 masm.push(temp);
7208 masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
7209 masm.sub32(Imm32(1), temp);
7210 masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
7212 masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
7214 masm.pop(temp);
7215 bailout(lir->snapshot());
7217 masm.bind(&notBail);
7218 masm.pop(temp);
7220 masm.bind(&done);
7222 #endif
7224 bool CodeGenerator::generateBody() {
7225 JitSpewCont(JitSpew_Codegen, "\n");
7226 AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
7228 JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
7229 IonScriptCounts* counts = maybeCreateScriptCounts();
7231 const bool compilingWasm = gen->compilingWasm();
7233 for (size_t i = 0; i < graph.numBlocks(); i++) {
7234 current = graph.getBlock(i);
7236 // Don't emit any code for trivial blocks, containing just a goto. Such
7237 // blocks are created to split critical edges, and if we didn't end up
7238 // putting any instructions in them, we can skip them.
7239 if (current->isTrivial()) {
7240 continue;
7243 #ifdef JS_JITSPEW
7244 const char* filename = nullptr;
7245 size_t lineNumber = 0;
7246 JS::LimitedColumnNumberOneOrigin columnNumber;
7247 if (current->mir()->info().script()) {
7248 filename = current->mir()->info().script()->filename();
7249 if (current->mir()->pc()) {
7250 lineNumber = PCToLineNumber(current->mir()->info().script(),
7251 current->mir()->pc(), &columnNumber);
7254 JitSpew(JitSpew_Codegen, "--------------------------------");
7255 JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
7256 filename ? filename : "?", lineNumber,
7257 columnNumber.oneOriginValue(),
7258 current->mir()->isLoopHeader() ? " (loop header)" : "");
7259 #endif
7261 if (current->mir()->isLoopHeader() && compilingWasm) {
7262 masm.nopAlign(CodeAlignment);
7265 masm.bind(current->label());
7267 mozilla::Maybe<ScriptCountBlockState> blockCounts;
7268 if (counts) {
7269 blockCounts.emplace(&counts->block(i), &masm);
7270 if (!blockCounts->init()) {
7271 return false;
7275 for (LInstructionIterator iter = current->begin(); iter != current->end();
7276 iter++) {
7277 if (!alloc().ensureBallast()) {
7278 return false;
7281 perfSpewer_.recordInstruction(masm, *iter);
7282 #ifdef JS_JITSPEW
7283 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
7284 iter->opName());
7285 if (const char* extra = iter->getExtraName()) {
7286 JitSpewCont(JitSpew_Codegen, ":%s", extra);
7288 JitSpewFin(JitSpew_Codegen);
7289 #endif
7291 if (counts) {
7292 blockCounts->visitInstruction(*iter);
7295 #ifdef CHECK_OSIPOINT_REGISTERS
7296 if (iter->safepoint() && !compilingWasm) {
7297 resetOsiPointRegs(iter->safepoint());
7299 #endif
7301 if (!compilingWasm) {
7302 if (MDefinition* mir = iter->mirRaw()) {
7303 if (!addNativeToBytecodeEntry(mir->trackedSite())) {
7304 return false;
7309 setElement(*iter); // needed to encode correct snapshot location.
7311 #ifdef DEBUG
7312 emitDebugForceBailing(*iter);
7313 #endif
7315 switch (iter->op()) {
7316 #ifndef JS_CODEGEN_NONE
7317 # define LIROP(op) \
7318 case LNode::Opcode::op: \
7319 visit##op(iter->to##op()); \
7320 break;
7321 LIR_OPCODE_LIST(LIROP)
7322 # undef LIROP
7323 #endif
7324 case LNode::Opcode::Invalid:
7325 default:
7326 MOZ_CRASH("Invalid LIR op");
7329 #ifdef DEBUG
7330 if (!counts) {
7331 emitDebugResultChecks(*iter);
7333 #endif
7335 if (masm.oom()) {
7336 return false;
7340 JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
7341 return true;
7344 // Out-of-line object allocation for LNewArray.
7345 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
7346 LNewArray* lir_;
7348 public:
7349 explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
7351 void accept(CodeGenerator* codegen) override {
7352 codegen->visitOutOfLineNewArray(this);
7355 LNewArray* lir() const { return lir_; }
7358 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
7359 Register objReg = ToRegister(lir->output());
7361 MOZ_ASSERT(!lir->isCall());
7362 saveLive(lir);
7364 JSObject* templateObject = lir->mir()->templateObject();
7366 if (templateObject) {
7367 pushArg(ImmGCPtr(templateObject->shape()));
7368 pushArg(Imm32(lir->mir()->length()));
7370 using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
7371 callVM<Fn, NewArrayWithShape>(lir);
7372 } else {
7373 pushArg(Imm32(GenericObject));
7374 pushArg(Imm32(lir->mir()->length()));
7376 using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
7377 callVM<Fn, NewArrayOperation>(lir);
7380 masm.storeCallPointerResult(objReg);
7382 MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7383 restoreLive(lir);
7386 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7387 FloatRegister y = ToFloatRegister(lir->y());
7388 FloatRegister x = ToFloatRegister(lir->x());
7390 using Fn = double (*)(double x, double y);
7391 masm.setupAlignedABICall();
7392 masm.passABIArg(y, ABIType::Float64);
7393 masm.passABIArg(x, ABIType::Float64);
7394 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7396 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7399 void CodeGenerator::visitHypot(LHypot* lir) {
7400 uint32_t numArgs = lir->numArgs();
7401 masm.setupAlignedABICall();
7403 for (uint32_t i = 0; i < numArgs; ++i) {
7404 masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
7407 switch (numArgs) {
7408 case 2: {
7409 using Fn = double (*)(double x, double y);
7410 masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
7411 break;
7413 case 3: {
7414 using Fn = double (*)(double x, double y, double z);
7415 masm.callWithABI<Fn, hypot3>(ABIType::Float64);
7416 break;
7418 case 4: {
7419 using Fn = double (*)(double x, double y, double z, double w);
7420 masm.callWithABI<Fn, hypot4>(ABIType::Float64);
7421 break;
7423 default:
7424 MOZ_CRASH("Unexpected number of arguments to hypot function.");
7426 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
7429 void CodeGenerator::visitNewArray(LNewArray* lir) {
7430 Register objReg = ToRegister(lir->output());
7431 Register tempReg = ToRegister(lir->temp());
7432 DebugOnly<uint32_t> length = lir->mir()->length();
7434 MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
7436 if (lir->mir()->isVMCall()) {
7437 visitNewArrayCallVM(lir);
7438 return;
7441 OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
7442 addOutOfLineCode(ool, lir->mir());
7444 TemplateObject templateObject(lir->mir()->templateObject());
7445 #ifdef DEBUG
7446 size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
7447 ObjectElements::VALUES_PER_HEADER;
7448 MOZ_ASSERT(length <= numInlineElements,
7449 "Inline allocation only supports inline elements");
7450 #endif
7451 masm.createGCObject(objReg, tempReg, templateObject,
7452 lir->mir()->initialHeap(), ool->entry());
7454 masm.bind(ool->rejoin());
7457 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
7458 visitNewArrayCallVM(ool->lir());
7459 masm.jump(ool->rejoin());
7462 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
7463 Register lengthReg = ToRegister(lir->length());
7464 Register objReg = ToRegister(lir->output());
7465 Register tempReg = ToRegister(lir->temp0());
7467 JSObject* templateObject = lir->mir()->templateObject();
7468 gc::Heap initialHeap = lir->mir()->initialHeap();
7470 using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
7471 OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
7472 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7473 StoreRegisterTo(objReg));
7475 bool canInline = true;
7476 size_t inlineLength = 0;
7477 if (templateObject->as<ArrayObject>().hasFixedElements()) {
7478 size_t numSlots =
7479 gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
7480 inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
7481 } else {
7482 canInline = false;
7485 if (canInline) {
7486 // Try to do the allocation inline if the template object is big enough
7487 // for the length in lengthReg. If the length is bigger we could still
7488 // use the template object and not allocate the elements, but it's more
7489 // efficient to do a single big allocation than (repeatedly) reallocating
7490 // the array later on when filling it.
7491 masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
7492 ool->entry());
7494 TemplateObject templateObj(templateObject);
7495 masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
7496 ool->entry());
7498 size_t lengthOffset = NativeObject::offsetOfFixedElements() +
7499 ObjectElements::offsetOfLength();
7500 masm.store32(lengthReg, Address(objReg, lengthOffset));
7501 } else {
7502 masm.jump(ool->entry());
7505 masm.bind(ool->rejoin());
7508 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
7509 Register objReg = ToRegister(lir->output());
7510 Register tempReg = ToRegister(lir->temp0());
7512 OutOfLineCode* ool;
7513 switch (lir->mir()->type()) {
7514 case MNewIterator::ArrayIterator: {
7515 using Fn = ArrayIteratorObject* (*)(JSContext*);
7516 ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
7517 StoreRegisterTo(objReg));
7518 break;
7520 case MNewIterator::StringIterator: {
7521 using Fn = StringIteratorObject* (*)(JSContext*);
7522 ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
7523 StoreRegisterTo(objReg));
7524 break;
7526 case MNewIterator::RegExpStringIterator: {
7527 using Fn = RegExpStringIteratorObject* (*)(JSContext*);
7528 ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
7529 StoreRegisterTo(objReg));
7530 break;
7532 default:
7533 MOZ_CRASH("unexpected iterator type");
7536 TemplateObject templateObject(lir->mir()->templateObject());
7537 masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7538 ool->entry());
7540 masm.bind(ool->rejoin());
7543 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
7544 Register objReg = ToRegister(lir->output());
7545 Register tempReg = ToRegister(lir->temp0());
7546 Register lengthReg = ToRegister(lir->temp1());
7547 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7549 JSObject* templateObject = lir->mir()->templateObject();
7550 gc::Heap initialHeap = lir->mir()->initialHeap();
7552 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7554 size_t n = ttemplate->length();
7555 MOZ_ASSERT(n <= INT32_MAX,
7556 "Template objects are only created for int32 lengths");
7558 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7559 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7560 lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
7561 StoreRegisterTo(objReg));
7563 TemplateObject templateObj(templateObject);
7564 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7566 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7567 ttemplate, MacroAssembler::TypedArrayLength::Fixed);
7569 masm.bind(ool->rejoin());
7572 void CodeGenerator::visitNewTypedArrayDynamicLength(
7573 LNewTypedArrayDynamicLength* lir) {
7574 Register lengthReg = ToRegister(lir->length());
7575 Register objReg = ToRegister(lir->output());
7576 Register tempReg = ToRegister(lir->temp0());
7577 LiveRegisterSet liveRegs = liveVolatileRegs(lir);
7579 JSObject* templateObject = lir->mir()->templateObject();
7580 gc::Heap initialHeap = lir->mir()->initialHeap();
7582 auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
7584 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
7585 OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
7586 lir, ArgList(ImmGCPtr(templateObject), lengthReg),
7587 StoreRegisterTo(objReg));
7589 // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
7590 MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));
7592 TemplateObject templateObj(templateObject);
7593 masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
7595 masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
7596 ttemplate,
7597 MacroAssembler::TypedArrayLength::Dynamic);
7599 masm.bind(ool->rejoin());
7602 void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
7603 pushArg(ToRegister(lir->array()));
7604 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7606 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
7607 callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
7610 void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
7611 LNewTypedArrayFromArrayBuffer* lir) {
7612 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
7613 pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
7614 pushArg(ToRegister(lir->arrayBuffer()));
7615 pushArg(ImmGCPtr(lir->mir()->templateObject()));
7617 using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
7618 HandleValue, HandleValue);
7619 callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
// Lower BindFunction (Function.prototype.bind). Attempts to allocate the
// BoundFunctionObject inline and hands it (or nullptr on allocation failure)
// to the VM function, which fills it in / allocates as needed.
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    // Round up to the alignment the pushed Values actually used.
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
// Lower NewBoundFunction: allocate a BoundFunctionObject from its template
// object inline, falling back to a VM call when inline allocation fails.
void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  JSObject* templateObj = lir->mir()->templateObj();

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line object allocation for JSOp::NewObject.
// Holds the LNewObject instruction so the slow path (a VM call) can be
// emitted out of line; see visitOutOfLineNewObject.
class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
  LNewObject* lir_;

 public:
  explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewObject(this);
  }

  LNewObject* lir() const { return lir_; }
};
// Emit the VM-call path for NewObject. Used both as the mandatory path
// (isVMCall) and as the out-of-line fallback from visitNewObject; in the
// latter case live registers must be saved/restored manually because this
// is not an LIR call instruction.
void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral: {
      MOZ_ASSERT(!templateObject);
      pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
      pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
      callVM<Fn, NewObjectOperation>(lir);
      break;
    }
    case MNewObject::ObjectCreate: {
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
    }
  }

  masm.storeCallPointerResult(objReg);

  // The output register must not be clobbered by restoreLive.
  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
// Decide whether a NewPlainObject allocation must initialize its fixed slots
// to |undefined|. Returns false (no init needed) only when every fixed slot
// is provably written by following StoreFixedSlot instructions before any
// instruction that could trigger a GC or observe the slots.
// NOTE: has a side effect — it disables the pre-barrier on the qualifying
// stores, since they write to a freshly allocated object.
static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
                                 uint32_t nfixed) {
  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

  if (nfixed == 0) {
    return false;
  }

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mir();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  // Handle the leading shape guard, if present. Subsequent stores may refer
  // to the guard instead of the allocation, so track it as |allocMir|.
  for (; iter != block->end(); iter++) {
    if (iter->isConstant()) {
      // This instruction won't trigger a GC or read object slots.
      continue;
    }
    if (iter->isGuardShape()) {
      auto* guard = iter->toGuardShape();
      if (guard->object() != allocMir || guard->shape() != shape) {
        return true;
      }
      allocMir = guard;
      iter++;
    }
    break;
  }

  for (; iter != block->end(); iter++) {
    if (iter->isConstant() || iter->isPostWriteBarrier()) {
      // These instructions won't trigger a GC or read object slots.
      continue;
    }

    if (iter->isStoreFixedSlot()) {
      MStoreFixedSlot* store = iter->toStoreFixedSlot();
      if (store->object() != allocMir) {
        return true;
      }

      // We may not initialize this object slot on allocation, so the
      // pre-barrier could read uninitialized memory. Simply disable
      // the barrier for this store: the object was just initialized
      // so the barrier is not necessary.
      store->setNeedsBarrier(false);

      uint32_t slot = store->slot();
      MOZ_ASSERT(slot < nfixed);
      if ((initializedSlots & (1 << slot)) == 0) {
        numInitialized++;
        initializedSlots |= (1 << slot);
      }

      if (numInitialized == nfixed) {
        // All fixed slots will be initialized.
        MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
        return false;
      }
      continue;
    }

    // Unhandled instruction, assume it bails or reads object slots.
    return true;
  }

  MOZ_CRASH("Shouldn't get here");
}
// Lower NewObject: inline-allocate from the template object when possible,
// otherwise (isVMCall, or inline allocation failure) go through the VM-call
// path in visitNewObjectVMCall.
void CodeGenerator::visitNewObject(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  if (lir->mir()->isVMCall()) {
    visitNewObjectVMCall(lir);
    return;
  }

  OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());

  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// Out-of-line path for visitNewObject: perform the VM call, then rejoin the
// inline code.
void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
  visitNewObjectVMCall(ool->lir());
  masm.jump(ool->rejoin());
}
7838 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
7839 Register objReg = ToRegister(lir->output());
7840 Register temp0Reg = ToRegister(lir->temp0());
7841 Register temp1Reg = ToRegister(lir->temp1());
7842 Register shapeReg = ToRegister(lir->temp2());
7844 auto* mir = lir->mir();
7845 const Shape* shape = mir->shape();
7846 gc::Heap initialHeap = mir->initialHeap();
7847 gc::AllocKind allocKind = mir->allocKind();
7849 using Fn =
7850 JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
7851 OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
7852 lir,
7853 ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
7854 Imm32(int32_t(initialHeap))),
7855 StoreRegisterTo(objReg));
7857 bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
7859 masm.movePtr(ImmGCPtr(shape), shapeReg);
7860 masm.createPlainGCObject(
7861 objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
7862 mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
7863 AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
7865 #ifdef DEBUG
7866 // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
7867 // so ensure the newly created object has the correct shape. Should the guard
7868 // ever fail, we may end up with uninitialized fixed slots, which can confuse
7869 // the GC.
7870 Label ok;
7871 masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
7872 &ok);
7873 masm.assumeUnreachable("Newly created object has the correct shape");
7874 masm.bind(&ok);
7875 #endif
7877 masm.bind(ool->rejoin());
// Lower NewArrayObject: inline-allocate an ArrayObject with fixed elements
// sized from the statically known length, falling back to a VM call on
// allocation failure.
void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  // Capacity is whatever fits in the chosen GC cell after the elements
  // header.
  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}
// Lower NewNamedLambdaObject: inline-allocate the named-lambda environment
// object from its template, calling into the VM on allocation failure.
void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  const CompileInfo& info = lir->mir()->block()->info();

  using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
      lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));

  TemplateObject templateObject(lir->mir()->templateObj());

  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Lower NewCallObject: inline-allocate a CallObject from its template,
// falling back to CallObject::createWithShape in the VM.
void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  CallObject* templateObj = lir->mir()->templateObject();

  using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
  OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
      lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
      StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Lower NewStringObject (e.g. |new String(s)|): inline-allocate the wrapper
// from its template and fill in the primitive-value and length slots, with a
// VM fallback on allocation failure.
void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.loadStringLength(input, temp);

  // The template's reserved slots are overwritten with the actual string
  // and its length.
  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}
7977 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
7978 Register obj = ToRegister(lir->object());
7979 Register value = ToRegister(lir->value());
7981 pushArg(value);
7982 pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
7983 pushArg(obj);
7984 pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7986 using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
7987 HandleObject);
7988 callVM<Fn, InitElemGetterSetterOperation>(lir);
7991 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
7992 Register objReg = ToRegister(lir->object());
7994 pushArg(ToValue(lir, LMutateProto::ValueIndex));
7995 pushArg(objReg);
7997 using Fn =
7998 bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
7999 callVM<Fn, MutatePrototype>(lir);
// Lower InitPropGetterSetter: define a named accessor property by calling
// the InitPropGetterSetterOperation VM helper.
void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  // VM-call arguments are pushed in reverse order of the Fn signature.
  pushArg(value);
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
                      Handle<PropertyName*>, HandleObject);
  callVM<Fn, InitPropGetterSetterOperation>(lir);
}
// Lower CreateThis: compute the |this| object for a constructor call via the
// CreateThisFromIon VM helper. Callee and new.target may each be either a
// compile-time constant or a register.
void CodeGenerator::visitCreateThis(LCreateThis* lir) {
  const LAllocation* callee = lir->callee();
  const LAllocation* newTarget = lir->newTarget();

  if (newTarget->isConstant()) {
    pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(newTarget));
  }

  if (callee->isConstant()) {
    pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(callee));
  }

  using Fn = bool (*)(JSContext* cx, HandleObject callee,
                      HandleObject newTarget, MutableHandleValue rval);
  callVM<Fn, jit::CreateThisFromIon>(lir);
}
// Lower CreateArgumentsObject. Fast path: inline-allocate the arguments
// object from its template (slots left uninitialized) and let
// ArgumentsObject::finishForIonPure fill it in via an ABI call. Slow path
// (no template, or either step fails): full VM call to createForIon.
void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->callObject());
  Register temp0 = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    // Preserve callObj across the ABI call below.
    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // temp0 <- address of the JIT frame (framePushed includes the Push above).
    masm.moveStackPtrTo(temp0);
    masm.addPtr(Imm32(masm.framePushed()), temp0);

    using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
                                    JSObject* scopeChain, ArgumentsObject* obj);
    masm.setupAlignedABICall();
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp0);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    // A null return means the pure finisher could not complete; fall back.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  // Slow path: call ArgumentsObject::createForIon with the frame address.
  masm.moveStackPtrTo(temp0);
  masm.addPtr(Imm32(frameSize()), temp0);

  pushArg(callObj);
  pushArg(temp0);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}
// Lower CreateInlinedArgumentsObject for inlined frames: materialize the
// actual arguments into a contiguous stack array, then try a pure inline
// allocation + finishInlineForIonPure ABI call, falling back to the
// createForInlinedIon VM call on failure.
void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp1());
  Register argsObj = ToRegister(lir->temp2());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  Label done;
  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    LiveRegisterSet liveRegs;
    liveRegs.add(callObj);
    liveRegs.add(callee);

    masm.PushRegsInMask(liveRegs);

    // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
    // a call instruction.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(callObj);
    allRegs.take(callee);
    allRegs.take(argsObj);
    allRegs.take(argsAddress);

    Register temp3 = allRegs.takeAny();
    Register temp4 = allRegs.takeAny();

    // Try to allocate an arguments object. This will leave the reserved slots
    // uninitialized, so it's important we don't GC until we initialize these
    // slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    Register numActuals = temp3;
    masm.move32(Imm32(argc), numActuals);

    using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
                                    uint32_t, ArgumentsObject*);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp4);
    masm.passABIArg(temp4);
    masm.passABIArg(callObj);
    masm.passABIArg(callee);
    masm.passABIArg(argsAddress);
    masm.passABIArg(numActuals);
    masm.passABIArg(argsObj);

    masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
    // A null return means the pure finisher could not complete; fall back.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj, callee, and values array on the stack.
    masm.addToStackPtr(
        Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
              argc * sizeof(Value)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.PopRegsInMask(liveRegs);

    // Reload argsAddress because it may have been overridden.
    masm.moveStackPtrTo(argsAddress);
  }

  // Slow path: full VM call with the stack array of actuals.
  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));

  masm.bind(&done);
}
// Shared lowering for GetInlinedArgument(Hole): select the inlined actual
// argument matching the runtime |index| by emitting a compare-and-move chain
// over the statically known actuals. |index| must already be bounds-checked
// by the caller.
template <class GetInlinedArgument>
void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
                                           Register index,
                                           ValueOperand output) {
  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

  // The index has already been bounds-checked, so the code we
  // generate here should be unreachable. We can end up in this
  // situation in self-hosted code using GetArgument(), or in a
  // monomorphically inlined function if we've inlined some CacheIR
  // that was created for a different caller.
  if (numActuals == 0) {
    masm.assumeUnreachable("LGetInlinedArgument: invalid index");
    return;
  }

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  // In debug builds, verify the only remaining index is the last one.
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}
8236 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8237 Register index = ToRegister(lir->getIndex());
8238 ValueOperand output = ToOutValue(lir);
8240 emitGetInlinedArgument(lir, index, output);
// Lower GetInlinedArgumentHole: like GetInlinedArgument, but out-of-bounds
// non-negative indices produce |undefined|; negative indices bail out.
void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();

  if (numActuals == 0) {
    // No actuals: any non-negative index is a "hole" (undefined).
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
    masm.moveValue(UndefinedValue(), output);
    return;
  }

  Label outOfBounds, done;
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
                &outOfBounds);

  emitGetInlinedArgument(lir, index, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
// Lower GetArgumentsObjectArg: load args[argno] (argno known at compile
// time) from the arguments object's ArgumentsData.
void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand out = ToOutValue(lir);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  masm.loadValue(argAddr, out);
#ifdef DEBUG
  // Debug check: the stored value must not be the magic "deleted" sentinel.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, out, &success);
  masm.assumeUnreachable(
      "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
}
// Lower SetArgumentsObjectArg: store into args[argno] (argno known at
// compile time) in the arguments object's ArgumentsData, with a pre-barrier
// on the overwritten value.
void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  emitPreBarrier(argAddr);
#ifdef DEBUG
  // Debug check: the current value must not be the magic "deleted" sentinel.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}
8308 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8309 Register temp = ToRegister(lir->temp0());
8310 Register argsObj = ToRegister(lir->argsObject());
8311 Register index = ToRegister(lir->index());
8312 ValueOperand out = ToOutValue(lir);
8314 Label bail;
8315 masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8316 bailoutFrom(&bail, lir->snapshot());
// Lower LoadArgumentsObjectArgHole: like LoadArgumentsObjectArg, but using
// the "hole" variant of the element load; bails out via the snapshot when
// the fast path fails.
void CodeGenerator::visitLoadArgumentsObjectArgHole(
    LLoadArgumentsObjectArgHole* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Lower InArgumentsObjectArg (|index in arguments|): produce a boolean in
// |out|, bailing out via the snapshot when the fast path fails.
void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  Register index = ToRegister(lir->index());
  Register out = ToRegister(lir->output());

  Label bail;
  masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
8342 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8343 Register argsObj = ToRegister(lir->argsObject());
8344 Register out = ToRegister(lir->output());
8346 Label bail;
8347 masm.loadArgumentsObjectLength(argsObj, out, &bail);
8348 bailoutFrom(&bail, lir->snapshot());
// Lower ArrayFromArgumentsObject: build a dense ArrayObject from an
// arguments object via the VM.
void CodeGenerator::visitArrayFromArgumentsObject(
    LArrayFromArgumentsObject* lir) {
  pushArg(ToRegister(lir->argsObject()));

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  callVM<Fn, js::ArrayFromArgumentsObject>(lir);
}
// Lower GuardArgumentsObjectFlags: bail out if any of the guarded flag bits
// are set on the arguments object.
void CodeGenerator::visitGuardArgumentsObjectFlags(
    LGuardArgumentsObjectFlags* lir) {
  Register argsObj = ToRegister(lir->argsObject());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
                                      Assembler::NonZero, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
8370 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8371 Register obj = ToRegister(lir->object());
8372 Register output = ToRegister(lir->output());
8374 masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8375 output);
8376 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
// Lower GuardBoundFunctionIsConstructor: bail out unless the bound
// function's IsConstructorFlag bit is set in its flags slot.
void CodeGenerator::visitGuardBoundFunctionIsConstructor(
    LGuardBoundFunctionIsConstructor* lir) {
  Register obj = ToRegister(lir->object());

  Label bail;
  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Lower ReturnFromCtor: implement the constructor-return rule. If the
// returned |value| is an object, use it; otherwise fall back to the
// constructed |this| object.
void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
  ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label valueIsObject, end;

  masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

  // Value is not an object. Return that other object.
  masm.movePtr(obj, output);
  masm.jump(&end);

  // Value is an object. Return unbox(Value).
  masm.bind(&valueIsObject);
  Register payload = masm.extractObject(value, output);
  if (payload != output) {
    masm.movePtr(payload, output);
  }

  masm.bind(&end);
}
// Out-of-line slow path for BoxNonStrictThis: handles the case where |this|
// is not already an object; see visitOutOfLineBoxNonStrictThis.
class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
  LBoxNonStrictThis* ins_;

 public:
  explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineBoxNonStrictThis(this);
  }

  LBoxNonStrictThis* ins() const { return ins_; }
};
8424 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8425 ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8426 Register output = ToRegister(lir->output());
8428 auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8429 addOutOfLineCode(ool, lir->mir());
8431 masm.fallibleUnboxObject(value, output, ool->entry());
8432 masm.bind(ool->rejoin());
// Out-of-line path for BoxNonStrictThis: null/undefined map to the global
// |this|; anything else goes through the BoxNonStrictThis VM call. Live
// registers are saved/restored manually as this is not a call instruction.
void CodeGenerator::visitOutOfLineBoxNonStrictThis(
    OutOfLineBoxNonStrictThis* ool) {
  LBoxNonStrictThis* lir = ool->ins();

  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  Label notNullOrUndefined;
  {
    // Scope for the tag scratch register.
    Label isNullOrUndefined;
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);
    masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
    masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
    masm.bind(&isNullOrUndefined);
    masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&notNullOrUndefined);

  saveLive(lir);

  pushArg(value);
  using Fn = JSObject* (*)(JSContext*, HandleValue);
  callVM<Fn, BoxNonStrictThis>(lir);

  StoreRegisterTo(output).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}
// Lower ImplicitThis: compute the implicit |this| for a name lookup on the
// environment chain via the ImplicitThisOperation VM helper.
void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToRegister(lir->env()));

  using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
                      MutableHandleValue);
  callVM<Fn, ImplicitThisOperation>(lir);
}
// Lower ArrayLength: load the array length from the elements header.
void CodeGenerator::visitArrayLength(LArrayLength* lir) {
  Register elements = ToRegister(lir->elements());
  Register output = ToRegister(lir->output());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, output);

  // Bail out if the length doesn't fit in int32.
  bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
}
// Store |index + 1| to |length|. For the register case, the index register
// is temporarily incremented, stored, then decremented again so its original
// value is preserved for later uses.
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    // Restore the register's original (index) value.
    masm.sub32(Imm32(1), newLength);
  }
}
// Lower SetArrayLength: write |index + 1| into the elements header's length.
void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
  Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
  SetLengthFromIndex(masm, lir->index(), length);
}
// Lower FunctionLength: read |fun.length| inline, bailing out for functions
// whose length cannot be read without running script (self-hosted lazy) or
// whose length property may be shadowed (resolved).
void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
              output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
// Lower FunctionName: read |fun.name| inline (empty atom when the function
// has no name), bailing out when the name can't be read on the fast path.
void CodeGenerator::visitFunctionName(LFunctionName* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  const JSAtomState& names = gen->runtime->names();
  masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);

  bailoutFrom(&bail, lir->snapshot());
}
// Computes a pointer to the current front element of an OrderedHashTable
// Range (|range|, |i| = current index) into |front|. Specialized below for
// ValueMap and ValueSet, whose entry sizes differ.
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);
// ValueMap specialization: front = data + i * 24 (sizeof(Data) == 24),
// computed as (i * 3) << 3. NOTE: clobbers |i|.
template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
// ValueSet specialization: front = data + i * 16 (sizeof(Data) == 16),
// computed as i << 4. NOTE: clobbers |i|.
template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
// Advance an OrderedHashTable Range past its current element: bump the
// popped-element count, then scan forward (advancing |front| in lock-step
// with index |i|) until a non-empty entry or the end of the data is reached,
// and store the new index back into the range.
template <class OrderedHashTable>
static void RangePopFront(MacroAssembler& masm, Register range, Register front,
                          Register dataLength, Register temp) {
  Register i = temp;

  masm.add32(Imm32(1),
             Address(range, OrderedHashTable::Range::offsetOfCount()));

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |range.ht.data[i]| point to the same location.
  MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

  // Entries whose key is the magic "empty" sentinel are skipped.
  masm.branchTestMagic(Assembler::Equal,
                       Address(front, OrderedHashTable::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}
// Unlink |range| from the hash table's doubly-linked list of live ranges
// (*range.prevp = range.next; if (next) next->prevp = prevp) and free its
// storage — unless |iter| is nursery-allocated, in which case the range
// memory is reclaimed by the GC instead of the free stub.
8599 template <class OrderedHashTable>
8600 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
8601 Register range, Register temp0,
8602 Register temp1) {
8603 Register next = temp0;
8604 Register prevp = temp1;
8606 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
8607 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
8608 masm.storePtr(next, Address(prevp, 0));
8610 Label hasNoNext;
8611 masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
8613 masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
8615 masm.bind(&hasNoNext);
8617 Label nurseryAllocated;
8618 masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
8619 &nurseryAllocated);
8621 masm.callFreeStub(range);
8623 masm.bind(&nurseryAllocated);
// Copy the map entry at |front| (key and value) into the first two fixed
// elements of the result object, with pre-barriers on the overwritten slots
// and a post-write barrier if either stored value is a nursery cell.
8626 template <>
8627 void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
8628 Register temp,
8629 Register front) {
8630 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8632 Address keyAddress(front, ValueMap::Entry::offsetOfKey());
8633 Address valueAddress(front, ValueMap::Entry::offsetOfValue());
8634 Address keyElemAddress(result, elementsOffset);
8635 Address valueElemAddress(result, elementsOffset + sizeof(Value));
8636 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8637 masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
8638 masm.storeValue(keyAddress, keyElemAddress, temp);
8639 masm.storeValue(valueAddress, valueElemAddress, temp);
// One barrier call suffices for both slots: take it if the key is a
// nursery cell, or fall through to it unless the value is not one either.
8641 Label emitBarrier, skipBarrier;
8642 masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
8643 &emitBarrier);
8644 masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
8645 &skipBarrier);
8647 masm.bind(&emitBarrier);
8648 saveVolatile(temp);
8649 emitPostWriteBarrier(result);
8650 restoreVolatile(temp);
8652 masm.bind(&skipBarrier);
// Copy the set entry's key at |front| into the first fixed element of the
// result object, with a pre-barrier on the overwritten slot and a
// post-write barrier if the stored value is a nursery cell.
8655 template <>
8656 void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
8657 Register temp,
8658 Register front) {
8659 size_t elementsOffset = NativeObject::offsetOfFixedElements();
8661 Address keyAddress(front, ValueSet::offsetOfEntryKey());
8662 Address keyElemAddress(result, elementsOffset);
8663 masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
8664 masm.storeValue(keyAddress, keyElemAddress, temp);
8666 Label skipBarrier;
8667 masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
8668 &skipBarrier);
8670 saveVolatile(temp);
8671 emitPostWriteBarrier(result);
8672 restoreVolatile(temp);
8674 masm.bind(&skipBarrier);
// Emit the fast path of Map/Set iterator advancement: if the iterator's
// range is live and not exhausted, copy the current entry into |result|,
// pop the range front, and set |output| = 0 (not done); otherwise destroy
// the range, null out the iterator's range slot, and set |output| = 1.
8677 template <class IteratorObject, class OrderedHashTable>
8678 void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
8679 Register iter = ToRegister(lir->iter());
8680 Register result = ToRegister(lir->result());
8681 Register temp = ToRegister(lir->temp0());
8682 Register dataLength = ToRegister(lir->temp1());
8683 Register range = ToRegister(lir->temp2());
8684 Register output = ToRegister(lir->output());
8686 #ifdef DEBUG
8687 // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
8688 // only called with the correct iterator class. Assert here all self-
8689 // hosted callers of GetNextEntryForIterator perform this class check.
8690 // No Spectre mitigations are needed because this is DEBUG-only code.
8691 Label success;
8692 masm.branchTestObjClassNoSpectreMitigations(
8693 Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
8694 masm.assumeUnreachable("Iterator object should have the correct class.");
8695 masm.bind(&success);
8696 #endif
8698 masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
8699 IteratorObject::RangeSlot)),
8700 range);
// A null range means the iterator was already completed.
8702 Label iterAlreadyDone, iterDone, done;
8703 masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);
8705 masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
8706 masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
8707 dataLength);
8708 masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
8709 dataLength);
8710 masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
// |iter| is spilled so its register can double as the |front| pointer.
8712 masm.Push(iter);
8714 Register front = iter;
8715 RangeFront<OrderedHashTable>(masm, range, temp, front);
8717 emitLoadIteratorValues<OrderedHashTable>(result, temp, front);
8719 RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);
8721 masm.Pop(iter);
8722 masm.move32(Imm32(0), output);
8724 masm.jump(&done);
8726 masm.bind(&iterDone);
8728 RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);
8730 masm.storeValue(PrivateValue(nullptr),
8731 Address(iter, NativeObject::getFixedSlotOffset(
8732 IteratorObject::RangeSlot)));
8734 masm.bind(&iterAlreadyDone);
8736 masm.move32(Imm32(1), output);
8738 masm.bind(&done);
// Dispatch to the Map or Set instantiation based on the MIR node's mode.
8741 void CodeGenerator::visitGetNextEntryForIterator(
8742 LGetNextEntryForIterator* lir) {
8743 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
8744 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
8745 } else {
8746 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
8747 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
8751 // The point of these is to inform Ion of where these values already are; they
8752 // don't normally generate (much) code.
// Intentionally empty visitors: the results are already in place.
8753 void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
8754 void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
8755 void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Zero-initialize every wasm-anyref slot of the stack result area so the GC
// never observes garbage pointers. The zero register is materialized lazily,
// only if at least one ref result exists.
8757 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
8758 LAllocation* output = lir->getDef(0)->output();
8759 MOZ_ASSERT(output->isStackArea());
8760 bool tempInit = false;
8761 for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
8762 // Zero out ref stack results.
8763 if (iter.isWasmAnyRef()) {
8764 Register temp = ToRegister(lir->temp0());
8765 if (!tempInit) {
8766 masm.xorPtr(temp, temp);
8767 tempInit = true;
8769 masm.storePtr(temp, ToAddress(iter.alloc()));
// On 64-bit targets, widen an Int32 register result so the upper 32 bits are
// in a canonical state; a no-op elsewhere.
8774 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
8775 #ifdef JS_64BIT
8776 if (MWasmRegisterResult* mir = lir->mir()) {
8777 if (mir->type() == MIRType::Int32) {
8778 masm.widenInt32(ToRegister(lir->output()));
8781 #endif
// Emit a wasm call (direct, import, table-indirect, builtin, or funcref),
// including: try-note bookkeeping for catchable calls, stack-alignment
// checks, tail-call (return call) fast exits, safepoint/stackmap recording,
// and post-call instance/pinned-register reload and realm switching.
8784 void CodeGenerator::visitWasmCall(LWasmCall* lir) {
8785 const MWasmCallBase* callBase = lir->callBase();
8786 bool isReturnCall = lir->isReturnCall();
8788 // If this call is in Wasm try code block, initialise a wasm::TryNote for this
8789 // call.
8790 bool inTry = callBase->inTry();
8791 if (inTry) {
8792 size_t tryNoteIndex = callBase->tryNoteIndex();
8793 wasm::TryNoteVector& tryNotes = masm.tryNotes();
8794 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
8795 tryNote.setTryBodyBegin(masm.currentOffset());
8798 MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
8800 static_assert(
8801 WasmStackAlignment >= ABIStackAlignment &&
8802 WasmStackAlignment % ABIStackAlignment == 0,
8803 "The wasm stack alignment should subsume the ABI-required alignment");
8805 #ifdef DEBUG
8806 Label ok;
8807 masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
8808 masm.breakpoint();
8809 masm.bind(&ok);
8810 #endif
8812 // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
8813 // instance and pinned regs. The only case where where we don't have to
8814 // reload the instance and pinned regs is when the callee preserves them.
8815 bool reloadRegs = true;
8816 bool switchRealm = true;
8818 const wasm::CallSiteDesc& desc = callBase->desc();
8819 const wasm::CalleeDesc& callee = callBase->callee();
8820 CodeOffset retOffset;
8821 CodeOffset secondRetOffset;
8822 switch (callee.which()) {
8823 case wasm::CalleeDesc::Func:
8824 #ifdef ENABLE_WASM_TAIL_CALLS
8825 if (isReturnCall) {
8826 ReturnCallAdjustmentInfo retCallInfo(
8827 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8828 masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
8829 // The rest of the method is unnecessary for a return call.
8830 return;
8832 #endif
8833 MOZ_ASSERT(!isReturnCall);
8834 retOffset = masm.call(desc, callee.funcIndex());
8835 reloadRegs = false;
8836 switchRealm = false;
8837 break;
8838 case wasm::CalleeDesc::Import:
8839 #ifdef ENABLE_WASM_TAIL_CALLS
8840 if (isReturnCall) {
8841 ReturnCallAdjustmentInfo retCallInfo(
8842 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8843 masm.wasmReturnCallImport(desc, callee, retCallInfo);
8844 // The rest of the method is unnecessary for a return call.
8845 return;
8847 #endif
8848 MOZ_ASSERT(!isReturnCall);
8849 retOffset = masm.wasmCallImport(desc, callee);
8850 break;
8851 case wasm::CalleeDesc::AsmJSTable:
8852 retOffset = masm.asmCallIndirect(desc, callee);
8853 break;
8854 case wasm::CalleeDesc::WasmTable: {
// Indirect calls may need out-of-line traps for table bounds failures
// and (without a heap register) null-entry failures.
8855 Label* boundsCheckFailed = nullptr;
8856 if (lir->needsBoundsCheck()) {
8857 OutOfLineAbortingWasmTrap* ool =
8858 new (alloc()) OutOfLineAbortingWasmTrap(
8859 wasm::BytecodeOffset(desc.lineOrBytecode()),
8860 wasm::Trap::OutOfBounds);
8861 if (lir->isCatchable()) {
8862 addOutOfLineCode(ool, lir->mirCatchable());
8863 } else if (isReturnCall) {
8864 #ifdef ENABLE_WASM_TAIL_CALLS
8865 addOutOfLineCode(ool, lir->mirReturnCall());
8866 #else
8867 MOZ_CRASH("Return calls are disabled.");
8868 #endif
8869 } else {
8870 addOutOfLineCode(ool, lir->mirUncatchable());
8872 boundsCheckFailed = ool->entry();
8874 Label* nullCheckFailed = nullptr;
8875 #ifndef WASM_HAS_HEAPREG
8877 OutOfLineAbortingWasmTrap* ool =
8878 new (alloc()) OutOfLineAbortingWasmTrap(
8879 wasm::BytecodeOffset(desc.lineOrBytecode()),
8880 wasm::Trap::IndirectCallToNull);
8881 if (lir->isCatchable()) {
8882 addOutOfLineCode(ool, lir->mirCatchable());
8883 } else if (isReturnCall) {
8884 # ifdef ENABLE_WASM_TAIL_CALLS
8885 addOutOfLineCode(ool, lir->mirReturnCall());
8886 # else
8887 MOZ_CRASH("Return calls are disabled.");
8888 # endif
8889 } else {
8890 addOutOfLineCode(ool, lir->mirUncatchable());
8892 nullCheckFailed = ool->entry();
8894 #endif
8895 #ifdef ENABLE_WASM_TAIL_CALLS
8896 if (isReturnCall) {
8897 ReturnCallAdjustmentInfo retCallInfo(
8898 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8899 masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
8900 nullCheckFailed, mozilla::Nothing(),
8901 retCallInfo);
8902 // The rest of the method is unnecessary for a return call.
8903 return;
8905 #endif
8906 MOZ_ASSERT(!isReturnCall);
8907 masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
8908 lir->tableSize(), &retOffset, &secondRetOffset);
8909 // Register reloading and realm switching are handled dynamically inside
8910 // wasmCallIndirect. There are two return offsets, one for each call
8911 // instruction (fast path and slow path).
8912 reloadRegs = false;
8913 switchRealm = false;
8914 break;
8916 case wasm::CalleeDesc::Builtin:
8917 retOffset = masm.call(desc, callee.builtin());
8918 reloadRegs = false;
8919 switchRealm = false;
8920 break;
8921 case wasm::CalleeDesc::BuiltinInstanceMethod:
8922 retOffset = masm.wasmCallBuiltinInstanceMethod(
8923 desc, callBase->instanceArg(), callee.builtin(),
8924 callBase->builtinMethodFailureMode());
8925 switchRealm = false;
8926 break;
8927 case wasm::CalleeDesc::FuncRef:
8928 #ifdef ENABLE_WASM_TAIL_CALLS
8929 if (isReturnCall) {
8930 ReturnCallAdjustmentInfo retCallInfo(
8931 callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
8932 masm.wasmReturnCallRef(desc, callee, retCallInfo);
8933 // The rest of the method is unnecessary for a return call.
8934 return;
8936 #endif
8937 MOZ_ASSERT(!isReturnCall);
8938 // Register reloading and realm switching are handled dynamically inside
8939 // wasmCallRef. There are two return offsets, one for each call
8940 // instruction (fast path and slow path).
8941 masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
8942 reloadRegs = false;
8943 switchRealm = false;
8944 break;
8947 // Note the assembler offset for the associated LSafePoint.
// Return calls exited above; everything below is normal-call epilogue.
8948 MOZ_ASSERT(!isReturnCall);
8949 markSafepointAt(retOffset.offset(), lir);
8951 // Now that all the outbound in-memory args are on the stack, note the
8952 // required lower boundary point of the associated StackMap.
8953 uint32_t framePushedAtStackMapBase =
8954 masm.framePushed() - callBase->stackArgAreaSizeUnaligned();
8955 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
8956 MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
8957 WasmSafepointKind::LirCall);
8959 // Note the assembler offset and framePushed for use by the adjunct
8960 // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
8961 if (callee.which() == wasm::CalleeDesc::WasmTable) {
8962 lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
8963 framePushedAtStackMapBase);
8966 if (reloadRegs) {
8967 masm.loadPtr(
8968 Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
8969 InstanceReg);
8970 masm.loadWasmPinnedRegsFromInstance();
8971 if (switchRealm) {
8972 masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
8974 } else {
8975 MOZ_ASSERT(!switchRealm);
8978 #ifdef ENABLE_WASM_TAIL_CALLS
8979 switch (callee.which()) {
8980 case wasm::CalleeDesc::Func:
8981 case wasm::CalleeDesc::Import:
8982 case wasm::CalleeDesc::WasmTable:
8983 case wasm::CalleeDesc::FuncRef:
8984 // Stack allocation could change during Wasm (return) calls,
8985 // recover pre-call state.
8986 masm.freeStackTo(masm.framePushed());
8987 break;
8988 default:
8989 break;
8991 #endif // ENABLE_WASM_TAIL_CALLS
8993 if (inTry) {
8994 // Set the end of the try note range
8995 size_t tryNoteIndex = callBase->tryNoteIndex();
8996 wasm::TryNoteVector& tryNotes = masm.tryNotes();
8997 wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
8999 // Don't set the end of the try note if we've OOM'ed, as the above
9000 // instructions may not have been emitted, which will trigger an assert
9001 // about zero-length try-notes. This is okay as this compilation will be
9002 // thrown away.
9003 if (!masm.oom()) {
9004 tryNote.setTryBodyEnd(masm.currentOffset());
9007 // This instruction or the adjunct safepoint must be the last instruction
9008 // in the block. No other instructions may be inserted.
9009 LBlock* block = lir->block();
9010 MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
9011 (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
9012 *(++block->rbegin()) == lir));
9014 // Jump to the fallthrough block
9015 jumpToBlock(lir->mirCatchable()->getSuccessor(
9016 MWasmCallCatchable::FallthroughBranchIndex));
// Record the landing pad for a catchable wasm call's try note, asserting
// that this block directly follows the call block with nothing inserted
// between the call and the pad.
9020 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
9021 LBlock* block = lir->block();
9022 MWasmCallLandingPrePad* mir = lir->mir();
9023 MBasicBlock* mirBlock = mir->block();
9024 MBasicBlock* callMirBlock = mir->callBlock();
9026 // This block must be the pre-pad successor of the call block. No blocks may
9027 // be inserted between us, such as for critical edge splitting.
9028 MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
9029 MWasmCallCatchable::PrePadBranchIndex));
9031 // This instruction or a move group must be the first instruction in the
9032 // block. No other instructions may be inserted.
9033 MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
9034 *(++block->begin()) == lir));
9036 wasm::TryNoteVector& tryNotes = masm.tryNotes();
9037 wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
9038 // Set the entry point for the call try note to be the beginning of this
9039 // block. The above assertions (and assertions in visitWasmCall) guarantee
9040 // that we are not skipping over instructions that should be executed.
9041 tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
// Register the second (slow-path) safepoint of an indirect wasm call at the
// offset recorded by visitWasmCall.
9044 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9045 LWasmCallIndirectAdjunctSafepoint* lir) {
9046 markSafepointAt(lir->safepointLocation().offset(), lir);
9047 lir->safepoint()->setFramePushedAtStackMapBase(
9048 lir->framePushedAtStackMapBase());
// If |ins| carries trap metadata, register the faulting code offset |fco| as
// a null-pointer-dereference trap site for machine instruction kind |tmi|;
// otherwise do nothing.
9051 template <typename InstructionWithMaybeTrapSite>
9052 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
9053 InstructionWithMaybeTrapSite* ins,
9054 FaultingCodeOffset fco,
9055 wasm::TrapMachineInsn tmi) {
9056 if (!ins->maybeTrap()) {
9057 return;
9059 wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
9060 masm.append(wasm::Trap::NullPointerDereference,
9061 wasm::TrapSite(tmi, fco, trapOffset));
// Load a value from a fixed offset in a wasm container object, applying the
// requested integer widening, and register each load as a potential
// null-check trap site.
9064 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
9065 MIRType type = ins->type();
9066 MWideningOp wideningOp = ins->wideningOp();
9067 Register container = ToRegister(ins->containerRef());
9068 Address addr(container, ins->offset());
9069 AnyRegister dst = ToAnyRegister(ins->output());
9071 FaultingCodeOffset fco;
9072 switch (type) {
9073 case MIRType::Int32:
// Int32 results may come from 8/16-bit fields via zero/sign extension.
9074 switch (wideningOp) {
9075 case MWideningOp::None:
9076 fco = masm.load32(addr, dst.gpr());
9077 EmitSignalNullCheckTrapSite(masm, ins, fco,
9078 wasm::TrapMachineInsn::Load32);
9079 break;
9080 case MWideningOp::FromU16:
9081 fco = masm.load16ZeroExtend(addr, dst.gpr());
9082 EmitSignalNullCheckTrapSite(masm, ins, fco,
9083 wasm::TrapMachineInsn::Load16);
9084 break;
9085 case MWideningOp::FromS16:
9086 fco = masm.load16SignExtend(addr, dst.gpr());
9087 EmitSignalNullCheckTrapSite(masm, ins, fco,
9088 wasm::TrapMachineInsn::Load16);
9089 break;
9090 case MWideningOp::FromU8:
9091 fco = masm.load8ZeroExtend(addr, dst.gpr());
9092 EmitSignalNullCheckTrapSite(masm, ins, fco,
9093 wasm::TrapMachineInsn::Load8);
9094 break;
9095 case MWideningOp::FromS8:
9096 fco = masm.load8SignExtend(addr, dst.gpr());
9097 EmitSignalNullCheckTrapSite(masm, ins, fco,
9098 wasm::TrapMachineInsn::Load8);
9099 break;
9100 default:
9101 MOZ_CRASH("unexpected widening op in ::visitWasmLoadSlot");
9103 break;
9104 case MIRType::Float32:
9105 MOZ_ASSERT(wideningOp == MWideningOp::None);
9106 fco = masm.loadFloat32(addr, dst.fpu());
9107 EmitSignalNullCheckTrapSite(masm, ins, fco,
9108 wasm::TrapMachineInsn::Load32);
9109 break;
9110 case MIRType::Double:
9111 MOZ_ASSERT(wideningOp == MWideningOp::None);
9112 fco = masm.loadDouble(addr, dst.fpu());
9113 EmitSignalNullCheckTrapSite(masm, ins, fco,
9114 wasm::TrapMachineInsn::Load64);
9115 break;
9116 case MIRType::Pointer:
9117 case MIRType::WasmAnyRef:
9118 MOZ_ASSERT(wideningOp == MWideningOp::None);
9119 fco = masm.loadPtr(addr, dst.gpr());
9120 EmitSignalNullCheckTrapSite(masm, ins, fco,
9121 wasm::TrapMachineInsnForLoadWord());
9122 break;
9123 #ifdef ENABLE_WASM_SIMD
9124 case MIRType::Simd128:
9125 MOZ_ASSERT(wideningOp == MWideningOp::None);
9126 fco = masm.loadUnalignedSimd128(addr, dst.fpu());
9127 EmitSignalNullCheckTrapSite(masm, ins, fco,
9128 wasm::TrapMachineInsn::Load128);
9129 break;
9130 #endif
9131 default:
9132 MOZ_CRASH("unexpected type in ::visitWasmLoadSlot");
// Store a value to a fixed offset in a wasm container object, applying the
// requested integer narrowing, and register each store as a potential
// null-check trap site. Reference-typed stores must use LWasmStoreRef.
9136 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
9137 MIRType type = ins->type();
9138 MNarrowingOp narrowingOp = ins->narrowingOp();
9139 Register container = ToRegister(ins->containerRef());
9140 Address addr(container, ins->offset());
9141 AnyRegister src = ToAnyRegister(ins->value());
// Narrowing only applies to Int32 sources.
9142 if (type != MIRType::Int32) {
9143 MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
9146 FaultingCodeOffset fco;
9147 switch (type) {
9148 case MIRType::Int32:
9149 switch (narrowingOp) {
9150 case MNarrowingOp::None:
9151 fco = masm.store32(src.gpr(), addr);
9152 EmitSignalNullCheckTrapSite(masm, ins, fco,
9153 wasm::TrapMachineInsn::Store32);
9154 break;
9155 case MNarrowingOp::To16:
9156 fco = masm.store16(src.gpr(), addr);
9157 EmitSignalNullCheckTrapSite(masm, ins, fco,
9158 wasm::TrapMachineInsn::Store16);
9159 break;
9160 case MNarrowingOp::To8:
9161 fco = masm.store8(src.gpr(), addr);
9162 EmitSignalNullCheckTrapSite(masm, ins, fco,
9163 wasm::TrapMachineInsn::Store8);
9164 break;
9165 default:
9166 MOZ_CRASH();
9168 break;
9169 case MIRType::Float32:
9170 fco = masm.storeFloat32(src.fpu(), addr);
9171 EmitSignalNullCheckTrapSite(masm, ins, fco,
9172 wasm::TrapMachineInsn::Store32);
9173 break;
9174 case MIRType::Double:
9175 fco = masm.storeDouble(src.fpu(), addr);
9176 EmitSignalNullCheckTrapSite(masm, ins, fco,
9177 wasm::TrapMachineInsn::Store64);
9178 break;
9179 case MIRType::Pointer:
9180 // This could be correct, but it would be a new usage, so check carefully.
9181 MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
9182 case MIRType::WasmAnyRef:
9183 MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
9184 #ifdef ENABLE_WASM_SIMD
9185 case MIRType::Simd128:
9186 fco = masm.storeUnalignedSimd128(src.fpu(), addr);
9187 EmitSignalNullCheckTrapSite(masm, ins, fco,
9188 wasm::TrapMachineInsn::Store128);
9189 break;
9190 #endif
9191 default:
9192 MOZ_CRASH("unexpected type in StorePrimitiveValue");
// Load a pointer-sized table entry: output = elements[index].
9196 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9197 Register elements = ToRegister(ins->elements());
9198 Register index = ToRegister(ins->index());
9199 Register output = ToRegister(ins->output());
9200 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
// Compute output = base + constant offset.
9203 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9204 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9205 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
// Compute output = base + (index << scale).
9208 void CodeGenerator::visitWasmDerivedIndexPointer(
9209 LWasmDerivedIndexPointer* ins) {
9210 Register base = ToRegister(ins->base());
9211 Register index = ToRegister(ins->index());
9212 Register output = ToRegister(ins->output());
9213 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Store a wasm reference into valueBase[offset], optionally emitting a
// GC pre-barrier first; the store itself is registered as a potential
// null-check trap site. The post-barrier is emitted separately.
9216 void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
9217 Register instance = ToRegister(ins->instance());
9218 Register valueBase = ToRegister(ins->valueBase());
9219 size_t offset = ins->offset();
9220 Register value = ToRegister(ins->value());
9221 Register temp = ToRegister(ins->temp0());
9223 if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
9224 Label skipPreBarrier;
9225 wasm::EmitWasmPreBarrierGuard(
9226 masm, instance, temp, valueBase, offset, &skipPreBarrier,
9227 ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
9228 wasm::EmitWasmPreBarrierCall(masm, instance, temp, valueBase, offset);
9229 masm.bind(&skipPreBarrier);
9232 FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
9233 EmitSignalNullCheckTrapSite(masm, ins, fco,
9234 wasm::TrapMachineInsnForStoreWord());
9235 // The postbarrier is handled separately.
9238 // Out-of-line path to update the store buffer for wasm references.
// Captures the LIR node plus the (base, offset) of the stored cell and a
// temp register for computing the effective address in the OOL path.
9239 class OutOfLineWasmCallPostWriteBarrier
9240 : public OutOfLineCodeBase<CodeGenerator> {
9241 LInstruction* lir_;
9242 Register valueBase_;
9243 Register temp_;
9244 uint32_t valueOffset_;
9246 public:
9247 OutOfLineWasmCallPostWriteBarrier(LInstruction* lir, Register valueBase,
9248 Register temp, uint32_t valueOffset)
9249 : lir_(lir),
9250 valueBase_(valueBase),
9251 temp_(temp),
9252 valueOffset_(valueOffset) {}
9254 void accept(CodeGenerator* codegen) override {
9255 codegen->visitOutOfLineWasmCallPostWriteBarrier(this);
// Accessors for the code emitted in visitOutOfLineWasmCallPostWriteBarrier.
9258 LInstruction* lir() const { return lir_; }
9259 Register valueBase() const { return valueBase_; }
9260 Register temp() const { return temp_; }
9261 uint32_t valueOffset() const { return valueOffset_; }
// Out-of-line slow path: call Instance::postBarrier with the address of the
// just-written cell, preserving live volatile registers and InstanceReg
// around the ABI call.
9264 void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrier(
9265 OutOfLineWasmCallPostWriteBarrier* ool) {
9266 saveLiveVolatile(ool->lir());
9267 masm.Push(InstanceReg);
9268 int32_t framePushedAfterInstance = masm.framePushed();
9270 // Fold the value offset into the value base
9271 Register valueAddr = ool->valueBase();
9272 Register temp = ool->temp();
9273 masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);
9275 // Call Instance::postBarrier
9276 masm.setupWasmABICall();
9277 masm.passABIArg(InstanceReg);
9278 masm.passABIArg(temp);
9279 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9280 masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
9281 mozilla::Some(instanceOffset), ABIType::General);
9283 masm.Pop(InstanceReg);
9284 restoreLiveVolatile(ool->lir());
9286 masm.jump(ool->rejoin());
// Inline post-write-barrier guard: skip the out-of-line store-buffer call
// unless the guard (object/value nursery check) says it is needed.
9289 void CodeGenerator::visitWasmPostWriteBarrier(LWasmPostWriteBarrier* lir) {
9290 Register object = ToRegister(lir->object());
9291 Register value = ToRegister(lir->value());
9292 Register valueBase = ToRegister(lir->valueBase());
9293 Register temp = ToRegister(lir->temp0());
9294 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
9295 auto ool = new (alloc()) OutOfLineWasmCallPostWriteBarrier(
9296 lir, valueBase, temp, lir->valueOffset());
9297 addOutOfLineCode(ool, lir->mir());
9299 wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
9300 ool->rejoin());
9301 masm.jump(ool->entry());
9302 masm.bind(ool->rejoin());
// Load a 64-bit value from a wasm container slot, registering null-check
// trap sites (one per word on 32-bit targets).
9305 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
9306 Register container = ToRegister(ins->containerRef());
9307 Address addr(container, ins->offset());
9308 Register64 output = ToOutRegister64(ins);
9309 // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
9310 // transaction will always trap before the other, so it seems safest to
9311 // register both of them as potentially trapping.
9312 #ifdef JS_64BIT
9313 FaultingCodeOffset fco = masm.load64(addr, output);
9314 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
9315 #else
9316 FaultingCodeOffsetPair fcop = masm.load64(addr, output);
9317 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9318 wasm::TrapMachineInsn::Load32);
9319 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9320 wasm::TrapMachineInsn::Load32);
9321 #endif
// Store a 64-bit value to a wasm container slot, registering null-check
// trap sites (one per word on 32-bit targets).
9324 void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
9325 Register container = ToRegister(ins->containerRef());
9326 Address addr(container, ins->offset());
9327 Register64 value = ToRegister64(ins->value());
9328 // Either 1 or 2 words. As above we register both transactions in the
9329 // 2-word case.
9330 #ifdef JS_64BIT
9331 FaultingCodeOffset fco = masm.store64(value, addr);
9332 EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
9333 #else
9334 FaultingCodeOffsetPair fcop = masm.store64(value, addr);
9335 EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
9336 wasm::TrapMachineInsn::Store32);
9337 EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
9338 wasm::TrapMachineInsn::Store32);
9339 #endif
// Load an ArrayBuffer's byte length (as an intptr) into the output register.
9342 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9343 Register obj = ToRegister(lir->object());
9344 Register out = ToRegister(lir->output());
9345 masm.loadArrayBufferByteLengthIntPtr(obj, out);
// Load an ArrayBufferView's length (as an intptr) into the output register.
9348 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9349 Register obj = ToRegister(lir->object());
9350 Register out = ToRegister(lir->output());
9351 masm.loadArrayBufferViewLengthIntPtr(obj, out);
// Load an ArrayBufferView's byte offset (as an intptr) into the output.
9354 void CodeGenerator::visitArrayBufferViewByteOffset(
9355 LArrayBufferViewByteOffset* lir) {
9356 Register obj = ToRegister(lir->object());
9357 Register out = ToRegister(lir->output());
9358 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
// Load an ArrayBufferView's data pointer into the output register.
9361 void CodeGenerator::visitArrayBufferViewElements(
9362 LArrayBufferViewElements* lir) {
9363 Register obj = ToRegister(lir->object());
9364 Register out = ToRegister(lir->output());
9365 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
// Load the element size (in bytes) of a typed array into the output.
9368 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9369 Register obj = ToRegister(lir->object());
9370 Register out = ToRegister(lir->output());
9372 masm.typedArrayElementSize(obj, out);
// Bail out of this compilation if the view's underlying ArrayBuffer has
// been detached.
9375 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
9376 LGuardHasAttachedArrayBuffer* lir) {
9377 Register obj = ToRegister(lir->object());
9378 Register temp = ToRegister(lir->temp0());
9380 Label bail;
9381 masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
9382 bailoutFrom(&bail, lir->snapshot());
// Out-of-line path for LGuardNumberToIntPtrIndex when out-of-bounds doubles
// are supported: substitutes a sentinel index instead of bailing out.
9385 class OutOfLineGuardNumberToIntPtrIndex
9386 : public OutOfLineCodeBase<CodeGenerator> {
9387 LGuardNumberToIntPtrIndex* lir_;
9389 public:
9390 explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
9391 : lir_(lir) {}
9393 void accept(CodeGenerator* codegen) override {
9394 codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
9396 LGuardNumberToIntPtrIndex* lir() const { return lir_; }
// Convert a double to an intptr index. Without OOB support, a failed
// conversion bails out; with OOB support it jumps to the out-of-line path,
// which substitutes an out-of-bounds sentinel index.
9399 void CodeGenerator::visitGuardNumberToIntPtrIndex(
9400 LGuardNumberToIntPtrIndex* lir) {
9401 FloatRegister input = ToFloatRegister(lir->input());
9402 Register output = ToRegister(lir->output());
9404 if (!lir->mir()->supportOOB()) {
9405 Label bail;
9406 masm.convertDoubleToPtr(input, output, &bail, false);
9407 bailoutFrom(&bail, lir->snapshot());
9408 return;
9411 auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
9412 addOutOfLineCode(ool, lir->mir());
9414 masm.convertDoubleToPtr(input, output, ool->entry(), false);
9415 masm.bind(ool->rejoin());
// OOL path body: use -1 as a guaranteed out-of-bounds index and rejoin.
9418 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
9419 OutOfLineGuardNumberToIntPtrIndex* ool) {
9420 // Substitute the invalid index with an arbitrary out-of-bounds index.
9421 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
9422 masm.jump(ool->rejoin());
// Load a JSString's length into the output register.
9425 void CodeGenerator::visitStringLength(LStringLength* lir) {
9426 Register input = ToRegister(lir->string());
9427 Register output = ToRegister(lir->output());
9429 masm.loadStringLength(input, output);
// Int32 min/max. The first operand is tied to the output; the second may be
// a constant (branch + move) or a register (conditional move).
9432 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
9433 Register first = ToRegister(ins->first());
9434 Register output = ToRegister(ins->output());
9436 MOZ_ASSERT(first == output);
// For max keep |first| when it is greater; for min when it is less.
9438 Assembler::Condition cond =
9439 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
9441 if (ins->second()->isConstant()) {
9442 Label done;
9443 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
9444 masm.move32(Imm32(ToInt32(ins->second())), output);
9445 masm.bind(&done);
9446 } else {
9447 Register second = ToRegister(ins->second());
9448 masm.cmp32Move32(cond, second, first, second, output);
// Math.min/max over an int32 array, delegated to the macro-assembler; bails
// out when the fast path cannot handle the array.
9452 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9453 Register array = ToRegister(ins->array());
9454 Register output = ToRegister(ins->output());
9455 Register temp1 = ToRegister(ins->temp1());
9456 Register temp2 = ToRegister(ins->temp2());
9457 Register temp3 = ToRegister(ins->temp3());
9458 bool isMax = ins->isMax();
9460 Label bail;
9461 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9462 bailoutFrom(&bail, ins->snapshot());
// Math.min/max over a number array, delegated to the macro-assembler; bails
// out when the fast path cannot handle the array.
9465 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9466 Register array = ToRegister(ins->array());
9467 FloatRegister output = ToFloatRegister(ins->output());
9468 Register temp1 = ToRegister(ins->temp1());
9469 Register temp2 = ToRegister(ins->temp2());
9470 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9471 bool isMax = ins->isMax();
9473 Label bail;
9474 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9475 bailoutFrom(&bail, ins->snapshot());
9478 // For Abs*, lowering will have tied input to output on platforms where that is
9479 // sensible, and otherwise left them untied.
// Int32 absolute value. The fallible case bails out on INT32_MIN, whose
// negation overflows; the infallible case uses the plain abs32 sequence.
9481 void CodeGenerator::visitAbsI(LAbsI* ins) {
9482 Register input = ToRegister(ins->input());
9483 Register output = ToRegister(ins->output());
9485 if (ins->mir()->fallible()) {
9486 Label positive;
9487 if (input != output) {
9488 masm.move32(input, output);
9490 masm.branchTest32(Assembler::NotSigned, output, output, &positive);
9491 Label bail;
9492 masm.branchNeg32(Assembler::Overflow, output, &bail);
9493 bailoutFrom(&bail, ins->snapshot());
9494 masm.bind(&positive);
9495 } else {
9496 masm.abs32(input, output);
// visitAbsD: double Math.abs via the assembler's absDouble.
9500 void CodeGenerator::visitAbsD(LAbsD* ins) {
9501 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
// visitAbsF: float32 Math.abs via the assembler's absFloat32.
9504 void CodeGenerator::visitAbsF(LAbsF* ins) {
9505 masm.absFloat32(ToFloatRegister(ins->input()),
9506 ToFloatRegister(ins->output()));
// visitPowII: int32 ** int32 with an int32 result. masm.pow32 jumps to
// |bailout| when the result is not representable as int32.
9509 void CodeGenerator::visitPowII(LPowII* ins) {
9510 Register value = ToRegister(ins->value());
9511 Register power = ToRegister(ins->power());
9512 Register output = ToRegister(ins->output());
9513 Register temp0 = ToRegister(ins->temp0());
9514 Register temp1 = ToRegister(ins->temp1());
9516 Label bailout;
9517 masm.pow32(value, power, output, temp0, temp1, &bailout);
9518 bailoutFrom(&bailout, ins->snapshot());
// visitPowI: double ** int32 via an aligned ABI call to js::powi; the
// double result is expected in ReturnDoubleReg.
9521 void CodeGenerator::visitPowI(LPowI* ins) {
9522 FloatRegister value = ToFloatRegister(ins->value());
9523 Register power = ToRegister(ins->power());
9525 using Fn = double (*)(double x, int32_t y);
9526 masm.setupAlignedABICall();
9527 masm.passABIArg(value, ABIType::Float64);
9528 masm.passABIArg(power);
9530 masm.callWithABI<Fn, js::powi>(ABIType::Float64);
9531 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// visitPowD: double ** double via an aligned ABI call to ecmaPow; the
// double result is expected in ReturnDoubleReg.
9534 void CodeGenerator::visitPowD(LPowD* ins) {
9535 FloatRegister value = ToFloatRegister(ins->value());
9536 FloatRegister power = ToFloatRegister(ins->power());
9538 using Fn = double (*)(double x, double y);
9539 masm.setupAlignedABICall();
9540 masm.passABIArg(value, ABIType::Float64);
9541 masm.passABIArg(power, ABIType::Float64);
9542 masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);
9544 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
// visitPowOfTwoI: base ** power where |base| is a compile-time power of two
// (2^n, n >= 1). Computed as repeated left shifts of 1. The unsigned
// AboveOrEqual bailout comparison also rejects negative |power| values.
9547 void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
9548 Register power = ToRegister(ins->power());
9549 Register output = ToRegister(ins->output());
9551 uint32_t base = ins->base();
9552 MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
9554 uint32_t n = mozilla::FloorLog2(base);
9555 MOZ_ASSERT(n != 0);
9557 // Hacker's Delight, 2nd edition, theorem D2.
9558 auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
9560 // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
9561 // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
9563 // Note: it's important for this condition to match the code in CacheIR.cpp
9564 // (CanAttachInt32Pow) to prevent failure loops.
9565 bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
9566 ins->snapshot());
9568 // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
9569 // |power| and perform a single shift, but due to the lack of necessary
9570 // MacroAssembler functionality, like multiplying a register with an
9571 // immediate, we restrict the number of generated shift instructions when
9572 // lowering this operation.
9573 masm.move32(Imm32(1), output);
9574 do {
9575 masm.lshift32(power, output);
9576 n--;
9577 } while (n > 0);
// visitSqrtD: double Math.sqrt.
9580 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9581 FloatRegister input = ToFloatRegister(ins->input());
9582 FloatRegister output = ToFloatRegister(ins->output());
9583 masm.sqrtDouble(input, output);
// visitSqrtF: float32 Math.sqrt.
9586 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9587 FloatRegister input = ToFloatRegister(ins->input());
9588 FloatRegister output = ToFloatRegister(ins->output());
9589 masm.sqrtFloat32(input, output);
// visitSignI: Math.sign for int32 input/output.
9592 void CodeGenerator::visitSignI(LSignI* ins) {
9593 Register input = ToRegister(ins->input());
9594 Register output = ToRegister(ins->output());
9595 masm.signInt32(input, output);
// visitSignD: Math.sign for double input/output.
9598 void CodeGenerator::visitSignD(LSignD* ins) {
9599 FloatRegister input = ToFloatRegister(ins->input());
9600 FloatRegister output = ToFloatRegister(ins->output());
9601 masm.signDouble(input, output);
// visitSignDI: Math.sign with double input and int32 output; bails out when
// signDoubleToInt32 cannot produce an int32 result (jumps to |bail|).
9604 void CodeGenerator::visitSignDI(LSignDI* ins) {
9605 FloatRegister input = ToFloatRegister(ins->input());
9606 FloatRegister temp = ToFloatRegister(ins->temp0());
9607 Register output = ToRegister(ins->output());
9609 Label bail;
9610 masm.signDoubleToInt32(input, output, temp, &bail);
9611 bailoutFrom(&bail, ins->snapshot());
// visitMathFunctionD: generic double unary math function (sin, cos, ...)
// via an ABI call to the function pointer resolved from the MIR node.
9614 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
9615 FloatRegister input = ToFloatRegister(ins->input());
9616 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9618 UnaryMathFunction fun = ins->mir()->function();
9619 UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
9621 masm.setupAlignedABICall();
9623 masm.passABIArg(input, ABIType::Float64);
9624 masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
9625 ABIType::Float64);
// visitMathFunctionF: float32 unary math function. Selects the concrete
// float implementation by switching on the MIR function kind; plain libc
// functions (floorf/ceilf) relax the unsafe-call check accordingly.
9628 void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
9629 FloatRegister input = ToFloatRegister(ins->input());
9630 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
9632 masm.setupAlignedABICall();
9633 masm.passABIArg(input, ABIType::Float32);
9635 using Fn = float (*)(float x);
9636 Fn funptr = nullptr;
9637 CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
9638 switch (ins->mir()->function()) {
9639 case UnaryMathFunction::Floor:
9640 funptr = floorf;
9641 check = CheckUnsafeCallWithABI::DontCheckOther;
9642 break;
9643 case UnaryMathFunction::Round:
9644 funptr = math_roundf_impl;
9645 break;
9646 case UnaryMathFunction::Trunc:
9647 funptr = math_truncf_impl;
9648 break;
9649 case UnaryMathFunction::Ceil:
9650 funptr = ceilf;
9651 check = CheckUnsafeCallWithABI::DontCheckOther;
9652 break;
9653 default:
9654 MOZ_CRASH("Unknown or unsupported float32 math function");
9657 masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
// visitModD: double % double for JS code (wasm uses visitWasmBuiltinModD
// below) via an ABI call to NumberMod; result is in ReturnDoubleReg.
9660 void CodeGenerator::visitModD(LModD* ins) {
9661 MOZ_ASSERT(!gen->compilingWasm());
9663 FloatRegister lhs = ToFloatRegister(ins->lhs());
9664 FloatRegister rhs = ToFloatRegister(ins->rhs());
9666 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9668 using Fn = double (*)(double a, double b);
9669 masm.setupAlignedABICall();
9670 masm.passABIArg(lhs, ABIType::Float64);
9671 masm.passABIArg(rhs, ABIType::Float64);
9672 masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
// visitModPowTwoD: double % constant where the divisor is a power of two.
// Computed inline without calling fmod; see the inline comments for the
// copysign-based formula and the subnormal fast path.
9675 void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
9676 FloatRegister lhs = ToFloatRegister(ins->lhs());
9677 uint32_t divisor = ins->divisor();
9678 MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
9680 FloatRegister output = ToFloatRegister(ins->output());
9682 // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
9684 // This doesn't work if |d| isn't a power of two, because we may lose too much
9685 // precision. For example |Number.MAX_VALUE % 3 == 2|, but
9686 // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
9688 Label done;
9690 ScratchDoubleScope scratch(masm);
9692 // Subnormals can lead to performance degradation, which can make calling
9693 // |fmod| faster than this inline implementation. Work around this issue by
9694 // directly returning the input for any value in the interval ]-1, +1[.
9695 Label notSubnormal;
9696 masm.loadConstantDouble(1.0, scratch);
9697 masm.loadConstantDouble(-1.0, output);
9698 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
9699 &notSubnormal);
9700 masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
9701 &notSubnormal);
9703 masm.moveDouble(lhs, output);
9704 masm.jump(&done);
9706 masm.bind(&notSubnormal);
9708 if (divisor == 1) {
9709 // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
9710 // the multiplication by one in this case.
9711 masm.moveDouble(lhs, output);
9712 masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
9713 masm.subDouble(scratch, output);
9714 } else {
// General case: scratch = d * trunc(lhs * (1/d)); output = lhs - scratch.
9715 masm.loadConstantDouble(1.0 / double(divisor), scratch);
9716 masm.loadConstantDouble(double(divisor), output);
9718 masm.mulDouble(lhs, scratch);
9719 masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
9720 masm.mulDouble(output, scratch);
9722 masm.moveDouble(lhs, output);
9723 masm.subDouble(scratch, output);
// Restore the sign of |lhs| so that e.g. -0 % d stays -0.
9727 masm.copySignDouble(output, lhs, output);
9728 masm.bind(&done);
// visitWasmBuiltinModD: double % double for wasm, calling the ModD builtin
// through the wasm ABI. InstanceReg is preserved across the call, and its
// stack offset is passed so the callee can locate the instance.
9731 void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
9732 masm.Push(InstanceReg);
9733 int32_t framePushedAfterInstance = masm.framePushed();
9735 FloatRegister lhs = ToFloatRegister(ins->lhs());
9736 FloatRegister rhs = ToFloatRegister(ins->rhs());
9738 MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
9740 masm.setupWasmABICall();
9741 masm.passABIArg(lhs, ABIType::Float64);
9742 masm.passABIArg(rhs, ABIType::Float64);
9744 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
9745 masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
9746 mozilla::Some(instanceOffset), ABIType::Float64);
9748 masm.Pop(InstanceReg);
// visitBigIntAdd: BigInt addition. Fast path handles operands whose digits
// fit a single pointer-sized register; anything else (or overflow, or GC
// allocation failure) goes to the out-of-line VM call BigInt::add.
9751 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
9752 Register lhs = ToRegister(ins->lhs());
9753 Register rhs = ToRegister(ins->rhs());
9754 Register temp1 = ToRegister(ins->temp1());
9755 Register temp2 = ToRegister(ins->temp2());
9756 Register output = ToRegister(ins->output());
9758 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9759 auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
9760 StoreRegisterTo(output));
9762 // 0n + x == x
9763 Label lhsNonZero;
9764 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9765 masm.movePtr(rhs, output);
9766 masm.jump(ool->rejoin());
9767 masm.bind(&lhsNonZero);
9769 // x + 0n == x
9770 Label rhsNonZero;
9771 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9772 masm.movePtr(lhs, output);
9773 masm.jump(ool->rejoin());
9774 masm.bind(&rhsNonZero);
9776 // Call into the VM when either operand can't be loaded into a pointer-sized
9777 // register.
9778 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9779 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9781 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9783 // Create and return the result.
9784 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9785 masm.initializeBigInt(output, temp1);
9787 masm.bind(ool->rejoin());
// visitBigIntSub: BigInt subtraction. Same structure as visitBigIntAdd:
// inline single-digit fast path, VM fallback via BigInt::sub otherwise.
9790 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
9791 Register lhs = ToRegister(ins->lhs());
9792 Register rhs = ToRegister(ins->rhs());
9793 Register temp1 = ToRegister(ins->temp1());
9794 Register temp2 = ToRegister(ins->temp2());
9795 Register output = ToRegister(ins->output());
9797 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9798 auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
9799 StoreRegisterTo(output));
9801 // x - 0n == x
9802 Label rhsNonZero;
9803 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9804 masm.movePtr(lhs, output);
9805 masm.jump(ool->rejoin());
9806 masm.bind(&rhsNonZero);
9808 // Call into the VM when either operand can't be loaded into a pointer-sized
9809 // register.
9810 masm.loadBigInt(lhs, temp1, ool->entry());
9811 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9813 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9815 // Create and return the result.
9816 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9817 masm.initializeBigInt(output, temp1);
9819 masm.bind(ool->rejoin());
// visitBigIntMul: BigInt multiplication. Inline path for single-digit
// operands; overflow or heap-digit operands fall back to BigInt::mul.
9822 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
9823 Register lhs = ToRegister(ins->lhs());
9824 Register rhs = ToRegister(ins->rhs());
9825 Register temp1 = ToRegister(ins->temp1());
9826 Register temp2 = ToRegister(ins->temp2());
9827 Register output = ToRegister(ins->output());
9829 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9830 auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
9831 StoreRegisterTo(output));
9833 // 0n * x == 0n
9834 Label lhsNonZero;
9835 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9836 masm.movePtr(lhs, output);
9837 masm.jump(ool->rejoin());
9838 masm.bind(&lhsNonZero);
9840 // x * 0n == 0n
9841 Label rhsNonZero;
9842 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9843 masm.movePtr(rhs, output);
9844 masm.jump(ool->rejoin());
9845 masm.bind(&rhsNonZero);
9847 // Call into the VM when either operand can't be loaded into a pointer-sized
9848 // register.
9849 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9850 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9852 masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9854 // Create and return the result.
9855 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
9856 masm.initializeBigInt(output, temp1);
9858 masm.bind(ool->rejoin());
// visitBigIntDiv: BigInt division. Handles divide-by-zero (VM call which
// throws), the 0n dividend, the |x / 1n| identity, and the
// INT{32,64}_MIN / -1 signed-overflow case before emitting the inline
// single-digit division; everything else goes to BigInt::div.
9861 void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
9862 Register lhs = ToRegister(ins->lhs());
9863 Register rhs = ToRegister(ins->rhs());
9864 Register temp1 = ToRegister(ins->temp1());
9865 Register temp2 = ToRegister(ins->temp2());
9866 Register output = ToRegister(ins->output());
9868 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9869 auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
9870 StoreRegisterTo(output));
9872 // x / 0 throws an error.
9873 if (ins->mir()->canBeDivideByZero()) {
9874 masm.branchIfBigIntIsZero(rhs, ool->entry());
9877 // 0n / x == 0n
9878 Label lhsNonZero;
9879 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9880 masm.movePtr(lhs, output);
9881 masm.jump(ool->rejoin());
9882 masm.bind(&lhsNonZero);
9884 // Call into the VM when either operand can't be loaded into a pointer-sized
9885 // register.
9886 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
9887 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
9889 // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
9890 // allocation which might trigger a minor GC to free up nursery space. This
9891 // requires us to apply the same optimization here, otherwise we'd end up with
9892 // always entering the OOL call, because the nursery is never evicted.
9893 Label notOne;
9894 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
9895 masm.movePtr(lhs, output);
9896 masm.jump(ool->rejoin());
9897 masm.bind(&notOne);
9899 static constexpr auto DigitMin = std::numeric_limits<
9900 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
9902 // Handle an integer overflow from INT{32,64}_MIN / -1.
9903 Label notOverflow;
9904 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
9905 masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
9906 masm.bind(&notOverflow);
9908 emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
9910 masm.bind(ool->rejoin());
// visitBigIntMod: BigInt remainder. Handles mod-by-zero (VM call which
// throws), the 0n dividend, the |abs(lhs) < abs(rhs)| identity, and the
// INT{32,64}_MIN % -1 overflow case (result forced to 0) before the inline
// single-digit modulo; everything else goes to BigInt::mod.
9913 void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
9914 Register lhs = ToRegister(ins->lhs());
9915 Register rhs = ToRegister(ins->rhs());
9916 Register temp1 = ToRegister(ins->temp1());
9917 Register temp2 = ToRegister(ins->temp2());
9918 Register output = ToRegister(ins->output());
9920 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9921 auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
9922 StoreRegisterTo(output));
9924 // x % 0 throws an error.
9925 if (ins->mir()->canBeDivideByZero()) {
9926 masm.branchIfBigIntIsZero(rhs, ool->entry());
9929 // 0n % x == 0n
9930 Label lhsNonZero;
9931 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9932 masm.movePtr(lhs, output);
9933 masm.jump(ool->rejoin());
9934 masm.bind(&lhsNonZero);
9936 // Call into the VM when either operand can't be loaded into a pointer-sized
9937 // register.
9938 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
9939 masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
9941 // Similar to the case for BigInt division, we must apply the same allocation
9942 // optimizations as performed in |BigInt::mod()|.
9943 Label notBelow;
9944 masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
9945 masm.movePtr(lhs, output);
9946 masm.jump(ool->rejoin());
9947 masm.bind(&notBelow);
9949 // Convert both digits to signed pointer-sized values.
9950 masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
9951 masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
9953 static constexpr auto DigitMin = std::numeric_limits<
9954 mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
9956 // Handle an integer overflow from INT{32,64}_MIN / -1.
9957 Label notOverflow;
9958 masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
9959 masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
9960 masm.movePtr(ImmWord(0), temp1);
9961 masm.bind(&notOverflow);
9963 emitBigIntMod(ins, temp1, temp2, output, ool->entry());
9965 masm.bind(ool->rejoin());
// visitBigIntPow: BigInt exponentiation. Rejects negative exponents (VM
// call which throws), short-circuits the ±1n base, 0n exponent, 0n base,
// and 1n exponent identities, then runs a square-and-multiply loop on
// pointer-sized digits; any overflow falls back to BigInt::pow.
9968 void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
9969 Register lhs = ToRegister(ins->lhs());
9970 Register rhs = ToRegister(ins->rhs());
9971 Register temp1 = ToRegister(ins->temp1());
9972 Register temp2 = ToRegister(ins->temp2());
9973 Register output = ToRegister(ins->output());
9975 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9976 auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
9977 StoreRegisterTo(output));
9979 // x ** -y throws an error.
9980 if (ins->mir()->canBeNegativeExponent()) {
9981 masm.branchIfBigIntIsNegative(rhs, ool->entry());
// Register roles for the square-and-multiply loop below.
9984 Register dest = temp1;
9985 Register base = temp2;
9986 Register exponent = output;
9988 Label done;
9989 masm.movePtr(ImmWord(1), dest); // p = 1
9991 // 1n ** y == 1n
9992 // -1n ** y == 1n when y is even
9993 // -1n ** y == -1n when y is odd
9994 Label lhsNotOne;
9995 masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
9996 Imm32(1), &lhsNotOne);
9997 masm.loadFirstBigIntDigitOrZero(lhs, base);
9998 masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
10000 masm.loadFirstBigIntDigitOrZero(rhs, exponent);
10002 Label lhsNonNegative;
10003 masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
// -1n with an even exponent: fall through to |done| to build 1n.
10004 masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
10005 masm.bind(&lhsNonNegative);
10006 masm.movePtr(lhs, output);
10007 masm.jump(ool->rejoin());
10009 masm.bind(&lhsNotOne);
10011 // x ** 0n == 1n
10012 masm.branchIfBigIntIsZero(rhs, &done);
10014 // 0n ** y == 0n with y != 0n
10015 Label lhsNonZero;
10016 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10018 masm.movePtr(lhs, output);
10019 masm.jump(ool->rejoin());
10021 masm.bind(&lhsNonZero);
10023 // Call into the VM when the exponent can't be loaded into a pointer-sized
10024 // register.
10025 masm.loadBigIntAbsolute(rhs, exponent, ool->entry());
10027 // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
10028 masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
10029 ool->entry());
10031 // x ** 1n == x
10032 Label rhsNotOne;
10033 masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
10035 masm.movePtr(lhs, output);
10036 masm.jump(ool->rejoin());
10038 masm.bind(&rhsNotOne);
10040 // Call into the VM when the base operand can't be loaded into a pointer-sized
10041 // register.
10042 masm.loadBigIntNonZero(lhs, base, ool->entry());
10044 // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
10046 // m = base
10047 // n = exponent
10049 Label start, loop;
10050 masm.jump(&start);
10051 masm.bind(&loop);
10053 // m *= m
10054 masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());
10056 masm.bind(&start);
10058 // if ((n & 1) != 0) p *= m
10059 Label even;
10060 masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
10061 masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
10062 masm.bind(&even);
10064 // n >>= 1
10065 // if (n == 0) return p
10066 masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
10069 MOZ_ASSERT(temp1 == dest);
10071 // Create and return the result.
10072 masm.bind(&done);
10073 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10074 masm.initializeBigInt(output, temp1);
10076 masm.bind(ool->rejoin());
// visitBigIntBitAnd: BigInt bitwise AND. Zero-operand fast paths, then an
// inline andPtr on single-digit operands; otherwise BigInt::bitAnd.
10079 void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
10080 Register lhs = ToRegister(ins->lhs());
10081 Register rhs = ToRegister(ins->rhs());
10082 Register temp1 = ToRegister(ins->temp1());
10083 Register temp2 = ToRegister(ins->temp2());
10084 Register output = ToRegister(ins->output());
10086 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10087 auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
10088 StoreRegisterTo(output));
10090 // 0n & x == 0n
10091 Label lhsNonZero;
10092 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10093 masm.movePtr(lhs, output);
10094 masm.jump(ool->rejoin());
10095 masm.bind(&lhsNonZero);
10097 // x & 0n == 0n
10098 Label rhsNonZero;
10099 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10100 masm.movePtr(rhs, output);
10101 masm.jump(ool->rejoin());
10102 masm.bind(&rhsNonZero);
10104 // Call into the VM when either operand can't be loaded into a pointer-sized
10105 // register.
10106 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10107 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10109 masm.andPtr(temp2, temp1);
10111 // Create and return the result.
10112 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10113 masm.initializeBigInt(output, temp1);
10115 masm.bind(ool->rejoin());
// visitBigIntBitOr: BigInt bitwise OR. Zero-operand fast paths, then an
// inline orPtr on single-digit operands; otherwise BigInt::bitOr.
10118 void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
10119 Register lhs = ToRegister(ins->lhs());
10120 Register rhs = ToRegister(ins->rhs());
10121 Register temp1 = ToRegister(ins->temp1());
10122 Register temp2 = ToRegister(ins->temp2());
10123 Register output = ToRegister(ins->output());
10125 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10126 auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
10127 StoreRegisterTo(output));
10129 // 0n | x == x
10130 Label lhsNonZero;
10131 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10132 masm.movePtr(rhs, output);
10133 masm.jump(ool->rejoin());
10134 masm.bind(&lhsNonZero);
10136 // x | 0n == x
10137 Label rhsNonZero;
10138 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10139 masm.movePtr(lhs, output);
10140 masm.jump(ool->rejoin());
10141 masm.bind(&rhsNonZero);
10143 // Call into the VM when either operand can't be loaded into a pointer-sized
10144 // register.
10145 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10146 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10148 masm.orPtr(temp2, temp1);
10150 // Create and return the result.
10151 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10152 masm.initializeBigInt(output, temp1);
10154 masm.bind(ool->rejoin());
// visitBigIntBitXor: BigInt bitwise XOR. Zero-operand fast paths, then an
// inline xorPtr on single-digit operands; otherwise BigInt::bitXor.
10157 void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
10158 Register lhs = ToRegister(ins->lhs());
10159 Register rhs = ToRegister(ins->rhs());
10160 Register temp1 = ToRegister(ins->temp1());
10161 Register temp2 = ToRegister(ins->temp2());
10162 Register output = ToRegister(ins->output());
10164 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10165 auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
10166 StoreRegisterTo(output));
10168 // 0n ^ x == x
10169 Label lhsNonZero;
10170 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10171 masm.movePtr(rhs, output);
10172 masm.jump(ool->rejoin());
10173 masm.bind(&lhsNonZero);
10175 // x ^ 0n == x
10176 Label rhsNonZero;
10177 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10178 masm.movePtr(lhs, output);
10179 masm.jump(ool->rejoin());
10180 masm.bind(&rhsNonZero);
10182 // Call into the VM when either operand can't be loaded into a pointer-sized
10183 // register.
10184 masm.loadBigIntNonZero(lhs, temp1, ool->entry());
10185 masm.loadBigIntNonZero(rhs, temp2, ool->entry());
10187 masm.xorPtr(temp2, temp1);
10189 // Create and return the result.
10190 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10191 masm.initializeBigInt(output, temp1);
10193 masm.bind(ool->rejoin());
// visitBigIntLsh: BigInt left shift, inlined for a single-digit |lhs|.
// Negative shift counts are handled as right shifts (with rounding toward
// negative infinity for negative |lhs|); shifts >= DigitBits and any bit
// loss on a positive shift fall back to BigInt::lsh in the VM.
10196 void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
10197 Register lhs = ToRegister(ins->lhs());
10198 Register rhs = ToRegister(ins->rhs());
10199 Register temp1 = ToRegister(ins->temp1());
10200 Register temp2 = ToRegister(ins->temp2());
10201 Register temp3 = ToRegister(ins->temp3());
10202 Register output = ToRegister(ins->output());
10204 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10205 auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
10206 StoreRegisterTo(output));
10208 // 0n << x == 0n
10209 Label lhsNonZero;
10210 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10211 masm.movePtr(lhs, output);
10212 masm.jump(ool->rejoin());
10213 masm.bind(&lhsNonZero);
10215 // x << 0n == x
10216 Label rhsNonZero;
10217 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10218 masm.movePtr(lhs, output);
10219 masm.jump(ool->rejoin());
10220 masm.bind(&rhsNonZero);
10222 // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
10224 Label rhsTooLarge;
10225 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10227 // Call into the VM when the left-hand side operand can't be loaded into a
10228 // pointer-sized register.
10229 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10231 // Handle shifts exceeding |BigInt::DigitBits| first.
10232 Label shift, create;
10233 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10235 masm.bind(&rhsTooLarge);
10237 // x << DigitBits with x != 0n always exceeds pointer-sized storage.
10238 masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
10240 // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
10241 masm.move32(Imm32(0), temp1);
10242 masm.branchIfBigIntIsNonNegative(lhs, &create);
10243 masm.move32(Imm32(1), temp1);
10244 masm.jump(&create);
10246 masm.bind(&shift);
10248 Label nonNegative;
10249 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: keep the original digit in temp3 for the
// rounding check below.
10251 masm.movePtr(temp1, temp3);
10253 // |x << -y| is computed as |x >> y|.
10254 masm.rshiftPtr(temp2, temp1);
10256 // For negative numbers, round down if any bit was shifted out.
10257 masm.branchIfBigIntIsNonNegative(lhs, &create);
10259 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10260 masm.movePtr(ImmWord(-1), output);
10261 masm.lshiftPtr(temp2, output);
10262 masm.notPtr(output);
10264 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10265 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10266 masm.addPtr(ImmWord(1), temp1);
10267 masm.jump(&create);
10269 masm.bind(&nonNegative);
// Positive shift count: stash it in temp3 while temp2 is reused to
// compute the overflow probe.
10271 masm.movePtr(temp2, temp3);
10273 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10274 masm.negPtr(temp2);
10275 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10276 masm.movePtr(temp1, output);
10277 masm.rshiftPtr(temp2, output);
10279 // Call into the VM when any bit will be shifted out.
10280 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10282 masm.movePtr(temp3, temp2);
10283 masm.lshiftPtr(temp2, temp1);
10285 masm.bind(&create);
10287 // Create and return the result.
10288 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10289 masm.initializeBigIntAbsolute(output, temp1);
10291 // Set the sign bit when the left-hand side is negative.
10292 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10293 masm.or32(Imm32(BigInt::signBitMask()),
10294 Address(output, BigInt::offsetOfFlags()));
10296 masm.bind(ool->rejoin());
// visitBigIntRsh: BigInt right shift, inlined for a single-digit |lhs|.
// Mirror image of visitBigIntLsh: negative shift counts become left shifts
// (VM fallback on bit loss), and negative |lhs| values round toward
// negative infinity when bits are shifted out.
10299 void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
10300 Register lhs = ToRegister(ins->lhs());
10301 Register rhs = ToRegister(ins->rhs());
10302 Register temp1 = ToRegister(ins->temp1());
10303 Register temp2 = ToRegister(ins->temp2());
10304 Register temp3 = ToRegister(ins->temp3());
10305 Register output = ToRegister(ins->output());
10307 using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
10308 auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
10309 StoreRegisterTo(output));
10311 // 0n >> x == 0n
10312 Label lhsNonZero;
10313 masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
10314 masm.movePtr(lhs, output);
10315 masm.jump(ool->rejoin());
10316 masm.bind(&lhsNonZero);
10318 // x >> 0n == x
10319 Label rhsNonZero;
10320 masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
10321 masm.movePtr(lhs, output);
10322 masm.jump(ool->rejoin());
10323 masm.bind(&rhsNonZero);
10325 // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
10327 Label rhsTooLarge;
10328 masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
10330 // Call into the VM when the left-hand side operand can't be loaded into a
10331 // pointer-sized register.
10332 masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
10334 // Handle shifts exceeding |BigInt::DigitBits| first.
10335 Label shift, create;
10336 masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
10338 masm.bind(&rhsTooLarge);
10340 // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
10341 masm.branchIfBigIntIsNegative(rhs, ool->entry());
10343 // x >> DigitBits is either 0n or -1n.
10344 masm.move32(Imm32(0), temp1);
10345 masm.branchIfBigIntIsNonNegative(lhs, &create);
10346 masm.move32(Imm32(1), temp1);
10347 masm.jump(&create);
10349 masm.bind(&shift);
10351 Label nonNegative;
10352 masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
// Negative shift count: stash it in temp3 while temp2 is reused to
// compute the overflow probe.
10354 masm.movePtr(temp2, temp3);
10356 // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
10357 masm.negPtr(temp2);
10358 masm.addPtr(Imm32(BigInt::DigitBits), temp2);
10359 masm.movePtr(temp1, output);
10360 masm.rshiftPtr(temp2, output);
10362 // Call into the VM when any bit will be shifted out.
10363 masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
10365 // |x >> -y| is computed as |x << y|.
10366 masm.movePtr(temp3, temp2);
10367 masm.lshiftPtr(temp2, temp1);
10368 masm.jump(&create);
10370 masm.bind(&nonNegative);
// Positive shift count: keep the original digit in temp3 for the
// rounding check below.
10372 masm.movePtr(temp1, temp3);
10374 masm.rshiftPtr(temp2, temp1);
10376 // For negative numbers, round down if any bit was shifted out.
10377 masm.branchIfBigIntIsNonNegative(lhs, &create);
10379 // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
10380 masm.movePtr(ImmWord(-1), output);
10381 masm.lshiftPtr(temp2, output);
10382 masm.notPtr(output);
10384 // Add plus one when |(lhs.digit(0) & mask) != 0|.
10385 masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
10386 masm.addPtr(ImmWord(1), temp1);
10388 masm.bind(&create);
10390 // Create and return the result.
10391 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10392 masm.initializeBigIntAbsolute(output, temp1);
10394 // Set the sign bit when the left-hand side is negative.
10395 masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
10396 masm.or32(Imm32(BigInt::signBitMask()),
10397 Address(output, BigInt::offsetOfFlags()));
10399 masm.bind(ool->rejoin());
// visitBigIntIncrement: BigInt ++ (x + 1n). Inline single-digit add of 1;
// heap-digit inputs, overflow, or allocation failure go to BigInt::inc.
10402 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
10403 Register input = ToRegister(ins->input());
10404 Register temp1 = ToRegister(ins->temp1());
10405 Register temp2 = ToRegister(ins->temp2());
10406 Register output = ToRegister(ins->output());
10408 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10409 auto* ool =
10410 oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
10412 // Call into the VM when the input can't be loaded into a pointer-sized
10413 // register.
10414 masm.loadBigInt(input, temp1, ool->entry());
10415 masm.movePtr(ImmWord(1), temp2);
10417 masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10419 // Create and return the result.
10420 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10421 masm.initializeBigInt(output, temp1);
10423 masm.bind(ool->rejoin());
// visitBigIntDecrement: BigInt -- (x - 1n). Inline single-digit subtract of
// 1; heap-digit inputs, overflow, or allocation failure go to BigInt::dec.
10426 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
10427 Register input = ToRegister(ins->input());
10428 Register temp1 = ToRegister(ins->temp1());
10429 Register temp2 = ToRegister(ins->temp2());
10430 Register output = ToRegister(ins->output());
10432 using Fn = BigInt* (*)(JSContext*, HandleBigInt);
10433 auto* ool =
10434 oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
10436 // Call into the VM when the input can't be loaded into a pointer-sized
10437 // register.
10438 masm.loadBigInt(input, temp1, ool->entry());
10439 masm.movePtr(ImmWord(1), temp2);
10441 masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
10443 // Create and return the result.
10444 masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
10445 masm.initializeBigInt(output, temp1);
10447 masm.bind(ool->rejoin());
// Emit inline code for BigInt unary negation (-x). Zero is returned
// unchanged (-0n == 0n); otherwise the BigInt is copied and its sign bit
// flipped. Falls back to the out-of-line VM call (BigInt::neg) when the
// input uses heap digits.
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n, so the (immutable) input can be returned as-is.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit inline code for BigInt bitwise NOT (~x). Works on the absolute value
// (|loadBigIntAbsolute|) and sets the sign bit afterwards, mirroring the C++
// implementation. Falls back to the out-of-line VM call (BigInt::bitNot) when
// the magnitude doesn't fit a pointer-sized register, the increment carries
// out, or result allocation fails.
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);

  // ~(-x) == ~(~(x-1)) == x-1
  masm.subPtr(Imm32(1), temp1);
  masm.jump(&done);

  masm.bind(&nonNegative);

  // ~x == -x-1 == -(x+1)
  masm.movePtr(ImmWord(1), temp2);
  masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());

  masm.bind(&done);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive.
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Convert an int32 to a string in a given base. The base is either a compile
// time constant or a runtime register value; both variants use a
// MacroAssembler fast path and fall back to the js::Int32ToStringWithBase VM
// call on its out-of-line path.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    // The register-base variant additionally receives the live volatile
    // registers so the fast path can spill around internal calls.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Number.parseInt(string, radix) with radix restricted to 0 or 10 (asserted
// in debug builds). If the string caches an index value, that index is tagged
// as an int32 directly; otherwise the generic js::NumberParseInt VM function
// is called.
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    pushArg(radix);
    pushArg(string);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callVM<Fn, js::NumberParseInt>(lir);
  }
  masm.bind(&done);
}
// Number.parseInt on a double input: truncate towards zero to an int32,
// bailing out of Ion for NaN, for values that don't truncate to an int32,
// and for small non-zero values whose decimal string form would not start
// with a digit (exclusive range (-1, 1.0e-6)).
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // NaN compares unordered with itself.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
10606 void CodeGenerator::visitFloor(LFloor* lir) {
10607 FloatRegister input = ToFloatRegister(lir->input());
10608 Register output = ToRegister(lir->output());
10610 Label bail;
10611 masm.floorDoubleToInt32(input, output, &bail);
10612 bailoutFrom(&bail, lir->snapshot());
10615 void CodeGenerator::visitFloorF(LFloorF* lir) {
10616 FloatRegister input = ToFloatRegister(lir->input());
10617 Register output = ToRegister(lir->output());
10619 Label bail;
10620 masm.floorFloat32ToInt32(input, output, &bail);
10621 bailoutFrom(&bail, lir->snapshot());
10624 void CodeGenerator::visitCeil(LCeil* lir) {
10625 FloatRegister input = ToFloatRegister(lir->input());
10626 Register output = ToRegister(lir->output());
10628 Label bail;
10629 masm.ceilDoubleToInt32(input, output, &bail);
10630 bailoutFrom(&bail, lir->snapshot());
10633 void CodeGenerator::visitCeilF(LCeilF* lir) {
10634 FloatRegister input = ToFloatRegister(lir->input());
10635 Register output = ToRegister(lir->output());
10637 Label bail;
10638 masm.ceilFloat32ToInt32(input, output, &bail);
10639 bailoutFrom(&bail, lir->snapshot());
10642 void CodeGenerator::visitRound(LRound* lir) {
10643 FloatRegister input = ToFloatRegister(lir->input());
10644 FloatRegister temp = ToFloatRegister(lir->temp0());
10645 Register output = ToRegister(lir->output());
10647 Label bail;
10648 masm.roundDoubleToInt32(input, output, temp, &bail);
10649 bailoutFrom(&bail, lir->snapshot());
10652 void CodeGenerator::visitRoundF(LRoundF* lir) {
10653 FloatRegister input = ToFloatRegister(lir->input());
10654 FloatRegister temp = ToFloatRegister(lir->temp0());
10655 Register output = ToRegister(lir->output());
10657 Label bail;
10658 masm.roundFloat32ToInt32(input, output, temp, &bail);
10659 bailoutFrom(&bail, lir->snapshot());
10662 void CodeGenerator::visitTrunc(LTrunc* lir) {
10663 FloatRegister input = ToFloatRegister(lir->input());
10664 Register output = ToRegister(lir->output());
10666 Label bail;
10667 masm.truncDoubleToInt32(input, output, &bail);
10668 bailoutFrom(&bail, lir->snapshot());
10671 void CodeGenerator::visitTruncF(LTruncF* lir) {
10672 FloatRegister input = ToFloatRegister(lir->input());
10673 Register output = ToRegister(lir->output());
10675 Label bail;
10676 masm.truncFloat32ToInt32(input, output, &bail);
10677 bailoutFrom(&bail, lir->snapshot());
// Compare two strings with the MIR node's comparison op. A MacroAssembler
// fast path (compareStrings) handles the common cases and jumps to an
// out-of-line VM call otherwise. Only "equal", "not equal", "less than" and
// "greater than or equal" VM functions exist, so |Le| and |Gt| swap their
// operands.
void CodeGenerator::visitCompareS(LCompareS* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Equality comparison of a string against a known non-empty constant linear
// string. Cheap checks (pointer identity, atom-ness, character encoding,
// length) decide most cases without touching the characters; only when the
// lengths match is a character-by-character comparison emitted, with an
// out-of-line VM call as last resort.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal, Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Relational comparison of a string against a constant single-character
// string. Only the input's first character and its length are needed, so
// ropes are handled by walking their left-most children instead of
// linearizing; no VM call is required.
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);

  // Unwind ropes at the start if possible.
  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeLeftChild(temp, output);
  masm.movePtr(output, temp);

#ifdef DEBUG
  Label notEmpty;
  masm.branch32(Assembler::NotEqual,
                Address(temp, JSString::offsetOfLength()), Imm32(0),
                &notEmpty);
  masm.assumeUnreachable("rope children are non-empty");
  masm.bind(&notEmpty);
#endif

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to any
    // character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
// Compare two BigInts. |equalBigInts| compares sign, digit length and digits
// and jumps to the supplied labels on the first mismatch; equality ops only
// need a single "not same" target, while relational ops dispatch to
// per-mismatch fixup code and invert the result for negative operands.
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Falling through here means the operands are equal, which makes Eq, Le
  // and Ge true.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
10970 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
10971 JSOp op = lir->mir()->jsop();
10972 Register left = ToRegister(lir->left());
10973 Register right = ToRegister(lir->right());
10974 Register temp0 = ToRegister(lir->temp0());
10975 Register temp1 = ToRegister(lir->temp1());
10976 Register output = ToRegister(lir->output());
10978 Label ifTrue, ifFalse;
10979 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
10981 Label done;
10982 masm.bind(&ifFalse);
10983 masm.move32(Imm32(0), output);
10984 masm.jump(&done);
10985 masm.bind(&ifTrue);
10986 masm.move32(Imm32(1), output);
10987 masm.bind(&done);
// Compare a BigInt against a double through an aligned ABI call. The C++
// helpers only implement "equal", "not equal", "less than" and "greater than
// or equal", so |Le| and |Gt| pass their arguments in reverse order and use
// the (double, BigInt*) variants.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, ABIType::Float64);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, ABIType::Float64);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt against a string via a VM call. Only "equal", "not
// equal", "less than" and "greater than or equal" VM functions exist, so
// |Le| and |Gt| push their arguments in reverse order and use the
// (String, BigInt) variants.
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose equality (==/!=) of a boxed value against null/undefined, producing
// a boolean. Null and undefined match directly; objects additionally match
// when they emulate undefined (checked partly out-of-line).
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* nullOrLikeUndefined = ool->label1();
  Label* notNullOrLikeUndefined = ool->label2();

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
    masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

    // Check whether it's a truthy object or a falsy object that emulates
    // undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
  }

  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
  branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                    notNullOrLikeUndefined, output, ool);
  // fall through

  Label done;

  // It's not null or undefined, and if it's an object it doesn't
  // emulate undefined, so it's not like undefined.
  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(nullOrLikeUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);

  // Both branches meet here.
  masm.bind(&done);
}
// Branching form of the loose null/undefined comparison on a boxed value:
// jumps directly to the successor blocks instead of materializing a boolean.
// For |Ne| the successors are simply swapped.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    // Only objects can still compare equal to null/undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
}
// Loose equality (==/!=) of a known-object operand against null/undefined,
// producing a boolean: true (for ==) exactly when the object emulates
// undefined.
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* emulatesUndefined = ool->label1();
  Label* doesntEmulateUndefined = ool->label2();

  branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                    doesntEmulateUndefined, output, ool);

  Label done;

  // Doesn't emulate undefined: |==| is false, |!=| is true.
  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(emulatesUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);
  masm.bind(&done);
}
// Branching form of the known-object loose null/undefined comparison: jumps
// to the successor blocks based on whether the object emulates undefined.
// For |Ne| the successors are simply swapped.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->getOperand(0));

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
}
11258 void CodeGenerator::visitIsNull(LIsNull* lir) {
11259 MCompare::CompareType compareType = lir->mir()->compareType();
11260 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11262 JSOp op = lir->mir()->jsop();
11263 MOZ_ASSERT(IsStrictEqualityOp(op));
11265 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11266 Register output = ToRegister(lir->output());
11268 Assembler::Condition cond = JSOpToCondition(compareType, op);
11269 masm.testNullSet(cond, value, output);
11272 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11273 MCompare::CompareType compareType = lir->mir()->compareType();
11274 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11276 JSOp op = lir->mir()->jsop();
11277 MOZ_ASSERT(IsStrictEqualityOp(op));
11279 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11280 Register output = ToRegister(lir->output());
11282 Assembler::Condition cond = JSOpToCondition(compareType, op);
11283 masm.testUndefinedSet(cond, value, output);
11286 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11287 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11288 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11290 JSOp op = lir->cmpMir()->jsop();
11291 MOZ_ASSERT(IsStrictEqualityOp(op));
11293 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11295 Assembler::Condition cond = JSOpToCondition(compareType, op);
11296 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11299 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11300 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11301 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11303 JSOp op = lir->cmpMir()->jsop();
11304 MOZ_ASSERT(IsStrictEqualityOp(op));
11306 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11308 Assembler::Condition cond = JSOpToCondition(compareType, op);
11309 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11312 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11313 FloatRegister left = ToFloatRegister(lir->left());
11314 FloatRegister right = ToFloatRegister(lir->right());
11315 FloatRegister temp = ToFloatRegister(lir->temp0());
11316 Register output = ToRegister(lir->output());
11318 masm.sameValueDouble(left, right, temp, output);
// SameValue (Object.is) on two boxed values: bitwise-identical boxed values
// are trivially same-value; anything else is decided by the SameValue VM
// call on the out-of-line path.
void CodeGenerator::visitSameValue(LSameValue* lir) {
  ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
  ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  OutOfLineCode* ool =
      oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));

  // First check to see if the values have identical bits.
  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  // and SameValue(0,-0) is false.
  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
                ool->entry());
  masm.move32(Imm32(1), output);

  // If this fails, call SameValue.
  masm.bind(ool->rejoin());
}
// Shared helper for string concatenation: calls the per-zone string-concat
// stub and, when the stub signals failure by returning a null pointer in
// |output|, retries via the ConcatStrings VM call on the out-of-line path.
void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
                               Register output) {
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
      StoreRegisterTo(output));

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* stringConcatStub =
      jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(stringConcatStub);

  // A null result from the stub means it couldn't concatenate inline.
  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Concatenate two strings. The string-concat stub uses a fixed register
// protocol, so operands, temps and output are pinned to specific call-temp
// registers (asserted below) before delegating to emitConcat.
void CodeGenerator::visitConcat(LConcat* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  Register output = ToRegister(lir->output());

  MOZ_ASSERT(lhs == CallTempReg0);
  MOZ_ASSERT(rhs == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
  MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
  MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
  MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
  MOZ_ASSERT(output == CallTempReg5);

  emitConcat(lir, lhs, rhs, output);
}
// Emit code that copies |len| characters from |from| to |to|, converting
// between the given character encodings (Latin-1 may be inflated to two-byte,
// never the reverse). When both encodings match, copies are done in the
// widest unit possible (up to a full pointer word), with small loops fully
// unrolled; |maximumLength|, when known, lets us skip alignment steps that
// can never apply. Both |from| and |to| are advanced past the copied data.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding fromEncoding, CharEncoding toEncoding,
                            size_t maximumLength = SIZE_MAX) {
  // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
  // (checked below in debug builds), and when done |to| must point to the
  // next available char.

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);

  if (maximumLength != SIZE_MAX) {
    MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");

    Label ok;
    masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("Length should not exceed maximum length.");
    masm.bind(&ok);
  }
#endif

  // Deflating two-byte to Latin-1 is not supported here.
  MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
                fromEncoding == CharEncoding::Latin1);

  size_t fromWidth =
      fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
  size_t toWidth =
      toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);

  // Try to copy multiple characters at once when both encoding are equal.
  if (fromEncoding == toEncoding) {
    constexpr size_t ptrWidth = sizeof(uintptr_t);

    // Copy |width| bytes and then adjust |from| and |to|.
    auto copyCharacters = [&](size_t width) {
      static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");

      switch (width) {
        case 1:
          masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store8(byteOpScratch, Address(to, 0));
          break;
        case 2:
          masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store16(byteOpScratch, Address(to, 0));
          break;
        case 4:
          masm.load32(Address(from, 0), byteOpScratch);
          masm.store32(byteOpScratch, Address(to, 0));
          break;
        case 8:
          MOZ_ASSERT(width == ptrWidth);
          masm.loadPtr(Address(from, 0), byteOpScratch);
          masm.storePtr(byteOpScratch, Address(to, 0));
          break;
      }

      masm.addPtr(Imm32(width), from);
      masm.addPtr(Imm32(width), to);
    };

    // First align |len| to pointer width: copy 1-, 2-, 4-byte chunks for any
    // low bits set in |len| so the main loop can use full-word copies.
    Label done;
    for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
      // Number of characters which fit into |width| bytes.
      size_t charsPerWidth = width / fromWidth;

      if (charsPerWidth < maximumLength) {
        Label next;
        masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);

        copyCharacters(width);

        masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
        masm.bind(&next);
      } else if (charsPerWidth == maximumLength) {
        // The whole string fits into this chunk size; no branch needed.
        copyCharacters(width);
        masm.sub32(Imm32(charsPerWidth), len);
      }
    }

    size_t maxInlineLength;
    if (fromEncoding == CharEncoding::Latin1) {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
    } else {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    }

    // Number of characters which fit into a single register.
    size_t charsPerPtr = ptrWidth / fromWidth;

    // Unroll small loops.
    constexpr size_t unrollLoopLimit = 3;
    size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;

#ifdef JS_64BIT
    static constexpr size_t latin1MaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
    static constexpr size_t twoByteMaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);

    // |unrollLoopLimit| should be large enough to allow loop unrolling on
    // 64-bit targets.
    static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Latin-1 loops are unrolled on 64-bit");
    static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Two-byte loops are unrolled on 64-bit");
#endif

    if (loopCount <= unrollLoopLimit) {
      Label labels[unrollLoopLimit];

      // Check up front how many characters can be copied.
      for (size_t i = 1; i < loopCount; i++) {
        masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
                      &labels[i]);
      }

      // Generate the unrolled loop body.
      for (size_t i = loopCount; i > 0; i--) {
        copyCharacters(ptrWidth);
        masm.sub32(Imm32(charsPerPtr), len);

        // Jump target for the previous length check.
        if (i != 1) {
          masm.bind(&labels[i - 1]);
        }
      }
    } else {
      // Length is unbounded (or large): emit a plain word-copy loop.
      Label start;
      masm.bind(&start);
      copyCharacters(ptrWidth);
      masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
    }

    masm.bind(&done);
  } else {
    // Encodings differ (Latin-1 -> two-byte inflation): copy one character
    // at a time, widening via loadChar/storeChar.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
    masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
    masm.addPtr(Imm32(fromWidth), from);
    masm.addPtr(Imm32(toWidth), to);
    masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
  }
}
11526 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11527 Register len, Register byteOpScratch,
11528 CharEncoding encoding, size_t maximumLength) {
11529 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
11530 maximumLength);
// Copy all of |input|'s characters into the two-byte buffer |destChars|,
// inflating from Latin-1 when necessary. Clobbers |input| (it is reused as
// the source chars pointer), |temp1| (length) and |temp2| (scratch), and
// advances |destChars| past the copied data.
static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
                                        Register destChars, Register temp1,
                                        Register temp2) {
  // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
  // have to inflate.

  Label isLatin1, done;
  masm.loadStringLength(input, temp1);
  masm.branchLatin1String(input, &isLatin1);
  {
    // Two-byte source: straight same-encoding copy.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2,
                    CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    // Latin-1 source: copy with inflation to two-byte.
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.movePtr(temp2, input);
    CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
                    CharEncoding::TwoByte);
  }
  masm.bind(&done);
}
// Allocate a JSThinInlineString or JSFatInlineString (picked by |length|
// against the thin-inline limit for |encoding|) into |output|, initialize its
// flags and length, and jump to |failure| if GC allocation fails. The caller
// guarantees |length| fits an inline string (debug-asserted below).
static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
                                          Register length, Register temp,
                                          gc::Heap initialStringHeap,
                                          Label* failure,
                                          CharEncoding encoding) {
#ifdef DEBUG
  size_t maxInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label ok;
  masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
  masm.assumeUnreachable("string length too large to be allocated as inline");
  masm.bind(&ok);
#endif

  size_t maxThinInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label isFat, allocDone;
  masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
  {
    // Thin inline string.
    uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    masm.jump(&allocDone);
  }
  masm.bind(&isFat);
  {
    // Fat inline string.
    uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
  }
  masm.bind(&allocDone);

  // Store length.
  masm.store32(length, Address(output, JSString::offsetOfLength()));
}
// Concatenate two linear strings into a freshly allocated inline string of
// the given encoding. On entry the combined result length is in temp2.
// Clobbers lhs, rhs, temp1-temp3; jumps to |failure| if either input is a
// rope or if allocation fails.
static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
                               Register output, Register temp1, Register temp2,
                               Register temp3, gc::Heap initialStringHeap,
                               Label* failure, CharEncoding encoding) {
  JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
          (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // State: result length in temp2.

  // Ensure both strings are linear.
  masm.branchIfRope(lhs, failure);
  masm.branchIfRope(rhs, failure);

  // Allocate a JSThinInlineString or JSFatInlineString.
  AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
                                failure, encoding);

  // Load chars pointer in temp2.
  masm.loadInlineStringCharsForStore(output, temp2);

  auto copyChars = [&](Register src) {
    if (encoding == CharEncoding::TwoByte) {
      // Result is two-byte: source may need inflation from Latin-1.
      CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
    } else {
      // Latin-1 result implies both inputs are Latin-1.
      masm.loadStringLength(src, temp3);
      masm.loadStringChars(src, temp1, CharEncoding::Latin1);
      masm.movePtr(temp1, src);
      CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
    }
  };

  // Copy lhs chars. Note that this advances temp2 to point to the next
  // char. This also clobbers the lhs register.
  copyChars(lhs);

  // Copy rhs chars. Clobbers the rhs register.
  copyChars(rhs);
}
// Emit String.prototype.substring-style extraction. Fast paths, in order:
// empty result -> atom empty string; whole-string result -> the input itself;
// length 1 or 2 -> interned static strings; small results -> a fresh thin/fat
// inline string (chars copied); anything larger -> a dependent string sharing
// the input's chars. Ropes and all edge cases go to the SubstringKernel VM
// call. Range analysis on the length operand prunes paths that cannot occur.
void CodeGenerator::visitSubstr(LSubstr* lir) {
  Register string = ToRegister(lir->string());
  Register begin = ToRegister(lir->begin());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as temporary.
  Register temp1 =
      lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());

  size_t maximumLength = SIZE_MAX;

  Range* range = lir->mir()->length()->range();
  if (range && range->hasInt32UpperBound()) {
    MOZ_ASSERT(range->upper() >= 0);
    maximumLength = size_t(range->upper());
  }

  static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
                JSThinInlineString::MAX_LENGTH_LATIN1);

  static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
                JSFatInlineString::MAX_LENGTH_LATIN1);

  // Skip codegen for paths the length upper bound rules out.
  bool tryFatInlineOrDependent =
      maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;

#ifdef DEBUG
  if (maximumLength != SIZE_MAX) {
    Label ok;
    masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("length should not exceed maximum length");
    masm.bind(&ok);
  }
#endif

  Label nonZero, nonInput;

  // For every edge case use the C++ variant.
  // Note: we also use this upon allocation failure in newGCString and
  // newGCFatInlineString. To squeeze out even more performance those failures
  // can be handled by allocate in ool code and returning to jit code to fill
  // in all data.
  using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
                           int32_t len);
  OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
      lir, ArgList(string, begin, length), StoreRegisterTo(output));
  Label* slowPath = ool->entry();
  Label* done = ool->rejoin();

  // Zero length, return emptystring.
  masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(done);

  // Substring from 0..|str.length|, return str.
  masm.bind(&nonZero);
  masm.branch32(Assembler::NotEqual,
                Address(string, JSString::offsetOfLength()), length, &nonInput);
#ifdef DEBUG
  {
    Label ok;
    masm.branchTest32(Assembler::Zero, begin, begin, &ok);
    masm.assumeUnreachable("length == str.length implies begin == 0");
    masm.bind(&ok);
  }
#endif
  masm.movePtr(string, output);
  masm.jump(done);

  // Use slow path for ropes.
  masm.bind(&nonInput);
  masm.branchIfRope(string, slowPath);

  // Optimize one and two character strings.
  Label nonStatic;
  masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
  {
    Label loadLengthOne, loadLengthTwo;

    // Load the first char into temp2 and (for length two) the second char
    // into temp0, then look the result up in the static-strings tables.
    auto loadChars = [&](CharEncoding encoding, bool fallthru) {
      size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
                                                     : sizeof(char16_t);

      masm.loadStringChars(string, temp0, encoding);
      masm.loadChar(temp0, begin, temp2, encoding);
      masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
      masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
      if (!fallthru) {
        masm.jump(&loadLengthTwo);
      }
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    loadChars(CharEncoding::TwoByte, /* fallthru = */ false);

    masm.bind(&isLatin1);
    loadChars(CharEncoding::Latin1, /* fallthru = */ true);

    // Try to load a length-two static string.
    masm.bind(&loadLengthTwo);
    masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);

    // Try to load a length-one static string.
    masm.bind(&loadLengthOne);
    masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);
  }
  masm.bind(&nonStatic);

  // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
  // notInline if we need a dependent string.
  Label notInline;
  {
    static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
                  JSFatInlineString::MAX_LENGTH_LATIN1);
    static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
                  JSFatInlineString::MAX_LENGTH_TWO_BYTE);

    // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids
    // having duplicate newGCString/newGCFatInlineString codegen for Latin1 vs
    // TwoByte strings.

    Label allocFat, allocDone;
    if (tryFatInlineOrDependent) {
      Label isLatin1, allocThin;
      masm.branchLatin1String(string, &isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                        &notInline);
        }
        masm.move32(Imm32(0), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
                      &allocFat);
        masm.jump(&allocThin);
      }

      masm.bind(&isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
                        &notInline);
        }
        masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
      }

      masm.bind(&allocThin);
    } else {
      // Only thin-inline results are possible: just extract the Latin-1 bit
      // from the input's flags.
      masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
      masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
    }

    {
      masm.newGCString(output, temp0, initialStringHeap(), slowPath);
      masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
    }

    if (tryFatInlineOrDependent) {
      masm.jump(&allocDone);

      masm.bind(&allocFat);
      {
        masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
        masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
      }

      masm.bind(&allocDone);
    }

    masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
    masm.store32(length, Address(output, JSString::offsetOfLength()));

    // Copy |length| chars starting at |begin| into the new inline string.
    // On x86, temp1 aliases |string|, so save/restore it around the copy.
    auto initializeInlineString = [&](CharEncoding encoding) {
      masm.loadStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      if (temp1 == string) {
        masm.push(string);
      }
      masm.loadInlineStringCharsForStore(output, temp1);
      CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
                      maximumLength);
      // CopyStringChars clobbered |length|; reload it from the result.
      masm.loadStringLength(output, length);
      if (temp1 == string) {
        masm.pop(string);
      }
    };

    Label isInlineLatin1;
    masm.branchTest32(Assembler::NonZero, temp2,
                      Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
    initializeInlineString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isInlineLatin1);
    initializeInlineString(CharEncoding::Latin1);
  }

  // Handle other cases with a DependentString.
  if (tryDependent) {
    masm.jump(done);

    masm.bind(&notInline);
    masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
    masm.store32(length, Address(output, JSString::offsetOfLength()));
    masm.storeDependentStringBase(string, output);

    // Point the dependent string's chars into the base string's buffer.
    auto initializeDependentString = [&](CharEncoding encoding) {
      uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
      if (encoding == CharEncoding::Latin1) {
        flags |= JSString::LATIN1_CHARS_BIT;
      }

      masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
      masm.loadNonInlineStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      masm.storeNonInlineStringChars(temp0, output);
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    initializeDependentString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isLatin1);
    initializeDependentString(CharEncoding::Latin1);
  }

  masm.bind(done);
}
// Build the per-Zone string-concat stub called by CodeGenerator::emitConcat.
// Fast paths: either side empty returns the other side unchanged; small
// results become inline strings; everything else becomes a rope. Returns
// nullptr in |output| (CallTempReg5) on failure so the caller can take its
// VM fallback.
JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");

  // Fixed calling convention shared with visitConcat.
  Register lhs = CallTempReg0;
  Register rhs = CallTempReg1;
  Register temp1 = CallTempReg2;
  Register temp2 = CallTempReg3;
  Register temp3 = CallTempReg4;
  Register output = CallTempReg5;

  Label failure;
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // If lhs is empty, return rhs.
  Label leftEmpty;
  masm.loadStringLength(lhs, temp1);
  masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);

  // If rhs is empty, return lhs.
  Label rightEmpty;
  masm.loadStringLength(rhs, temp2);
  masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);

  // temp2 = combined result length.
  masm.add32(temp1, temp2);

  // Check if we can use a JSInlineString. The result is a Latin1 string if
  // lhs and rhs are both Latin1, so we AND the flags.
  Label isInlineTwoByte, isInlineLatin1;
  masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
  masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);

  Label isLatin1, notInline;
  masm.branchTest32(Assembler::NonZero, temp1,
                    Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                  &isInlineTwoByte);
    masm.jump(&notInline);
  }
  masm.bind(&isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
  }
  masm.bind(&notInline);

  // Keep AND'ed flags in temp1.

  // Ensure result length <= JSString::MAX_LENGTH.
  masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);

  // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
  // == gc::Heap::Default. (As a result, no post barriers are needed below.)
  masm.newGCString(output, temp3, initialStringHeap, &failure);

  // Store rope length and flags. temp1 still holds the result of AND'ing the
  // lhs and rhs flags, so we just have to clear the other flags to get our
  // rope flags (Latin1 if both lhs and rhs are Latin1).
  static_assert(JSString::INIT_ROPE_FLAGS == 0,
                "Rope type flags must have no bits set");
  masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
  masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
  masm.store32(temp2, Address(output, JSString::offsetOfLength()));

  // Store left and right nodes.
  masm.storeRopeChildren(lhs, rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&leftEmpty);
  masm.mov(rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&rightEmpty);
  masm.mov(lhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineTwoByte);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::TwoByte);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineLatin1);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::Latin1);
  masm.pop(FramePointer);
  masm.ret();

  // Failure path: signal via a nullptr result.
  masm.bind(&failure);
  masm.movePtr(ImmPtr(nullptr), output);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);

  CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "StringConcatStub");
#endif

  return code;
}
// Build the trampoline that calls js_free() on the pointer passed in
// CallTempReg0, preserving all other volatile registers around the ABI call.
void JitRuntime::generateFreeStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");

  const Register regSlots = CallTempReg0;

  freeStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  // Save every volatile register except the argument itself.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  regs.takeUnchecked(regSlots);
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  const Register regTemp = regs.takeAnyGeneral();
  MOZ_ASSERT(regTemp != regSlots);

  using Fn = void (*)(void* p);
  masm.setupUnalignedABICall(regTemp);
  masm.passABIArg(regSlots);
  masm.callWithABI<Fn, js_free>(ABIType::General,
                                CheckUnsafeCallWithABI::DontCheckOther);

  masm.PopRegsInMask(save);

  masm.ret();
}
// Build the lazy-link trampoline: set up a fake exit frame, call
// LazyLinkTopActivation to link the pending IonScript, then jump to the
// returned code address.
void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");

  lazyLinkStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
  // temp1 = pointer to the LazyLinkExitFrameLayout we just pushed.
  masm.moveStackPtrTo(temp1);

  using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, LazyLinkTopActivation>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

#ifdef JS_USE_LINK_REGISTER
  // Restore the return address such that the emitPrologue function of the
  // CodeGenerator can push it back on the stack with pushReturnAddress.
  masm.popReturnAddress();
#endif
  // Tail-jump into the freshly linked code.
  masm.jump(ReturnReg);
}
// Build the interpreter-entry trampoline: set up a fake exit frame, invoke
// the callee through InvokeFromInterpreterStub, then load the return value
// (stored over |this| in the frame) and return.
void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");

  interpreterStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
  // temp1 = pointer to the InterpreterStubExitFrameLayout we just pushed.
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, InvokeFromInterpreterStub>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

  // InvokeFromInterpreterStub stores the return value in argv[0], where the
  // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
  // just popped.
  masm.loadValue(Address(masm.getStackPointer(),
                         JitFrameLayout::offsetOfThis() - sizeof(void*)),
                 JSReturnOperand);
  masm.ret();
}
// Build a stub that, given a boxed Value in R0, replaces a double that holds
// an exact int32 with the equivalent int32-tagged Value. Non-doubles and
// non-convertible doubles (fractions; -0 is accepted since the negative-zero
// check is disabled) are left untouched.
void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
  doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);

  Label done;
  masm.branchTestDouble(Assembler::NotEqual, R0, &done);

  masm.unboxDouble(R0, FloatReg0);
  // Bails to |done| (leaving R0 unchanged) when the double is not an int32.
  masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
                            /* negativeZeroCheck = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);

  masm.bind(&done);
  masm.abiret();
}
12138 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12139 Register str = ToRegister(lir->str());
12140 Register output = ToRegister(lir->output());
12142 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12143 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12144 lir, ArgList(str), StoreRegisterTo(output));
12146 masm.branchIfRope(str, ool->entry());
12148 masm.movePtr(str, output);
12149 masm.bind(ool->rejoin());
12152 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12153 Register str = ToRegister(lir->str());
12154 Register index = ToRegister(lir->index());
12155 Register output = ToRegister(lir->output());
12157 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12158 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12159 lir, ArgList(str), StoreRegisterTo(output));
12161 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12163 masm.movePtr(str, output);
12164 masm.bind(ool->rejoin());
12167 void CodeGenerator::visitLinearizeForCodePointAccess(
12168 LLinearizeForCodePointAccess* lir) {
12169 Register str = ToRegister(lir->str());
12170 Register index = ToRegister(lir->index());
12171 Register output = ToRegister(lir->output());
12172 Register temp = ToRegister(lir->temp0());
12174 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12175 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12176 lir, ArgList(str), StoreRegisterTo(output));
12178 masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
12179 ool->entry());
12181 masm.movePtr(str, output);
12182 masm.bind(ool->rejoin());
12185 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
12186 Register index = ToRegister(lir->index());
12187 Register length = ToRegister(lir->length());
12188 Register output = ToRegister(lir->output());
12190 masm.move32(Imm32(0), output);
12191 masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
12192 masm.add32(index, output);
// Emit String.prototype.charCodeAt: load the char inline when possible,
// fall back to the CharCodeAt VM call otherwise (e.g. for ropes). A bogus
// index operand means the index is the constant 0.
void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
  Register str = ToRegister(lir->str());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);

  if (lir->index()->isBogus()) {
    // Constant index 0.
    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
                                               StoreRegisterTo(output));
    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    Register index = ToRegister(lir->index());

    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
                                               StoreRegisterTo(output));
    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  }
}
// Like visitCharCodeAt, but out-of-bounds indices produce -1 instead of
// calling into the VM: -1 is preloaded into the output and the in-bounds
// check branches straight to the rejoin point on failure.
void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
  Register str = ToRegister(lir->str());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);

  // Return -1 for out-of-bounds access.
  masm.move32(Imm32(-1), output);

  if (lir->index()->isBogus()) {
    // Constant index 0: only the empty string is out of bounds.
    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
                                               StoreRegisterTo(output));

    masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                  Imm32(0), ool->rejoin());
    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    Register index = ToRegister(lir->index());

    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
                                               StoreRegisterTo(output));

    // Spectre-safe bounds check; on failure keep the preloaded -1.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              temp0, ool->rejoin());
    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
    masm.bind(ool->rejoin());
  }
}
12250 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
12251 Register str = ToRegister(lir->str());
12252 Register index = ToRegister(lir->index());
12253 Register output = ToRegister(lir->output());
12254 Register temp0 = ToRegister(lir->temp0());
12255 Register temp1 = ToRegister(lir->temp1());
12257 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12258 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12259 StoreRegisterTo(output));
12261 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12262 masm.bind(ool->rejoin());
12265 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
12266 Register str = ToRegister(lir->str());
12267 Register index = ToRegister(lir->index());
12268 Register output = ToRegister(lir->output());
12269 Register temp0 = ToRegister(lir->temp0());
12270 Register temp1 = ToRegister(lir->temp1());
12272 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12273 auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
12274 StoreRegisterTo(output));
12276 // Return -1 for out-of-bounds access.
12277 masm.move32(Imm32(-1), output);
12279 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12280 temp0, ool->rejoin());
12281 masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
12282 masm.bind(ool->rejoin());
12285 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12286 Register input = ToRegister(lir->input());
12287 ValueOperand output = ToOutValue(lir);
12289 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12291 Label done;
12292 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12293 masm.moveValue(JS::NaNValue(), output);
12294 masm.bind(&done);
12297 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
12298 Register input = ToRegister(lir->input());
12299 ValueOperand output = ToOutValue(lir);
12301 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12303 Label done;
12304 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12305 masm.moveValue(JS::UndefinedValue(), output);
12306 masm.bind(&done);
12309 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12310 Register code = ToRegister(lir->code());
12311 Register output = ToRegister(lir->output());
12313 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12314 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12315 StoreRegisterTo(output));
12317 // OOL path if code >= UNIT_STATIC_LIMIT.
12318 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12319 ool->entry());
12321 masm.bind(ool->rejoin());
12324 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12325 LFromCharCodeEmptyIfNegative* lir) {
12326 Register code = ToRegister(lir->code());
12327 Register output = ToRegister(lir->output());
12329 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12330 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12331 StoreRegisterTo(output));
12333 // Return the empty string for negative inputs.
12334 const JSAtomState& names = gen->runtime->names();
12335 masm.movePtr(ImmGCPtr(names.empty_), output);
12336 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12338 // OOL path if code >= UNIT_STATIC_LIMIT.
12339 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12340 ool->entry());
12342 masm.bind(ool->rejoin());
12345 void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
12346 LFromCharCodeUndefinedIfNegative* lir) {
12347 Register code = ToRegister(lir->code());
12348 ValueOperand output = ToOutValue(lir);
12349 Register temp = output.scratchReg();
12351 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12352 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
12353 StoreRegisterTo(temp));
12355 // Return |undefined| for negative inputs.
12356 Label done;
12357 masm.moveValue(UndefinedValue(), output);
12358 masm.branchTest32(Assembler::Signed, code, code, &done);
12360 // OOL path if code >= UNIT_STATIC_LIMIT.
12361 masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
12362 ool->entry());
12364 masm.bind(ool->rejoin());
12365 masm.tagValue(JSVAL_TYPE_STRING, temp, output);
12367 masm.bind(&done);
12370 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
12371 Register codePoint = ToRegister(lir->codePoint());
12372 Register output = ToRegister(lir->output());
12373 Register temp0 = ToRegister(lir->temp0());
12374 Register temp1 = ToRegister(lir->temp1());
12375 LSnapshot* snapshot = lir->snapshot();
12377 // The OOL path is only taken when we can't allocate the inline string.
12378 using Fn = JSLinearString* (*)(JSContext*, char32_t);
12379 auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
12380 StoreRegisterTo(output));
12382 Label isTwoByte;
12383 Label* done = ool->rejoin();
12385 static_assert(
12386 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
12387 "Latin-1 strings can be loaded from static strings");
12390 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
12391 &isTwoByte);
12392 masm.jump(done);
12394 masm.bind(&isTwoByte);
12396 // Use a bailout if the input is not a valid code point, because
12397 // MFromCodePoint is movable and it'd be observable when a moved
12398 // fromCodePoint throws an exception before its actual call site.
12399 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
12400 snapshot);
12402 // Allocate a JSThinInlineString.
12404 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
12405 "JSThinInlineString can hold a supplementary code point");
12407 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12408 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
12409 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
12412 Label isSupplementary;
12413 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
12414 &isSupplementary);
12416 // Store length.
12417 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
12419 // Load chars pointer in temp0.
12420 masm.loadInlineStringCharsForStore(output, temp0);
12422 masm.store16(codePoint, Address(temp0, 0));
12424 masm.jump(done);
12426 masm.bind(&isSupplementary);
12428 // Store length.
12429 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
12431 // Load chars pointer in temp0.
12432 masm.loadInlineStringCharsForStore(output, temp0);
12434 // Inlined unicode::LeadSurrogate(uint32_t).
12435 masm.move32(codePoint, temp1);
12436 masm.rshift32(Imm32(10), temp1);
12437 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
12438 temp1);
12440 masm.store16(temp1, Address(temp0, 0));
12442 // Inlined unicode::TrailSurrogate(uint32_t).
12443 masm.move32(codePoint, temp1);
12444 masm.and32(Imm32(0x3FF), temp1);
12445 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
12447 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
12451 masm.bind(done);
12454 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
12455 pushArg(ToRegister(lir->searchString()));
12456 pushArg(ToRegister(lir->string()));
12458 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12459 callVM<Fn, js::StringIncludes>(lir);
12462 template <typename LIns>
12463 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
12464 LiveRegisterSet volatileRegs) {
12465 Register string = ToRegister(lir->string());
12466 Register output = ToRegister(lir->output());
12467 Register tempLength = ToRegister(lir->temp0());
12468 Register tempChars = ToRegister(lir->temp1());
12469 Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
12471 const JSLinearString* searchString = lir->searchString();
12472 size_t length = searchString->length();
12473 MOZ_ASSERT(length == 1 || length == 2);
12475 // The additional temp register is only needed when searching for two
12476 // pattern characters.
12477 MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
12479 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12480 masm.move32(Imm32(0), output);
12481 } else {
12482 masm.move32(Imm32(-1), output);
12485 masm.loadStringLength(string, tempLength);
12487 // Can't be a substring when the string is smaller than the search string.
12488 Label done;
12489 masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
12491 bool searchStringIsPureTwoByte = false;
12492 if (searchString->hasTwoByteChars()) {
12493 JS::AutoCheckCannotGC nogc;
12494 searchStringIsPureTwoByte =
12495 !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
12498 // Pure two-byte strings can't occur in a Latin-1 string.
12499 if (searchStringIsPureTwoByte) {
12500 masm.branchLatin1String(string, ool->rejoin());
12503 // Slow path when we need to linearize the string.
12504 masm.branchIfRope(string, ool->entry());
12506 Label restoreVolatile;
12508 auto callMatcher = [&](CharEncoding encoding) {
12509 masm.loadStringChars(string, tempChars, encoding);
12511 LiveGeneralRegisterSet liveRegs;
12512 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12513 // Save |tempChars| to compute the result index.
12514 liveRegs.add(tempChars);
12516 #ifdef DEBUG
12517 // Save |tempLength| in debug-mode for assertions.
12518 liveRegs.add(tempLength);
12519 #endif
12521 // Exclude non-volatile registers.
12522 liveRegs.set() = GeneralRegisterSet::Intersect(
12523 liveRegs.set(), GeneralRegisterSet::Volatile());
12525 masm.PushRegsInMask(liveRegs);
12528 if (length == 1) {
12529 char16_t pat = searchString->latin1OrTwoByteChar(0);
12530 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12531 pat <= JSString::MAX_LATIN1_CHAR);
12533 masm.move32(Imm32(pat), output);
12535 masm.setupAlignedABICall();
12536 masm.passABIArg(tempChars);
12537 masm.passABIArg(output);
12538 masm.passABIArg(tempLength);
12539 if (encoding == CharEncoding::Latin1) {
12540 using Fn = const char* (*)(const char*, char, size_t);
12541 masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
12542 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12543 } else {
12544 using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
12545 masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
12546 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12548 } else {
12549 char16_t pat0 = searchString->latin1OrTwoByteChar(0);
12550 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12551 pat0 <= JSString::MAX_LATIN1_CHAR);
12553 char16_t pat1 = searchString->latin1OrTwoByteChar(1);
12554 MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
12555 pat1 <= JSString::MAX_LATIN1_CHAR);
12557 masm.move32(Imm32(pat0), output);
12558 masm.move32(Imm32(pat1), maybeTempPat);
12560 masm.setupAlignedABICall();
12561 masm.passABIArg(tempChars);
12562 masm.passABIArg(output);
12563 masm.passABIArg(maybeTempPat);
12564 masm.passABIArg(tempLength);
12565 if (encoding == CharEncoding::Latin1) {
12566 using Fn = const char* (*)(const char*, char, char, size_t);
12567 masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
12568 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12569 } else {
12570 using Fn =
12571 const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
12572 masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
12573 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
12577 masm.storeCallPointerResult(output);
12579 // Convert to string index for `indexOf`.
12580 if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
12581 // Restore |tempChars|. (And in debug mode |tempLength|.)
12582 masm.PopRegsInMask(liveRegs);
12584 Label found;
12585 masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
12587 masm.move32(Imm32(-1), output);
12588 masm.jump(&restoreVolatile);
12590 masm.bind(&found);
12592 #ifdef DEBUG
12593 // Check lower bound.
12594 Label lower;
12595 masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
12596 masm.assumeUnreachable("result pointer below string chars");
12597 masm.bind(&lower);
12599 // Compute the end position of the characters.
12600 auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
12601 masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
12602 tempLength);
12604 // Check upper bound.
12605 Label upper;
12606 masm.branchPtr(Assembler::Below, output, tempLength, &upper);
12607 masm.assumeUnreachable("result pointer above string chars");
12608 masm.bind(&upper);
12609 #endif
12611 masm.subPtr(tempChars, output);
12613 if (encoding == CharEncoding::TwoByte) {
12614 masm.rshiftPtr(Imm32(1), output);
12619 volatileRegs.takeUnchecked(output);
12620 volatileRegs.takeUnchecked(tempLength);
12621 volatileRegs.takeUnchecked(tempChars);
12622 if (maybeTempPat != InvalidReg) {
12623 volatileRegs.takeUnchecked(maybeTempPat);
12625 masm.PushRegsInMask(volatileRegs);
12627 // Handle the case when the input is a Latin-1 string.
12628 if (!searchStringIsPureTwoByte) {
12629 Label twoByte;
12630 masm.branchTwoByteString(string, &twoByte);
12632 callMatcher(CharEncoding::Latin1);
12633 masm.jump(&restoreVolatile);
12635 masm.bind(&twoByte);
12638 // Handle the case when the input is a two-byte string.
12639 callMatcher(CharEncoding::TwoByte);
12641 masm.bind(&restoreVolatile);
12642 masm.PopRegsInMask(volatileRegs);
12644 // Convert to bool for `includes`.
12645 if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
12646 masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
12649 masm.bind(ool->rejoin());
12652 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
12653 Register string = ToRegister(lir->string());
12654 Register output = ToRegister(lir->output());
12655 const JSLinearString* searchString = lir->searchString();
12657 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12658 auto* ool = oolCallVM<Fn, js::StringIncludes>(
12659 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12661 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12664 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
12665 pushArg(ToRegister(lir->searchString()));
12666 pushArg(ToRegister(lir->string()));
12668 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12669 callVM<Fn, js::StringIndexOf>(lir);
12672 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
12673 Register string = ToRegister(lir->string());
12674 Register output = ToRegister(lir->output());
12675 const JSLinearString* searchString = lir->searchString();
12677 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12678 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
12679 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12681 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12684 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
12685 pushArg(ToRegister(lir->searchString()));
12686 pushArg(ToRegister(lir->string()));
12688 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12689 callVM<Fn, js::StringLastIndexOf>(lir);
12692 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
12693 pushArg(ToRegister(lir->searchString()));
12694 pushArg(ToRegister(lir->string()));
12696 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12697 callVM<Fn, js::StringStartsWith>(lir);
12700 void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
12701 Register string = ToRegister(lir->string());
12702 Register output = ToRegister(lir->output());
12703 Register temp = ToRegister(lir->temp0());
12705 const JSLinearString* searchString = lir->searchString();
12707 size_t length = searchString->length();
12708 MOZ_ASSERT(length > 0);
12710 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12711 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
12712 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12714 masm.move32(Imm32(0), output);
12716 // Can't be a prefix when the string is smaller than the search string.
12717 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
12718 Imm32(length), ool->rejoin());
12720 // Unwind ropes at the start if possible.
12721 Label compare;
12722 masm.movePtr(string, temp);
12723 masm.branchIfNotRope(temp, &compare);
12725 Label unwindRope;
12726 masm.bind(&unwindRope);
12727 masm.loadRopeLeftChild(temp, output);
12728 masm.movePtr(output, temp);
12730 // If the left child is smaller than the search string, jump into the VM to
12731 // linearize the string.
12732 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
12733 Imm32(length), ool->entry());
12735 // Otherwise keep unwinding ropes.
12736 masm.branchIfRope(temp, &unwindRope);
12738 masm.bind(&compare);
12740 // If operands point to the same instance, it's trivially a prefix.
12741 Label notPointerEqual;
12742 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
12743 &notPointerEqual);
12744 masm.move32(Imm32(1), output);
12745 masm.jump(ool->rejoin());
12746 masm.bind(&notPointerEqual);
12748 if (searchString->hasTwoByteChars()) {
12749 // Pure two-byte strings can't be a prefix of Latin-1 strings.
12750 JS::AutoCheckCannotGC nogc;
12751 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
12752 Label compareChars;
12753 masm.branchTwoByteString(temp, &compareChars);
12754 masm.move32(Imm32(0), output);
12755 masm.jump(ool->rejoin());
12756 masm.bind(&compareChars);
12760 // Load the input string's characters.
12761 Register stringChars = output;
12762 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
12764 // Start comparing character by character.
12765 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
12767 masm.bind(ool->rejoin());
12770 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
12771 pushArg(ToRegister(lir->searchString()));
12772 pushArg(ToRegister(lir->string()));
12774 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12775 callVM<Fn, js::StringEndsWith>(lir);
12778 void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
12779 Register string = ToRegister(lir->string());
12780 Register output = ToRegister(lir->output());
12781 Register temp = ToRegister(lir->temp0());
12783 const JSLinearString* searchString = lir->searchString();
12785 size_t length = searchString->length();
12786 MOZ_ASSERT(length > 0);
12788 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12789 auto* ool = oolCallVM<Fn, js::StringEndsWith>(
12790 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12792 masm.move32(Imm32(0), output);
12794 // Can't be a suffix when the string is smaller than the search string.
12795 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
12796 Imm32(length), ool->rejoin());
12798 // Unwind ropes at the end if possible.
12799 Label compare;
12800 masm.movePtr(string, temp);
12801 masm.branchIfNotRope(temp, &compare);
12803 Label unwindRope;
12804 masm.bind(&unwindRope);
12805 masm.loadRopeRightChild(temp, output);
12806 masm.movePtr(output, temp);
12808 // If the right child is smaller than the search string, jump into the VM to
12809 // linearize the string.
12810 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
12811 Imm32(length), ool->entry());
12813 // Otherwise keep unwinding ropes.
12814 masm.branchIfRope(temp, &unwindRope);
12816 masm.bind(&compare);
12818 // If operands point to the same instance, it's trivially a suffix.
12819 Label notPointerEqual;
12820 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
12821 &notPointerEqual);
12822 masm.move32(Imm32(1), output);
12823 masm.jump(ool->rejoin());
12824 masm.bind(&notPointerEqual);
12826 CharEncoding encoding = searchString->hasLatin1Chars()
12827 ? CharEncoding::Latin1
12828 : CharEncoding::TwoByte;
12829 if (encoding == CharEncoding::TwoByte) {
12830 // Pure two-byte strings can't be a suffix of Latin-1 strings.
12831 JS::AutoCheckCannotGC nogc;
12832 if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
12833 Label compareChars;
12834 masm.branchTwoByteString(temp, &compareChars);
12835 masm.move32(Imm32(0), output);
12836 masm.jump(ool->rejoin());
12837 masm.bind(&compareChars);
12841 // Load the input string's characters.
12842 Register stringChars = output;
12843 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());
12845 // Move string-char pointer to the suffix string.
12846 masm.loadStringLength(temp, temp);
12847 masm.sub32(Imm32(length), temp);
12848 masm.addToCharPtr(stringChars, temp, encoding);
12850 // Start comparing character by character.
12851 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);
12853 masm.bind(ool->rejoin());
12856 void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
12857 Register string = ToRegister(lir->string());
12858 Register output = ToRegister(lir->output());
12859 Register temp0 = ToRegister(lir->temp0());
12860 Register temp1 = ToRegister(lir->temp1());
12861 Register temp2 = ToRegister(lir->temp2());
12863 // On x86 there are not enough registers. In that case reuse the string
12864 // register as a temporary.
12865 Register temp3 =
12866 lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
12867 Register temp4 = ToRegister(lir->temp4());
12869 using Fn = JSString* (*)(JSContext*, HandleString);
12870 OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
12871 lir, ArgList(string), StoreRegisterTo(output));
12873 // Take the slow path if the string isn't a linear Latin-1 string.
12874 Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
12875 Register flags = temp0;
12876 masm.load32(Address(string, JSString::offsetOfFlags()), flags);
12877 masm.and32(linearLatin1Bits, flags);
12878 masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
12880 Register length = temp0;
12881 masm.loadStringLength(string, length);
12883 // Return the input if it's the empty string.
12884 Label notEmptyString;
12885 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
12887 masm.movePtr(string, output);
12888 masm.jump(ool->rejoin());
12890 masm.bind(&notEmptyString);
12892 Register inputChars = temp1;
12893 masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
12895 Register toLowerCaseTable = temp2;
12896 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
12898 // Single element strings can be directly retrieved from static strings cache.
12899 Label notSingleElementString;
12900 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
12902 Register current = temp4;
12904 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
12905 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
12906 current);
12907 masm.lookupStaticString(current, output, gen->runtime->staticStrings());
12909 masm.jump(ool->rejoin());
12911 masm.bind(&notSingleElementString);
12913 // Use the OOL-path when the string is too long. This prevents scanning long
12914 // strings which have upper case characters only near the end a second time in
12915 // the VM.
12916 constexpr int32_t MaxInlineLength = 64;
12917 masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
12920 // Check if there are any characters which need to be converted.
12922 // This extra loop gives a small performance improvement for strings which
12923 // are already lower cased and lets us avoid calling into the runtime for
12924 // non-inline, all lower case strings. But more importantly it avoids
12925 // repeated inline allocation failures:
12926 // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
12927 // |js::StringToLowerCase| runtime function when the result string can't be
12928 // allocated inline. And |js::StringToLowerCase| directly returns the input
12929 // string when no characters need to be converted. That means it won't
12930 // trigger GC to clear up the free nursery space, so the next toLowerCase()
12931 // call will again fail to inline allocate the result string.
12932 Label hasUpper;
12934 Register checkInputChars = output;
12935 masm.movePtr(inputChars, checkInputChars);
12937 Register current = temp4;
12939 Label start;
12940 masm.bind(&start);
12941 masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
12942 masm.branch8(Assembler::NotEqual,
12943 BaseIndex(toLowerCaseTable, current, TimesOne), current,
12944 &hasUpper);
12945 masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
12946 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
12948 // Input is already in lower case.
12949 masm.movePtr(string, output);
12950 masm.jump(ool->rejoin());
12952 masm.bind(&hasUpper);
12954 // |length| was clobbered above, reload.
12955 masm.loadStringLength(string, length);
12957 // Call into the runtime when we can't create an inline string.
12958 masm.branch32(Assembler::Above, length,
12959 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
12961 AllocateThinOrFatInlineString(masm, output, length, temp4,
12962 initialStringHeap(), ool->entry(),
12963 CharEncoding::Latin1);
12965 if (temp3 == string) {
12966 masm.push(string);
12969 Register outputChars = temp3;
12970 masm.loadInlineStringCharsForStore(output, outputChars);
12973 Register current = temp4;
12975 Label start;
12976 masm.bind(&start);
12977 masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
12978 masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
12979 current);
12980 masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
12981 masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
12982 masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
12983 masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
12986 if (temp3 == string) {
12987 masm.pop(string);
12991 masm.bind(ool->rejoin());
12994 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
12995 pushArg(ToRegister(lir->string()));
12997 using Fn = JSString* (*)(JSContext*, HandleString);
12998 callVM<Fn, js::StringToUpperCase>(lir);
13001 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
13002 Register code = ToRegister(lir->code());
13003 Register output = ToRegister(lir->output());
13004 Register temp = ToRegister(lir->temp0());
13006 using Fn = JSString* (*)(JSContext*, int32_t);
13007 auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
13008 StoreRegisterTo(output));
13010 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13012 // OOL path if code >= NonLatin1Min.
13013 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13015 // Convert to lower case.
13016 masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
13017 masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
13019 // Load static string for lower case character.
13020 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13022 masm.bind(ool->rejoin());
13025 void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
13026 Register code = ToRegister(lir->code());
13027 Register output = ToRegister(lir->output());
13028 Register temp = ToRegister(lir->temp0());
13030 using Fn = JSString* (*)(JSContext*, int32_t);
13031 auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
13032 StoreRegisterTo(output));
13034 constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
13036 // OOL path if code >= NonLatin1Min.
13037 masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
13039 // Most one element Latin-1 strings can be directly retrieved from the
13040 // static strings cache, except the following three characters:
13042 // 1. ToUpper(U+00B5) = 0+039C
13043 // 2. ToUpper(U+00FF) = 0+0178
13044 // 3. ToUpper(U+00DF) = 0+0053 0+0053
13045 masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
13046 ool->entry());
13047 masm.branch32(Assembler::Equal, code,
13048 Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
13049 ool->entry());
13050 masm.branch32(Assembler::Equal, code,
13051 Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());
13053 // Inline unicode::ToUpperCase (without the special case for ASCII characters)
13055 constexpr size_t shift = unicode::CharInfoShift;
13057 // code >> shift
13058 masm.move32(code, temp);
13059 masm.rshift32(Imm32(shift), temp);
13061 // index = index1[code >> shift];
13062 masm.movePtr(ImmPtr(unicode::index1), output);
13063 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13065 // (code & ((1 << shift) - 1)
13066 masm.move32(code, output);
13067 masm.and32(Imm32((1 << shift) - 1), output);
13069 // (index << shift) + (code & ((1 << shift) - 1))
13070 masm.lshift32(Imm32(shift), temp);
13071 masm.add32(output, temp);
13073 // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
13074 masm.movePtr(ImmPtr(unicode::index2), output);
13075 masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);
13077 // Compute |index * 6| through |(index * 3) * TimesTwo|.
13078 static_assert(sizeof(unicode::CharacterInfo) == 6);
13079 masm.mulBy3(temp, temp);
13081 // upperCase = js_charinfo[index].upperCase
13082 masm.movePtr(ImmPtr(unicode::js_charinfo), output);
13083 masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
13084 offsetof(unicode::CharacterInfo, upperCase)),
13085 temp);
13087 // uint16_t(ch) + upperCase
13088 masm.add32(code, temp);
13090 // Clear any high bits added when performing the unsigned 16-bit addition
13091 // through a signed 32-bit addition.
13092 masm.move8ZeroExtend(temp, temp);
13094 // Load static string for upper case character.
13095 masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
13097 masm.bind(ool->rejoin());
13100 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
13101 Register string = ToRegister(lir->string());
13102 Register output = ToRegister(lir->output());
13104 auto volatileRegs = liveVolatileRegs(lir);
13105 volatileRegs.takeUnchecked(output);
13107 masm.PushRegsInMask(volatileRegs);
13109 using Fn = int32_t (*)(const JSString*);
13110 masm.setupAlignedABICall();
13111 masm.passABIArg(string);
13112 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
13113 masm.storeCallInt32Result(output);
13115 masm.PopRegsInMask(volatileRegs);
13118 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13119 Register string = ToRegister(lir->string());
13120 Register start = ToRegister(lir->start());
13121 Register output = ToRegister(lir->output());
13123 auto volatileRegs = liveVolatileRegs(lir);
13124 volatileRegs.takeUnchecked(output);
13126 masm.PushRegsInMask(volatileRegs);
13128 using Fn = int32_t (*)(const JSString*, int32_t);
13129 masm.setupAlignedABICall();
13130 masm.passABIArg(string);
13131 masm.passABIArg(start);
13132 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13133 masm.storeCallInt32Result(output);
13135 masm.PopRegsInMask(volatileRegs);
13138 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13139 pushArg(Imm32(INT32_MAX));
13140 pushArg(ToRegister(lir->separator()));
13141 pushArg(ToRegister(lir->string()));
13143 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13144 callVM<Fn, js::StringSplitString>(lir);
13147 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13148 Address initLength(ToRegister(lir->elements()),
13149 ObjectElements::offsetOfInitializedLength());
13150 masm.load32(initLength, ToRegister(lir->output()));
13153 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13154 Address initLength(ToRegister(lir->elements()),
13155 ObjectElements::offsetOfInitializedLength());
13156 SetLengthFromIndex(masm, lir->index(), initLength);
13159 void CodeGenerator::visitNotBI(LNotBI* lir) {
13160 Register input = ToRegister(lir->input());
13161 Register output = ToRegister(lir->output());
13163 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13164 Imm32(0), output);
13167 void CodeGenerator::visitNotO(LNotO* lir) {
13168 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
13169 addOutOfLineCode(ool, lir->mir());
13171 Label* ifEmulatesUndefined = ool->label1();
13172 Label* ifDoesntEmulateUndefined = ool->label2();
13174 Register objreg = ToRegister(lir->input());
13175 Register output = ToRegister(lir->output());
13176 branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
13177 ifDoesntEmulateUndefined, output, ool);
13178 // fall through
13180 Label join;
13182 masm.move32(Imm32(0), output);
13183 masm.jump(&join);
13185 masm.bind(ifEmulatesUndefined);
13186 masm.move32(Imm32(1), output);
13188 masm.bind(&join);
13191 void CodeGenerator::visitNotV(LNotV* lir) {
13192 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
13193 addOutOfLineCode(ool, lir->mir());
13195 Label* ifTruthy = ool->label1();
13196 Label* ifFalsy = ool->label2();
13198 ValueOperand input = ToValue(lir, LNotV::InputIndex);
13199 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
13200 FloatRegister floatTemp = ToFloatRegister(lir->temp0());
13201 Register output = ToRegister(lir->output());
13202 const TypeDataList& observedTypes = lir->mir()->observedTypes();
13204 testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
13205 ifTruthy, ifFalsy, ool);
13207 Label join;
13209 // Note that the testValueTruthy call above may choose to fall through
13210 // to ifTruthy instead of branching there.
13211 masm.bind(ifTruthy);
13212 masm.move32(Imm32(0), output);
13213 masm.jump(&join);
13215 masm.bind(ifFalsy);
13216 masm.move32(Imm32(1), output);
13218 // both branches meet here.
13219 masm.bind(&join);
13222 void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
13223 const LAllocation* index = lir->index();
13224 const LAllocation* length = lir->length();
13225 LSnapshot* snapshot = lir->snapshot();
13227 MIRType type = lir->mir()->type();
13229 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
13230 if (type == MIRType::Int32) {
13231 bailoutCmp32(cond, lhs, rhs, snapshot);
13232 } else {
13233 MOZ_ASSERT(type == MIRType::IntPtr);
13234 bailoutCmpPtr(cond, lhs, rhs, snapshot);
13238 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
13239 int32_t rhs) {
13240 if (type == MIRType::Int32) {
13241 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
13242 } else {
13243 MOZ_ASSERT(type == MIRType::IntPtr);
13244 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
13248 if (index->isConstant()) {
13249 // Use uint32 so that the comparison is unsigned.
13250 uint32_t idx = ToInt32(index);
13251 if (length->isConstant()) {
13252 uint32_t len = ToInt32(lir->length());
13253 if (idx < len) {
13254 return;
13256 bailout(snapshot);
13257 return;
13260 if (length->isRegister()) {
13261 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
13262 } else {
13263 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
13265 return;
13268 Register indexReg = ToRegister(index);
13269 if (length->isConstant()) {
13270 bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
13271 } else if (length->isRegister()) {
13272 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
13273 } else {
13274 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
13278 void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
13279 int32_t min = lir->mir()->minimum();
13280 int32_t max = lir->mir()->maximum();
13281 MOZ_ASSERT(max >= min);
13283 LSnapshot* snapshot = lir->snapshot();
13284 MIRType type = lir->mir()->type();
13286 const LAllocation* length = lir->length();
13287 Register temp = ToRegister(lir->getTemp(0));
13289 auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
13290 if (type == MIRType::Int32) {
13291 bailoutCmp32(cond, lhs, rhs, snapshot);
13292 } else {
13293 MOZ_ASSERT(type == MIRType::IntPtr);
13294 bailoutCmpPtr(cond, lhs, rhs, snapshot);
13298 auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
13299 int32_t rhs) {
13300 if (type == MIRType::Int32) {
13301 bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
13302 } else {
13303 MOZ_ASSERT(type == MIRType::IntPtr);
13304 bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
13308 if (lir->index()->isConstant()) {
13309 int32_t nmin, nmax;
13310 int32_t index = ToInt32(lir->index());
13311 if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
13312 if (length->isRegister()) {
13313 bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
13314 } else {
13315 bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
13317 return;
13319 masm.mov(ImmWord(index), temp);
13320 } else {
13321 masm.mov(ToRegister(lir->index()), temp);
13324 // If the minimum and maximum differ then do an underflow check first.
13325 // If the two are the same then doing an unsigned comparison on the
13326 // length will also catch a negative index.
13327 if (min != max) {
13328 if (min != 0) {
13329 Label bail;
13330 if (type == MIRType::Int32) {
13331 masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
13332 } else {
13333 masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
13335 bailoutFrom(&bail, snapshot);
13338 bailoutCmpConstant(Assembler::LessThan, temp, 0);
13340 if (min != 0) {
13341 int32_t diff;
13342 if (SafeSub(max, min, &diff)) {
13343 max = diff;
13344 } else {
13345 if (type == MIRType::Int32) {
13346 masm.sub32(Imm32(min), temp);
13347 } else {
13348 masm.subPtr(Imm32(min), temp);
13354 // Compute the maximum possible index. No overflow check is needed when
13355 // max > 0. We can only wraparound to a negative number, which will test as
13356 // larger than all nonnegative numbers in the unsigned comparison, and the
13357 // length is required to be nonnegative (else testing a negative length
13358 // would succeed on any nonnegative index).
13359 if (max != 0) {
13360 if (max < 0) {
13361 Label bail;
13362 if (type == MIRType::Int32) {
13363 masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
13364 } else {
13365 masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
13367 bailoutFrom(&bail, snapshot);
13368 } else {
13369 if (type == MIRType::Int32) {
13370 masm.add32(Imm32(max), temp);
13371 } else {
13372 masm.addPtr(Imm32(max), temp);
13377 if (length->isRegister()) {
13378 bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
13379 } else {
13380 bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
13384 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
13385 int32_t min = lir->mir()->minimum();
13386 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
13387 lir->snapshot());
13390 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
13391 MOZ_ASSERT(JitOptions.spectreIndexMasking);
13393 const LAllocation* length = lir->length();
13394 Register index = ToRegister(lir->index());
13395 Register output = ToRegister(lir->output());
13397 if (lir->mir()->type() == MIRType::Int32) {
13398 if (length->isRegister()) {
13399 masm.spectreMaskIndex32(index, ToRegister(length), output);
13400 } else {
13401 masm.spectreMaskIndex32(index, ToAddress(length), output);
13403 } else {
13404 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
13405 if (length->isRegister()) {
13406 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
13407 } else {
13408 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
13413 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
13414 LInstruction* ins_;
13416 public:
13417 explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
13418 MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
13421 void accept(CodeGenerator* codegen) override {
13422 codegen->visitOutOfLineStoreElementHole(this);
13425 MStoreElementHole* mir() const {
13426 return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
13427 : ins_->toStoreElementHoleT()->mir();
13429 LInstruction* ins() const { return ins_; }
13432 void CodeGenerator::emitStoreHoleCheck(Register elements,
13433 const LAllocation* index,
13434 LSnapshot* snapshot) {
13435 Label bail;
13436 if (index->isConstant()) {
13437 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13438 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13439 } else {
13440 BaseObjectElementIndex dest(elements, ToRegister(index));
13441 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13443 bailoutFrom(&bail, snapshot);
13446 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
13447 MIRType valueType, Register elements,
13448 const LAllocation* index) {
13449 MOZ_ASSERT(valueType != MIRType::MagicHole);
13450 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
13451 if (index->isConstant()) {
13452 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13453 masm.storeUnboxedValue(v, valueType, dest);
13454 } else {
13455 BaseObjectElementIndex dest(elements, ToRegister(index));
13456 masm.storeUnboxedValue(v, valueType, dest);
13460 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
13461 Register elements = ToRegister(store->elements());
13462 const LAllocation* index = store->index();
13464 if (store->mir()->needsBarrier()) {
13465 emitPreBarrier(elements, index);
13468 if (store->mir()->needsHoleCheck()) {
13469 emitStoreHoleCheck(elements, index, store->snapshot());
13472 emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
13473 index);
13476 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
13477 const ValueOperand value = ToValue(lir, LStoreElementV::Value);
13478 Register elements = ToRegister(lir->elements());
13479 const LAllocation* index = lir->index();
13481 if (lir->mir()->needsBarrier()) {
13482 emitPreBarrier(elements, index);
13485 if (lir->mir()->needsHoleCheck()) {
13486 emitStoreHoleCheck(elements, index, lir->snapshot());
13489 if (lir->index()->isConstant()) {
13490 Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
13491 masm.storeValue(value, dest);
13492 } else {
13493 BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
13494 masm.storeValue(value, dest);
13498 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
13499 Register elements = ToRegister(lir->elements());
13500 Register index = ToRegister(lir->index());
13502 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
13503 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
13505 BaseObjectElementIndex element(elements, index);
13506 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
13509 void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
13510 auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
13511 addOutOfLineCode(ool, lir->mir());
13513 Register obj = ToRegister(lir->object());
13514 Register elements = ToRegister(lir->elements());
13515 Register index = ToRegister(lir->index());
13516 Register temp = ToRegister(lir->temp0());
13518 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13519 masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
13521 emitPreBarrier(elements, lir->index());
13523 masm.bind(ool->rejoin());
13524 emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
13525 lir->index());
13527 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13528 LiveRegisterSet regs = liveVolatileRegs(lir);
13529 ConstantOrRegister val =
13530 ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
13531 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
13535 void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
13536 auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
13537 addOutOfLineCode(ool, lir->mir());
13539 Register obj = ToRegister(lir->object());
13540 Register elements = ToRegister(lir->elements());
13541 Register index = ToRegister(lir->index());
13542 const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
13543 Register temp = ToRegister(lir->temp0());
13545 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13546 masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
13548 emitPreBarrier(elements, lir->index());
13550 masm.bind(ool->rejoin());
13551 masm.storeValue(value, BaseObjectElementIndex(elements, index));
13553 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13554 LiveRegisterSet regs = liveVolatileRegs(lir);
13555 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
13556 ConstantOrRegister(value));
13560 void CodeGenerator::visitOutOfLineStoreElementHole(
13561 OutOfLineStoreElementHole* ool) {
13562 Register object, elements, index;
13563 LInstruction* ins = ool->ins();
13564 mozilla::Maybe<ConstantOrRegister> value;
13565 Register temp;
13567 if (ins->isStoreElementHoleV()) {
13568 LStoreElementHoleV* store = ins->toStoreElementHoleV();
13569 object = ToRegister(store->object());
13570 elements = ToRegister(store->elements());
13571 index = ToRegister(store->index());
13572 value.emplace(
13573 TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
13574 temp = ToRegister(store->temp0());
13575 } else {
13576 LStoreElementHoleT* store = ins->toStoreElementHoleT();
13577 object = ToRegister(store->object());
13578 elements = ToRegister(store->elements());
13579 index = ToRegister(store->index());
13580 if (store->value()->isConstant()) {
13581 value.emplace(
13582 ConstantOrRegister(store->value()->toConstant()->toJSValue()));
13583 } else {
13584 MIRType valueType = store->mir()->value()->type();
13585 value.emplace(
13586 TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
13588 temp = ToRegister(store->temp0());
13591 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13593 // We're out-of-bounds. We only handle the index == initlength case.
13594 // If index > initializedLength, bail out. Note that this relies on the
13595 // condition flags sticking from the incoming branch.
13596 // Also note: this branch does not need Spectre mitigations, doing that for
13597 // the capacity check below is sufficient.
13598 Label allocElement, addNewElement;
13599 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
13600 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
13601 // Had to reimplement for MIPS because there are no flags.
13602 bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
13603 #else
13604 bailoutIf(Assembler::NotEqual, ins->snapshot());
13605 #endif
13607 // If index < capacity, we can add a dense element inline. If not, we need
13608 // to allocate more elements first.
13609 masm.spectreBoundsCheck32(
13610 index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
13611 &allocElement);
13612 masm.jump(&addNewElement);
13614 masm.bind(&allocElement);
13616 // Save all live volatile registers, except |temp|.
13617 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
13618 liveRegs.takeUnchecked(temp);
13619 masm.PushRegsInMask(liveRegs);
13621 masm.setupAlignedABICall();
13622 masm.loadJSContext(temp);
13623 masm.passABIArg(temp);
13624 masm.passABIArg(object);
13626 using Fn = bool (*)(JSContext*, NativeObject*);
13627 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
13628 masm.storeCallPointerResult(temp);
13630 masm.PopRegsInMask(liveRegs);
13631 bailoutIfFalseBool(temp, ins->snapshot());
13633 // Load the reallocated elements pointer.
13634 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);
13636 masm.bind(&addNewElement);
13638 // Increment initLength
13639 masm.add32(Imm32(1), initLength);
13641 // If length is now <= index, increment length too.
13642 Label skipIncrementLength;
13643 Address length(elements, ObjectElements::offsetOfLength());
13644 masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
13645 masm.add32(Imm32(1), length);
13646 masm.bind(&skipIncrementLength);
13648 // Jump to the inline path where we will store the value.
13649 // We rejoin after the prebarrier, because the memory is uninitialized.
13650 masm.jump(ool->rejoin());
13653 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
13654 Register obj = ToRegister(lir->object());
13655 Register temp1 = ToRegister(lir->temp0());
13656 Register temp2 = ToRegister(lir->temp1());
13657 ValueOperand out = ToOutValue(lir);
13659 Label bail;
13660 if (lir->mir()->mode() == MArrayPopShift::Pop) {
13661 masm.packedArrayPop(obj, out, temp1, temp2, &bail);
13662 } else {
13663 MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
13664 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
13665 masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
13667 bailoutFrom(&bail, lir->snapshot());
13670 class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
13671 LArrayPush* ins_;
13673 public:
13674 explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}
13676 void accept(CodeGenerator* codegen) override {
13677 codegen->visitOutOfLineArrayPush(this);
13680 LArrayPush* ins() const { return ins_; }
13683 void CodeGenerator::visitArrayPush(LArrayPush* lir) {
13684 Register obj = ToRegister(lir->object());
13685 Register elementsTemp = ToRegister(lir->temp0());
13686 Register length = ToRegister(lir->output());
13687 ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
13688 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
13690 auto* ool = new (alloc()) OutOfLineArrayPush(lir);
13691 addOutOfLineCode(ool, lir->mir());
13693 // Load obj->elements in elementsTemp.
13694 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
13696 Address initLengthAddr(elementsTemp,
13697 ObjectElements::offsetOfInitializedLength());
13698 Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
13699 Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());
13701 // Bail out if length != initLength.
13702 masm.load32(lengthAddr, length);
13703 bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());
13705 // If length < capacity, we can add a dense element inline. If not, we
13706 // need to allocate more elements.
13707 masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
13708 masm.bind(ool->rejoin());
13710 // Store the value.
13711 masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
13713 // Update length and initialized length.
13714 masm.add32(Imm32(1), length);
13715 masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
13716 masm.store32(length, Address(elementsTemp,
13717 ObjectElements::offsetOfInitializedLength()));
13719 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13720 LiveRegisterSet regs = liveVolatileRegs(lir);
13721 regs.addUnchecked(length);
13722 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
13723 elementsTemp, ConstantOrRegister(value),
13724 /* indexDiff = */ -1);
13728 void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
13729 LArrayPush* ins = ool->ins();
13731 Register object = ToRegister(ins->object());
13732 Register temp = ToRegister(ins->temp0());
13734 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
13735 liveRegs.takeUnchecked(temp);
13736 liveRegs.addUnchecked(ToRegister(ins->output()));
13737 liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));
13739 masm.PushRegsInMask(liveRegs);
13741 masm.setupAlignedABICall();
13742 masm.loadJSContext(temp);
13743 masm.passABIArg(temp);
13744 masm.passABIArg(object);
13746 using Fn = bool (*)(JSContext*, NativeObject* obj);
13747 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
13748 masm.storeCallPointerResult(temp);
13750 masm.PopRegsInMask(liveRegs);
13751 bailoutIfFalseBool(temp, ins->snapshot());
13753 // Load the reallocated elements pointer.
13754 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
13756 masm.jump(ool->rejoin());
13759 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
13760 Register object = ToRegister(lir->object());
13761 Register begin = ToRegister(lir->begin());
13762 Register end = ToRegister(lir->end());
13763 Register temp0 = ToRegister(lir->temp0());
13764 Register temp1 = ToRegister(lir->temp1());
13766 Label call, fail;
13768 Label bail;
13769 masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
13770 bailoutFrom(&bail, lir->snapshot());
13772 // Try to allocate an object.
13773 TemplateObject templateObject(lir->mir()->templateObj());
13774 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
13775 &fail);
13777 masm.jump(&call);
13779 masm.bind(&fail);
13780 masm.movePtr(ImmPtr(nullptr), temp0);
13782 masm.bind(&call);
13784 pushArg(temp0);
13785 pushArg(end);
13786 pushArg(begin);
13787 pushArg(object);
13789 using Fn =
13790 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
13791 callVM<Fn, ArraySliceDense>(lir);
13794 void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
13795 Register object = ToRegister(lir->object());
13796 Register begin = ToRegister(lir->begin());
13797 Register end = ToRegister(lir->end());
13798 Register temp0 = ToRegister(lir->temp0());
13799 Register temp1 = ToRegister(lir->temp1());
13801 Label call, fail;
13803 // Try to allocate an object.
13804 TemplateObject templateObject(lir->mir()->templateObj());
13805 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
13806 &fail);
13808 masm.jump(&call);
13810 masm.bind(&fail);
13811 masm.movePtr(ImmPtr(nullptr), temp0);
13813 masm.bind(&call);
13815 pushArg(temp0);
13816 pushArg(end);
13817 pushArg(begin);
13818 pushArg(object);
13820 using Fn =
13821 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
13822 callVM<Fn, ArgumentsSliceDense>(lir);
#ifdef DEBUG
// Debug-only sanity checks for an arguments slice: asserts 0 <= begin,
// 0 <= count, begin <= numActualArgs, count <= numActualArgs, and
// begin + count <= numActualArgs. |numActualArgs| is clobbered.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
13900 template <class ArgumentsSlice>
13901 void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
13902 const RegisterOrInt32& count, Register output,
13903 Register temp) {
13904 using Fn = ArrayObject* (*)(JSContext*, int32_t);
13905 auto* ool = count.match(
13906 [&](Register count) {
13907 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
13908 lir, ArgList(count), StoreRegisterTo(output));
13910 [&](int32_t count) {
13911 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
13912 lir, ArgList(Imm32(count)), StoreRegisterTo(output));
13915 TemplateObject templateObject(lir->mir()->templateObj());
13916 MOZ_ASSERT(templateObject.isArrayObject());
13918 auto templateNativeObj = templateObject.asTemplateNativeObject();
13919 MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
13920 MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
13921 MOZ_ASSERT(!templateNativeObj.hasDynamicElements());
13923 // Check array capacity. Call into the VM if the template object's capacity
13924 // is too small.
13925 bool tryAllocate = count.match(
13926 [&](Register count) {
13927 masm.branch32(Assembler::Above, count,
13928 Imm32(templateNativeObj.getDenseCapacity()),
13929 ool->entry());
13930 return true;
13932 [&](int32_t count) {
13933 MOZ_ASSERT(count >= 0);
13934 if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
13935 masm.jump(ool->entry());
13936 return false;
13938 return true;
13941 if (tryAllocate) {
13942 // Try to allocate an object.
13943 masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
13944 ool->entry());
13946 auto setInitializedLengthAndLength = [&](auto count) {
13947 const int elementsOffset = NativeObject::offsetOfFixedElements();
13949 // Update initialized length.
13950 Address initLength(
13951 output, elementsOffset + ObjectElements::offsetOfInitializedLength());
13952 masm.store32(count, initLength);
13954 // Update length.
13955 Address length(output, elementsOffset + ObjectElements::offsetOfLength());
13956 masm.store32(count, length);
13959 // The array object was successfully created. Set the length and initialized
13960 // length and then proceed to fill the elements.
13961 count.match([&](Register count) { setInitializedLengthAndLength(count); },
13962 [&](int32_t count) {
13963 if (count > 0) {
13964 setInitializedLengthAndLength(Imm32(count));
13969 masm.bind(ool->rejoin());
13972 void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
13973 Register begin = ToRegister(lir->begin());
13974 Register count = ToRegister(lir->count());
13975 Register temp = ToRegister(lir->temp0());
13976 Register output = ToRegister(lir->output());
13978 #ifdef DEBUG
13979 masm.loadNumActualArgs(FramePointer, temp);
13980 emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
13981 temp);
13982 #endif
13984 emitNewArray(lir, RegisterOrInt32(count), output, temp);
13986 Label done;
13987 masm.branch32(Assembler::Equal, count, Imm32(0), &done);
13989 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
13990 allRegs.take(begin);
13991 allRegs.take(count);
13992 allRegs.take(temp);
13993 allRegs.take(output);
13995 ValueOperand value = allRegs.takeAnyValue();
13997 LiveRegisterSet liveRegs;
13998 liveRegs.add(output);
13999 liveRegs.add(begin);
14000 liveRegs.add(value);
14002 masm.PushRegsInMask(liveRegs);
14004 // Initialize all elements.
14006 Register elements = output;
14007 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14009 Register argIndex = begin;
14011 Register index = temp;
14012 masm.move32(Imm32(0), index);
14014 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14015 BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);
14017 Label loop;
14018 masm.bind(&loop);
14020 masm.loadValue(argPtr, value);
14022 // We don't need a pre-barrier, because the element at |index| is guaranteed
14023 // to be a non-GC thing (either uninitialized memory or the magic hole
14024 // value).
14025 masm.storeValue(value, BaseObjectElementIndex(elements, index));
14027 masm.add32(Imm32(1), index);
14028 masm.add32(Imm32(1), argIndex);
14030 masm.branch32(Assembler::LessThan, index, count, &loop);
14032 masm.PopRegsInMask(liveRegs);
14034 // Emit a post-write barrier if |output| is tenured.
14036 // We expect that |output| is nursery allocated, so it isn't worth the
14037 // trouble to check if no frame argument is a nursery thing, which would
14038 // allow to omit the post-write barrier.
14039 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
14041 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14042 volatileRegs.takeUnchecked(temp);
14043 if (output.volatile_()) {
14044 volatileRegs.addUnchecked(output);
14047 masm.PushRegsInMask(volatileRegs);
14048 emitPostWriteBarrier(output);
14049 masm.PopRegsInMask(volatileRegs);
14051 masm.bind(&done);
14054 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
14055 const LAllocation* allocation) {
14056 if (allocation->isConstant()) {
14057 return RegisterOrInt32(allocation->toConstant()->toInt32());
14059 return RegisterOrInt32(ToRegister(allocation));
14062 void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
14063 RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
14064 RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
14065 Register temp = ToRegister(lir->temp());
14066 Register output = ToRegister(lir->output());
14068 uint32_t numActuals = lir->mir()->numActuals();
14070 #ifdef DEBUG
14071 masm.move32(Imm32(numActuals), temp);
14073 emitAssertArgumentsSliceBounds(begin, count, temp);
14074 #endif
14076 emitNewArray(lir, count, output, temp);
14078 // We're done if there are no actual arguments.
14079 if (numActuals == 0) {
14080 return;
14083 // Check if any arguments have to be copied.
14084 Label done;
14085 if (count.is<Register>()) {
14086 masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
14087 } else if (count.as<int32_t>() == 0) {
14088 return;
14091 auto getArg = [&](uint32_t i) {
14092 return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
14093 lir->mir()->getArg(i)->type());
14096 auto storeArg = [&](uint32_t i, auto dest) {
14097 // We don't need a pre-barrier because the element at |index| is guaranteed
14098 // to be a non-GC thing (either uninitialized memory or the magic hole
14099 // value).
14100 masm.storeConstantOrRegister(getArg(i), dest);
14103 // Initialize all elements.
14104 if (numActuals == 1) {
14105 // There's exactly one argument. We've checked that |count| is non-zero,
14106 // which implies that |begin| must be zero.
14107 MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);
14109 Register elements = temp;
14110 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14112 storeArg(0, Address(elements, 0));
14113 } else if (begin.is<Register>()) {
14114 // There is more than one argument and |begin| isn't a compile-time
14115 // constant. Iterate through 0..numActuals to search for |begin| and then
14116 // start copying |count| arguments from that index.
14118 LiveGeneralRegisterSet liveRegs;
14119 liveRegs.add(output);
14120 liveRegs.add(begin.as<Register>());
14122 masm.PushRegsInMask(liveRegs);
14124 Register elements = output;
14125 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14127 Register argIndex = begin.as<Register>();
14129 Register index = temp;
14130 masm.move32(Imm32(0), index);
14132 Label doneLoop;
14133 for (uint32_t i = 0; i < numActuals; ++i) {
14134 Label next;
14135 masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);
14137 storeArg(i, BaseObjectElementIndex(elements, index));
14139 masm.add32(Imm32(1), index);
14140 masm.add32(Imm32(1), argIndex);
14142 if (count.is<Register>()) {
14143 masm.branch32(Assembler::GreaterThanOrEqual, index,
14144 count.as<Register>(), &doneLoop);
14145 } else {
14146 masm.branch32(Assembler::GreaterThanOrEqual, index,
14147 Imm32(count.as<int32_t>()), &doneLoop);
14150 masm.bind(&next);
14152 masm.bind(&doneLoop);
14154 masm.PopRegsInMask(liveRegs);
14155 } else {
14156 // There is more than one argument and |begin| is a compile-time constant.
14158 Register elements = temp;
14159 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14161 int32_t argIndex = begin.as<int32_t>();
14163 int32_t index = 0;
14165 Label doneLoop;
14166 for (uint32_t i = argIndex; i < numActuals; ++i) {
14167 storeArg(i, Address(elements, index * sizeof(Value)));
14169 index += 1;
14171 if (count.is<Register>()) {
14172 masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
14173 Imm32(index), &doneLoop);
14174 } else {
14175 if (index >= count.as<int32_t>()) {
14176 break;
14180 masm.bind(&doneLoop);
14183 // Determine if we have to emit post-write barrier.
14185 // If either |begin| or |count| is a constant, use their value directly.
14186 // Otherwise assume we copy all inline arguments from 0..numActuals.
14187 bool postWriteBarrier = false;
14188 uint32_t actualBegin = begin.match([](Register) { return 0; },
14189 [](int32_t value) { return value; });
14190 uint32_t actualCount =
14191 count.match([=](Register) { return numActuals; },
14192 [](int32_t value) -> uint32_t { return value; });
14193 for (uint32_t i = 0; i < actualCount; ++i) {
14194 ConstantOrRegister arg = getArg(actualBegin + i);
14195 if (arg.constant()) {
14196 Value v = arg.value();
14197 if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
14198 postWriteBarrier = true;
14200 } else {
14201 MIRType type = arg.reg().type();
14202 if (type == MIRType::Value || NeedsPostBarrier(type)) {
14203 postWriteBarrier = true;
14208 // Emit a post-write barrier if |output| is tenured and we couldn't
14209 // determine at compile-time that no barrier is needed.
14210 if (postWriteBarrier) {
14211 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
14213 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14214 volatileRegs.takeUnchecked(temp);
14215 if (output.volatile_()) {
14216 volatileRegs.addUnchecked(output);
14219 masm.PushRegsInMask(volatileRegs);
14220 emitPostWriteBarrier(output);
14221 masm.PopRegsInMask(volatileRegs);
14224 masm.bind(&done);
// Normalize a relative slice bound (as used by Array.prototype.slice) into an
// absolute, clamped index:
//   value >= 0  ->  min(value, length)
//   value <  0  ->  max(value + length, 0)
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative case: add the length, then clamp the result to zero from below.
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Non-negative case: clamp to |length| from above. cmp32Move32 moves
  // |length| into |output| iff |length < value|.
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
}
// Emit Array.prototype.join. Inline fast paths handle the trivial cases
// (empty array -> "", single string element -> that string); everything else
// falls through to the jit::ArrayJoin VM call.
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  {
    masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
    Address length(temp, ObjectElements::offsetOfLength());
    Address initLength(temp, ObjectElements::offsetOfInitializedLength());

    // Check for length == 0
    Label notEmpty;
    masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
    // Empty array: the result is the empty atom.
    const JSAtomState& names = gen->runtime->names();
    masm.movePtr(ImmGCPtr(names.empty_), output);
    masm.jump(&skipCall);

    masm.bind(&notEmpty);
    Label notSingleString;
    // Check for length == 1, initializedLength >= 1, arr[0].isString()
    masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
    masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

    Address elem0(temp, 0);
    masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

    // At this point, 'output' can be used as a scratch register, since we're
    // guaranteed to succeed.
    masm.unboxString(elem0, output);
    masm.jump(&skipCall);
    masm.bind(&notSingleString);
  }

  // Slow path: call into the VM.
  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
14294 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
14295 Register object = ToRegister(lir->object());
14297 pushArg(object);
14299 using Fn = JSObject* (*)(JSContext*, HandleObject);
14300 callVM<Fn, jit::ObjectKeys>(lir);
14303 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
14304 Register object = ToRegister(lir->object());
14306 pushArg(object);
14308 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
14309 callVM<Fn, jit::ObjectKeysLength>(lir);
// Emit an inline cache that obtains an iterator for a value (IonGetIteratorIC).
void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  // The input may be a constant or live in registers; normalize it into a
  // TypedOrValueRegister for the IC.
  TypedOrValueRegister val =
      toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
                           lir->mir()->value()->type())
          .reg();
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
  addIC(lir, allocateIC(ic));
}
14326 void CodeGenerator::visitOptimizeSpreadCallCache(
14327 LOptimizeSpreadCallCache* lir) {
14328 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14329 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
14330 ValueOperand output = ToOutValue(lir);
14331 Register temp = ToRegister(lir->temp0());
14333 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
14334 addIC(lir, allocateIC(ic));
14337 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
14338 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14339 Register iter = ToRegister(lir->iter());
14340 Register temp = ToRegister(lir->temp0());
14341 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
14343 IonCloseIterIC ic(liveRegs, iter, temp, kind);
14344 addIC(lir, allocateIC(ic));
14347 void CodeGenerator::visitOptimizeGetIteratorCache(
14348 LOptimizeGetIteratorCache* lir) {
14349 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14350 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
14351 Register output = ToRegister(lir->output());
14352 Register temp = ToRegister(lir->temp0());
14354 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
14355 addIC(lir, allocateIC(ic));
14358 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
14359 const Register obj = ToRegister(lir->iterator());
14360 const ValueOperand output = ToOutValue(lir);
14361 const Register temp = ToRegister(lir->temp0());
14363 masm.iteratorMore(obj, output, temp);
// Branch on whether the input Value is the magic "no iteration" sentinel:
// jump to |ifTrue| if it is a magic value, otherwise fall through (or jump)
// to |ifFalse|.
void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
  ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  masm.branchTestMagic(Assembler::Equal, input, ifTrue);

  // Elide the jump when the false-successor is the next block in emission
  // order.
  if (!isNextBlock(lir->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
14378 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
14379 const Register obj = ToRegister(lir->object());
14380 const Register temp0 = ToRegister(lir->temp0());
14381 const Register temp1 = ToRegister(lir->temp1());
14382 const Register temp2 = ToRegister(lir->temp2());
14384 masm.iteratorClose(obj, temp0, temp1, temp2);
14387 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
14388 // read number of actual arguments from the JS frame.
14389 Register argc = ToRegister(lir->output());
14390 masm.loadNumActualArgs(FramePointer, argc);
// Load a frame argument (formal or actual) into the output Value, either from
// a constant offset or via a dynamic index off the frame pointer.
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    // Constant index: the argument lives at a fixed frame-pointer offset.
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    if (uint32_t(i) >= numFormals) {
      // The index exceeds the formal count, so it must be below the runtime
      // actual-argument count; verify that at runtime.
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
    }
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    // Dynamic index: compute the address with a Value-scaled index register.
    Register i = ToRegister(index);
#ifdef DEBUG
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
  }
}
// Load a frame argument by dynamic index, producing |undefined| for indexes
// at or beyond |length|. Negative indexes bail out.
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // Spectre-safe bounds check; out-of-bounds goes to the |undefined| path.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  // Negative indexes are not handled here; bail out to the interpreter.
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
}
// Materialize the rest parameter: compute the rest-argument count and the
// address of the first rest argument, optionally pre-allocate the array
// inline, and call InitRestParameter to fill it in. On inline-allocation
// failure a nullptr object is passed so the VM allocates instead.
void CodeGenerator::visitRest(LRest* lir) {
  Register numActuals = ToRegister(lir->numActuals());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  unsigned numFormals = lir->mir()->numFormals();

  if (Shape* shape = lir->mir()->shape()) {
    // Try to allocate the array (length 0, small fixed capacity) inline;
    // temp2 holds the result object, or nullptr on allocation failure.
    uint32_t arrayLength = 0;
    uint32_t arrayCapacity = 2;
    gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
    MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
    allocKind = ForegroundToBackgroundAllocKind(allocKind);
    MOZ_ASSERT(GetGCKindSlots(allocKind) ==
               arrayCapacity + ObjectElements::VALUES_PER_HEADER);

    Label joinAlloc, failAlloc;
    masm.movePtr(ImmGCPtr(shape), temp0);
    masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
                                      arrayLength, arrayCapacity, 0, 0,
                                      allocKind, gc::Heap::Default, &failAlloc);
    masm.jump(&joinAlloc);
    {
      masm.bind(&failAlloc);
      masm.movePtr(ImmPtr(nullptr), temp2);
    }
    masm.bind(&joinAlloc);
  } else {
    masm.movePtr(ImmPtr(nullptr), temp2);
  }

  // Set temp1 to the address of the first actual argument.
  size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
  masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);

  // Compute array length: max(numActuals - numFormals, 0).
  Register lengthReg;
  if (numFormals) {
    lengthReg = temp0;
    Label emptyLength, joinLength;
    masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
                  &emptyLength);
    {
      masm.move32(numActuals, lengthReg);
      masm.sub32(Imm32(numFormals), lengthReg);

      // Skip formal arguments.
      masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);

      masm.jump(&joinLength);
    }
    masm.bind(&emptyLength);
    {
      masm.move32(Imm32(0), lengthReg);

      // Leave temp1 pointed to the start of actuals() when the rest-array
      // length is zero. We don't use |actuals() + numFormals| because
      // |numFormals| can be any non-negative int32 value when this MRest was
      // created from scalar replacement optimizations. And it seems
      // questionable to compute a Value* pointer which points to who knows
      // where.
    }
    masm.bind(&joinLength);
  } else {
    // Use numActuals directly when there are no formals.
    lengthReg = numActuals;
  }

  // Arguments: (length, first rest arg address, pre-allocated array or null).
  pushArg(temp2);
  pushArg(temp1);
  pushArg(lengthReg);

  using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
  callVM<Fn, InitRestParameter>(lir);
}
14530 // Create a stackmap from the given safepoint, with the structure:
14532 // <reg dump, if any>
14533 // | ++ <body (general spill)>
14534 // | | ++ <space for Frame>
14535 // | | ++ <inbound args>
14536 // | | |
14537 // Lowest Addr Highest Addr
14538 // |
14539 // framePushedAtStackMapBase
14541 // The caller owns the resulting stackmap. This assumes a grow-down stack.
14543 // For non-debug builds, if the stackmap would contain no pointers, no
14544 // stackmap is created, and nullptr is returned. For a debug build, a
14545 // stackmap is always created and returned.
14547 // Depending on the type of safepoint, the stackmap may need to account for
14548 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
14549 // isCall() == true, for which the register allocator will spill/restore all
14550 // live registers at the LIR level - in this case, the LSafepoint sees only live
14551 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
14552 // the other hand, is for LIR nodes which may manually spill/restore live
14553 // registers in codegen, in which case the stackmap must account for this. Traps
14554 // also require tracking of live registers, but spilling is handled by the trap
14555 // mechanism.
// Build a wasm::StackMap for one LSafepoint. See the block comment above for
// the frame layout the map describes. Returns false only on OOM; *result may
// legitimately be nullptr in non-debug builds when no refs are present.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes spilled for live registers, outside of a trap.
  // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
  const size_t nRegisterDumpBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());

  // As mentioned above, for WasmSafepointKind::LirCall, register spills and
  // restores are handled at the LIR level and there should therefore be no
  // live registers to handle here.
  MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
                nRegisterDumpBytes == 0);
  MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any register dump area.
  const size_t nNonRegisterBytes =
      nBodyBytes + nFrameBytes + nInboundStackArgBytes;
  MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);

  // This is the number of bytes in the register dump area, if any, below the
  // general spill area.
  const size_t nRegisterBytes =
      (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
          ? (trapExitLayoutNumWords * sizeof(void*))
          : nRegisterDumpBytes;

  // This is the total number of bytes covered by the map.
  const DebugOnly<size_t> nTotalBytes = nNonRegisterBytes + nRegisterBytes;

  // Create the stackmap initially in this vector. Since most frames will
  // contain 128 or fewer words, heap allocation is avoided in the majority of
  // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
  // highest address in the map.
  wasm::StackMapBoolVector vec;

  // Keep track of whether we've actually seen any refs.
  bool hasRefs = false;

  // REG DUMP AREA, if any.
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
  switch (safepoint.wasmSafepointKind()) {
    case WasmSafepointKind::LirCall:
    case WasmSafepointKind::CodegenCall: {
      size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
      if (!vec.appendN(false, spilledNumWords)) {
        return false;
      }

      // Mark the words holding spilled anyref-carrying GPRs.
      for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
        Register reg = *wasmAnyRefRegsIter;
        size_t offsetFromSpillBase =
            safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
            sizeof(void*);
        MOZ_ASSERT(0 < offsetFromSpillBase &&
                   offsetFromSpillBase <= spilledNumWords);
        size_t offsetInVector = spilledNumWords - offsetFromSpillBase;

        vec[offsetInVector] = true;
        hasRefs = true;
      }

      // Float and vector registers do not have to be handled; they cannot
      // contain wasm anyrefs, and they are spilled after general-purpose
      // registers. Gprs are therefore closest to the spill base and thus their
      // offset calculation does not need to account for other spills.
    } break;
    case WasmSafepointKind::Trap: {
      if (!vec.appendN(false, trapExitLayoutNumWords)) {
        return false;
      }

      for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
        Register reg = *wasmAnyRefRegsIter;
        size_t offsetFromTop = trapExitLayout.getOffset(reg);

        // If this doesn't hold, the associated register wasn't saved by
        // the trap exit stub. Better to crash now than much later, in
        // some obscure place, and possibly with security consequences.
        MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

        // offsetFromTop is an offset in words down from the highest
        // address in the exit stub save area. Switch it around to be an
        // offset up from the bottom of the (integer register) save area.
        size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

        vec[offsetFromBottom] = true;
        hasRefs = true;
      }
    } break;
    default:
      MOZ_CRASH("unreachable");
  }

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  size_t wordsSoFar = vec.length();
  if (!vec.appendN(false, nNonRegisterBytes / sizeof(void*))) {
    return false;
  }
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
    } else {
      // It's an argument slot
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
    }
    hasRefs = true;
  }

#ifndef DEBUG
  // We saw no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!hasRefs) {
    return true;
  }
#endif

  // Convert vec into a wasm::StackMap.
  MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
  wasm::StackMap* stackMap =
      wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
  if (!stackMap) {
    return false;
  }
  if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
                                stackMap->header.frameOffsetFromTop + i) == 0);
  }
#endif

  *result = stackMap;
  return true;
}
// Emit the complete machine code for one wasm function: prologue,
// stack-overflow check (with an entry-trap stackmap), body, epilogue and
// out-of-line code, then convert every recorded safepoint into a stackmap.
// Returns false on OOM or when the frame exceeds wasm::MaxFrameSize.
bool CodeGenerator::generateWasm(
    wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
    const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
    wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedCheck()) {
    masm.reserveStack(frameSize());
  } else {
    // Reserve the stack with an over-recursion check; the check can trap, so
    // it needs its own stackmap covering the partially-built frame.
    std::pair<CodeOffset, uint32_t> pair =
        masm.wasmReserveStackChecked(frameSize(), trapOffset);
    CodeOffset trapInsnOffset = pair.first;
    size_t nBytesReservedBeforeTrap = pair.second;

    wasm::StackMap* functionEntryStackMap = nullptr;
    if (!CreateStackMapForFunctionEntryTrap(
            argTypes, trapExitLayout, trapExitLayoutNumWords,
            nBytesReservedBeforeTrap, nInboundStackArgBytes,
            &functionEntryStackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map, even if there are no
    // refs to track.
    MOZ_ASSERT(functionEntryStackMap);

    if (functionEntryStackMap &&
        !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                        functionEntryStackMap)) {
      functionEntryStackMap->destroy();
      return false;
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // None of the Ion-only bookkeeping structures should be in use for wasm.
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
                                      trapExitLayoutNumWords,
                                      nInboundStackArgBytes, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      continue;
    }

    if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
      stackMap->destroy();
      return false;
    }
  }

  return true;
}
// Emit Ion code for the whole LIR graph: prologue, body, epilogue,
// invalidation epilogue and out-of-line code, while keeping the
// native-to-bytecode table and safepoints up to date. Returns false on OOM.
bool CodeGenerator::generate() {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().oneOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer_.recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  return !masm.oom();
}
// Register every script inlined into |script| with the JitZone so that this
// compilation is invalidated if one of them is recompiled. Sets |*isValid| to
// false (while still returning true) when the compilation must be discarded,
// e.g. because an inlined script became a debuggee. Returns false on OOM.
static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
                                   IonCompilationId compilationId,
                                   const WarpSnapshot* snapshot,
                                   bool* isValid) {
  MOZ_ASSERT(!*isValid);
  RecompileInfo recompileInfo(script, compilationId);

  JitZone* jitZone = cx->zone()->jitZone();

  for (const auto* scriptSnapshot : snapshot->scripts()) {
    JSScript* inlinedScript = scriptSnapshot->script();
    if (inlinedScript == script) {
      continue;
    }

    // TODO(post-Warp): This matches FinishCompilation and is necessary to
    // ensure in-progress compilations are canceled when an inlined function
    // becomes a debuggee. See the breakpoint-14.js jit-test.
    // When TI is gone, try to clean this up by moving AddInlinedCompilations to
    // WarpOracle so that we can handle this as part of addPendingRecompile
    // instead of requiring this separate check.
    if (inlinedScript->isDebuggee()) {
      *isValid = false;
      return true;
    }

    if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
      return false;
    }
  }

  *isValid = true;
  return true;
}
14940 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
14941 AutoCreatedBy acb(masm, "CodeGenerator::link");
14943 // We cancel off-thread Ion compilations in a few places during GC, but if
14944 // this compilation was performed off-thread it will already have been
14945 // removed from the relevant lists by this point. Don't allow GC here.
14946 JS::AutoAssertNoGC nogc(cx);
14948 RootedScript script(cx, gen->outerInfo().script());
14949 MOZ_ASSERT(!script->hasIonScript());
14951 // Perform any read barriers which were skipped while compiling the
14952 // script, which may have happened off-thread.
14953 JitZone* jitZone = cx->zone()->jitZone();
14954 jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);
14956 if (scriptCounts_ && !script->hasScriptCounts() &&
14957 !script->initScriptCounts(cx)) {
14958 return false;
14961 IonCompilationId compilationId =
14962 cx->runtime()->jitRuntime()->nextCompilationId();
14963 jitZone->currentCompilationIdRef().emplace(compilationId);
14964 auto resetCurrentId = mozilla::MakeScopeExit(
14965 [jitZone] { jitZone->currentCompilationIdRef().reset(); });
  // Record constraints. If an error occurred, returns false and potentially
  // prevents future compilations. Otherwise, if an invalidation occurred, then
  // skip the current compilation.
14970 bool isValid = false;
14972 // If an inlined script is invalidated (for example, by attaching
14973 // a debugger), we must also invalidate the parent IonScript.
14974 if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
14975 return false;
14977 if (!isValid) {
14978 return true;
14981 uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
14983 size_t numNurseryObjects = snapshot->nurseryObjects().length();
14985 IonScript* ionScript = IonScript::New(
14986 cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
14987 snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
14988 graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
14989 osiIndices_.length(), icList_.length(), runtimeData_.length(),
14990 safepoints_.size());
14991 if (!ionScript) {
14992 return false;
14994 #ifdef DEBUG
14995 ionScript->setICHash(snapshot->icHash());
14996 #endif
14998 auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
14999 // Use js_free instead of IonScript::Destroy: the cache list is still
15000 // uninitialized.
15001 js_free(ionScript);
15004 Linker linker(masm);
15005 JitCode* code = linker.newCode(cx, CodeKind::Ion);
15006 if (!code) {
15007 return false;
15010 // Encode native to bytecode map if profiling is enabled.
15011 if (isProfilerInstrumentationEnabled()) {
15012 // Generate native-to-bytecode main table.
15013 IonEntry::ScriptList scriptList;
15014 if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
15015 return false;
15018 uint8_t* ionTableAddr =
15019 ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
15020 JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
15022 // Construct the IonEntry that will go into the global table.
15023 auto entry = MakeJitcodeGlobalEntry<IonEntry>(
15024 cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
15025 if (!entry) {
15026 return false;
15028 (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
15030 // Add entry to the global table.
15031 JitcodeGlobalTable* globalTable =
15032 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15033 if (!globalTable->addEntry(std::move(entry))) {
15034 return false;
15037 // Mark the jitcode as having a bytecode map.
15038 code->setHasBytecodeMap();
15039 } else {
    // Add a dummy jitcodeGlobalTable entry.
15041 auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
15042 code->rawEnd());
15043 if (!entry) {
15044 return false;
15047 // Add entry to the global table.
15048 JitcodeGlobalTable* globalTable =
15049 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
15050 if (!globalTable->addEntry(std::move(entry))) {
15051 return false;
15054 // Mark the jitcode as having a bytecode map.
15055 code->setHasBytecodeMap();
15058 ionScript->setMethod(code);
15060 // If the Gecko Profiler is enabled, mark IonScript as having been
15061 // instrumented accordingly.
15062 if (isProfilerInstrumentationEnabled()) {
15063 ionScript->setHasProfilingInstrumentation();
15066 Assembler::PatchDataWithValueCheck(
15067 CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
15068 ImmPtr((void*)-1));
15070 for (CodeOffset offset : ionScriptLabels_) {
15071 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
15072 ImmPtr(ionScript), ImmPtr((void*)-1));
15075 for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
15076 void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
15077 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
15078 ImmPtr(entry), ImmPtr((void*)-1));
15081 // for generating inline caches during the execution.
15082 if (runtimeData_.length()) {
15083 ionScript->copyRuntimeData(&runtimeData_[0]);
15085 if (icList_.length()) {
15086 ionScript->copyICEntries(&icList_[0]);
15089 for (size_t i = 0; i < icInfo_.length(); i++) {
15090 IonIC& ic = ionScript->getICFromIndex(i);
15091 Assembler::PatchDataWithValueCheck(
15092 CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
15093 ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
15094 Assembler::PatchDataWithValueCheck(
15095 CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
15096 ImmPtr((void*)-1));
15099 JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
15100 (void*)code->raw());
15102 ionScript->setInvalidationEpilogueDataOffset(
15103 invalidateEpilogueData_.offset());
15104 if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
15105 ionScript->setOsrPc(osrPc);
15106 ionScript->setOsrEntryOffset(getOsrEntryOffset());
15108 ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
15110 perfSpewer_.saveProfile(cx, script, code);
15112 #ifdef MOZ_VTUNE
15113 vtune::MarkScript(code, script, "ion");
15114 #endif
15116   // Set an Ion counter hint for this script.
15117 if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
15118 JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
15119 jitHints->recordIonCompilation(script);
15122 // for marking during GC.
15123 if (safepointIndices_.length()) {
15124 ionScript->copySafepointIndices(&safepointIndices_[0]);
15126 if (safepoints_.size()) {
15127 ionScript->copySafepoints(&safepoints_);
15130 // for recovering from an Ion Frame.
15131 if (osiIndices_.length()) {
15132 ionScript->copyOsiIndices(&osiIndices_[0]);
15134 if (snapshots_.listSize()) {
15135 ionScript->copySnapshots(&snapshots_);
15137 MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
15138 if (recovers_.size()) {
15139 ionScript->copyRecovers(&recovers_);
15141 if (graph.numConstants()) {
15142 const Value* vp = graph.constantPool();
15143 ionScript->copyConstants(vp);
15144 for (size_t i = 0; i < graph.numConstants(); i++) {
15145 const Value& v = vp[i];
15146 if (v.isGCThing()) {
15147 if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
15148 sb->putWholeCell(script);
15149 break;
15155 // Attach any generated script counts to the script.
15156 if (IonScriptCounts* counts = extractScriptCounts()) {
15157 script->addIonCounts(counts);
15160 // WARNING: Code after this point must be infallible!
15162 // Copy the list of nursery objects. Note that the store buffer can add
15163 // HeapPtr edges that must be cleared in IonScript::Destroy. See the
15164 // infallibility warning above.
15165 const auto& nurseryObjects = snapshot->nurseryObjects();
15166 for (size_t i = 0; i < nurseryObjects.length(); i++) {
15167 ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
15170 // Transfer ownership of the IonScript to the JitScript. At this point enough
15171 // of the IonScript must be initialized for IonScript::Destroy to work.
15172 freeIonScript.release();
15173 script->jitScript()->setIonScript(script, ionScript);
15175 return true;
// An out-of-line path to convert a boxed int32 to either a float or double.
class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
  // The LIR instruction whose out-of-line int32 conversion this path emits.
  LUnboxFloatingPoint* unboxFloatingPoint_;

 public:
  explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
      : unboxFloatingPoint_(unboxFloatingPoint) {}

  // Dispatch to CodeGenerator::visitOutOfLineUnboxFloatingPoint.
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineUnboxFloatingPoint(this);
  }

  LUnboxFloatingPoint* unboxFloatingPoint() const {
    return unboxFloatingPoint_;
  }
};
// Unbox a Value known to hold a number into a float/double register.
// The inline path handles boxed doubles; boxed int32 (or a bailout when
// fallible) is handled out of line.
void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
  const LDefinition* result = lir->output();

  // Out-of-line path to convert int32 to double or bailout
  // if this instruction is fallible.
  OutOfLineUnboxFloatingPoint* ool =
      new (alloc()) OutOfLineUnboxFloatingPoint(lir);
  addOutOfLineCode(ool, lir->mir());

  FloatRegister resultReg = ToFloatRegister(result);
  // Fast path: the boxed value is already a double.
  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  masm.unboxDouble(box, resultReg);
  if (lir->type() == MIRType::Float32) {
    masm.convertDoubleToFloat32(resultReg, resultReg);
  }
  masm.bind(ool->rejoin());
}
// Out-of-line continuation of visitUnboxFloatingPoint: the value was not a
// double, so convert int32 to the requested floating-point type, bailing out
// first on non-int32 values when the instruction is fallible.
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    // Neither double (checked inline) nor int32: bail out.
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
                                 ins->type());
  masm.jump(ool->rejoin());
}
// Call into the VM to resolve the `var` binding object on the current
// environment chain (BindVarOperation).
void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
  pushArg(ToRegister(lir->environmentChain()));

  using Fn = JSObject* (*)(JSContext*, JSObject*);
  callVM<Fn, BindVarOperation>(lir);
}
// Megamorphic obj[id] = value. Tries the inline megamorphic set-slot cache
// first; on a miss falls back to a VM call, and on a hit emits the post-write
// barrier for a possible nursery value stored into a tenured object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
  // Try the megamorphic cache; jumps to |cacheHit| on success. The lambda
  // emits the pre-barrier for the slot being overwritten.
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: do the generic VM call.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only needed if |obj| is tenured and |value| is a
  // nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the scripted proxy's handler Value out of the proxy's reserved slots.
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  ValueOperand output = ToOutValue(ins);

  // reservedSlots pointer -> scratch, then read the HANDLER_EXTRA slot.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               output.scratchReg());
  masm.loadValue(
      Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
                                       ScriptedProxyHandler::HANDLER_EXTRA)),
      output);
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted proxy [[Get]]: if the target object needs
// proxy result validation, call into the VM (out of line) to check |value|.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
// Convert an id Value to a string or symbol. Strings and symbols pass
// through unchanged; int32 ids are converted to strings (static-string
// lookup inline, VM call out of line); anything else bails out.
void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
  ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
  ValueOperand output = ToOutValue(ins);
  Register scratch = ToRegister(ins->temp0());

  masm.moveValue(id, output);

  Label done, callVM;
  Label bail;
  {
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
  }

  // Int32 id: convert it to a string.
  masm.unboxInt32(output, scratch);

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));

  // Try the runtime's static int-string table before calling into the VM.
  masm.lookupStaticIntString(scratch, output.scratchReg(),
                             gen->runtime->staticStrings(), ool->entry());

  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
  masm.bind(&done);

  bailoutFrom(&bail, ins->snapshot());
}
// Load a boxed Value from one of an object's fixed slots.
void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
}
// Load a fixed slot whose type is statically known, unboxing directly into a
// typed register.
void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Load a Value from |src| and unbox it into |dest| as |type|.
// When |fallible| is true, a type mismatch jumps to |fail|; otherwise the
// value is assumed to already have the expected type. Doubles are special:
// ensureDouble also accepts int32 inputs and converts them (jumping to |fail|
// for anything else).
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: the stored value is known to have the right type.
  masm.loadUnboxedValue(src, type, dest);
}
// Fused load-and-unbox of a fixed slot; bails out on a type mismatch when
// the MIR node is fallible.
void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Fused load-and-unbox of a dynamic slot (|ins->slots()| points at the
// object's slots array); bails out on a type mismatch when fallible.
void CodeGenerator::visitLoadDynamicSlotAndUnbox(
    LLoadDynamicSlotAndUnbox* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Fused load-and-unbox of a dense element, with constant- and
// register-indexed addressing modes; bails out on a type mismatch when
// fallible.
void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  const MLoadElementAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register elements = ToRegister(ins->elements());
  AnyRegister result = ToAnyRegister(ins->output());

  Label bail;
  if (ins->index()->isConstant()) {
    // Constant index: the offset computation cannot overflow int32.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(ins->index()) * sizeof(Value);
    Address address(elements, offset);
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(ins->index()));
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  }

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Out-of-line path that atomizes the string currently stored in a slot and
// rewrites the slot (and the destination register) with the atom.
class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  Register stringReg_;  // Holds the (non-atom) string on entry.
  Address slotAddr_;    // Slot to overwrite with the atomized string.
  TypedOrValueRegister dest_;

 public:
  OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
                       TypedOrValueRegister dest)
      : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}

  // Dispatch to CodeGenerator::visitOutOfLineAtomizeSlot.
  void accept(CodeGenerator* codegen) final {
    codegen->visitOutOfLineAtomizeSlot(this);
  }
  LInstruction* lir() const { return lir_; }
  Register stringReg() const { return stringReg_; }
  Address slotAddr() const { return slotAddr_; }
  TypedOrValueRegister dest() const { return dest_; }
};
void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
  LInstruction* lir = ool->lir();
  Register stringReg = ool->stringReg();
  Address slotAddr = ool->slotAddr();
  TypedOrValueRegister dest = ool->dest();

  // This code is called with a non-atomic string in |stringReg|.
  // When it returns, |stringReg| contains an unboxed pointer to an
  // atomized version of that string, and |slotAddr| contains a
  // StringValue pointing to that atom. If |dest| is a ValueOperand,
  // it contains the same StringValue; otherwise we assert that |dest|
  // is |stringReg|.

  saveLive(lir);
  pushArg(stringReg);

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  callVM<Fn, js::AtomizeString>(lir);
  StoreRegisterTo(stringReg).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Overwrite the slot with the atom; the old string needs a pre-barrier.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
  // We need a temp register for the nursery check. Spill something.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(stringReg);
  Register temp = allRegs.takeAny();
  masm.push(temp);

  Label tenured;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
  masm.assumeUnreachable("AtomizeString returned a nursery pointer");
  masm.bind(&tenured);

  masm.pop(temp);
#endif

  masm.jump(ool->rejoin());
}
// If the string in |stringReg| is not already an atom (ATOM_BIT clear in its
// flags), take the out-of-line path that atomizes it and rewrites |slotAddr|.
void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
                                         Address slotAddr,
                                         TypedOrValueRegister dest) {
  OutOfLineAtomizeSlot* ool =
      new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
  addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
  masm.branchTest32(Assembler::Zero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->entry());
  masm.bind(ool->rejoin());
}
// Load a fixed slot's Value; if it holds a string, atomize it in place so
// later uses see an atom. Non-string values pass through unchanged.
void CodeGenerator::visitLoadFixedSlotAndAtomize(
    LLoadFixedSlotAndAtomize* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register temp = ToRegister(ins->temp0());
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  masm.loadValue(slotAddr, result);

  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Dynamic-slot counterpart of visitLoadFixedSlotAndAtomize: load the Value,
// and atomize it in place when it is a string.
void CodeGenerator::visitLoadDynamicSlotAndAtomize(
    LLoadDynamicSlotAndAtomize* ins) {
  ValueOperand result = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());
  Register base = ToRegister(ins->input());
  int32_t offset = ins->mir()->slot() * sizeof(js::Value);

  Address slotAddr(base, offset);
  masm.loadValue(slotAddr, result);

  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a fixed slot known to hold a string, unbox it, and atomize in place.
// Bails out if the slot does not hold a string (fallible case).
void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
    LLoadFixedSlotUnboxAndAtomize* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Dynamic-slot counterpart of visitLoadFixedSlotUnboxAndAtomize: load,
// unbox as string (bailing out when fallible), then atomize in place.
void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
    LLoadDynamicSlotUnboxAndAtomize* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Add a property by updating the object's shape, then store the value into
// the new (fixed or dynamic) slot.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  // Write the new shape; the old shape pointer needs a pre-barrier.
  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    // Dynamic slot: indirect through the object's slots pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Grow the object's dynamic slots via a pure ABI call, then update the shape
// and store the value into the new slot. Bails out if allocation fails.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // |obj| and |value| are needed after the ABI call; preserve them on the
  // stack.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returned false: bail out to the interpreter.
  bailoutIfFalseBool(temp0, ins->snapshot());

  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
// VM call that adds a slot to the object and invokes the class addProperty
// hook.
void CodeGenerator::visitAddSlotAndCallAddPropHook(
    LAddSlotAndCallAddPropHook* ins) {
  const Register obj = ToRegister(ins->object());
  const ValueOperand value =
      ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);

  pushArg(ImmGCPtr(ins->mir()->shape()));
  pushArg(value);
  pushArg(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callVM<Fn, AddSlotAndCallAddPropHook>(ins);
}
// Store a boxed Value into an object's fixed slot, with a pre-barrier when
// the MIR node requires one.
void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  masm.storeValue(value, address);
}
// Store a typed (constant or register) value into an object's fixed slot,
// with a pre-barrier when required.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
// Emit an IonGetNameIC for a name lookup on the environment chain.
void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->envObj());
  ValueOperand output = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());

  IonGetNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Attach an IonGetPropertyIC. A constant non-index atom id upgrades the
// cache kind from GetElem to GetProp.
void CodeGenerator::addGetPropertyCache(LInstruction* ins,
                                        LiveRegisterSet liveRegs,
                                        TypedOrValueRegister value,
                                        const ConstantOrRegister& id,
                                        ValueOperand output) {
  CacheKind kind = CacheKind::GetElem;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetProp;
    }
  }
  IonGetPropertyIC cache(kind, liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Attach an IonSetPropertyIC. A constant non-index atom id upgrades the
// cache kind from SetElem to SetProp.
void CodeGenerator::addSetPropertyCache(LInstruction* ins,
                                        LiveRegisterSet liveRegs,
                                        Register objReg, Register temp,
                                        const ConstantOrRegister& id,
                                        const ConstantOrRegister& value,
                                        bool strict) {
  CacheKind kind = CacheKind::SetElem;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::SetProp;
    }
  }
  IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
  addIC(ins, allocateIC(cache));
}
// View operand |n| of |lir| as a ConstantOrRegister: boxed Values and typed
// registers become TypedOrValueRegister, LIR constants become JS constants.
ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
                                                       size_t n,
                                                       MIRType type) {
  if (type == MIRType::Value) {
    return TypedOrValueRegister(ToValue(lir, n));
  }

  const LAllocation* value = lir->getOperand(n);
  if (value->isConstant()) {
    return ConstantOrRegister(value->toConstant()->toJSValue());
  }

  return TypedOrValueRegister(type, ToAnyRegister(value));
}
// Emit a get-property IC for obj[id] / obj.prop.
void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);
  addGetPropertyCache(ins, liveRegs, value, id, output);
}
// Emit an IonGetPropSuperIC for super.prop / super[id]. A constant non-index
// atom id upgrades the cache kind from GetElemSuper to GetPropSuper.
void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister receiver =
      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
                           ins->mir()->receiver()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);

  CacheKind kind = CacheKind::GetElemSuper;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetPropSuper;
    }
  }

  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  addIC(ins, allocateIC(cache));
}
// Emit an IonBindNameIC resolving the object a name binds to on the
// environment chain.
void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->environmentChain());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonBindNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Emit an IonHasOwnIC for Object.hasOwnProperty-style checks.
void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
                                                 ins->mir()->idval()->type())
                                .reg();
  Register output = ToRegister(ins->output());

  IonHasOwnIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Emit an IonCheckPrivateFieldIC for `#field in obj` checks.
void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
                           ins->mir()->idval()->type())
          .reg();
  Register output = ToRegister(ins->output());

  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// VM call creating a new private-name symbol from the given atom.
void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
  pushArg(ImmGCPtr(ins->mir()->name()));

  using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
  callVM<Fn, NewPrivateName>(ins);
}
// VM call implementing `delete obj.prop`, selecting the strict or
// non-strict template instantiation at compile time.
void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelPropOperation<true>>(lir);
  } else {
    callVM<Fn, DelPropOperation<false>>(lir);
  }
}
// VM call implementing `delete obj[index]`, selecting the strict or
// non-strict template instantiation at compile time.
void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
  pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
  pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelElemOperation<true>>(lir);
  } else {
    callVM<Fn, DelElemOperation<false>>(lir);
  }
}
// Get a for-in iterator for |obj|: try the shape-keyed iterator cache
// inline, falling back to a VM call (with or without indices support), then
// mark the iterator active and register it, emitting a post-write barrier
// for the stored object pointer.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);

  LiveRegisterSet save = liveVolatileRegs(lir);
  save.takeUnchecked(temp);
  save.takeUnchecked(temp2);
  save.takeUnchecked(temp3);
  if (iterObj.volatile_()) {
    save.addUnchecked(iterObj);
  }

  masm.PushRegsInMask(save);
  emitPostWriteBarrier(iterObj);
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
// VM call producing a property iterator for an arbitrary Value.
void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
  pushArg(ToValue(lir, LValueToIterator::ValueIndex));

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callVM<Fn, ValueToIterator>(lir);
}
// Branch on whether the iterator has valid indices AND the iterated object's
// shape still matches the iterator's recorded first shape.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through to the true block when it is the next one; otherwise jump.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load the current property's value via the iterator's index/kind pair:
// dynamic slot, fixed slot, or dense element.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp = current index, temp2 = PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: index into the object's slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store |value| into |object| at the current iterator index. Mirrors
// visitLoadSlotByIteratorIndex: computes the destination address for a
// dynamic slot, fixed slot, or dense element, then performs one shared
// store with pre/post write barriers.
16036 void CodeGenerator::visitStoreSlotByIteratorIndex(
16037 LStoreSlotByIteratorIndex* lir) {
16038 Register object = ToRegister(lir->object());
16039 Register iterator = ToRegister(lir->iterator());
16040 ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
16041 Register temp = ToRegister(lir->temp0());
16042 Register temp2 = ToRegister(lir->temp1());
// temp <- index, temp2 <- PropertyIndex::Kind.
16044 masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);
16046 Label notDynamicSlot, notFixedSlot, done, doStore;
16047 masm.branch32(Assembler::NotEqual, temp2,
16048 Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
16049 &notDynamicSlot);
// Dynamic slot: address of slots[index] into temp.
16050 masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
16051 masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
16052 masm.jump(&doStore);
16054 masm.bind(&notDynamicSlot);
16055 masm.branch32(Assembler::NotEqual, temp2,
16056 Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
16057 // Fixed slot
16058 masm.computeEffectiveAddress(
16059 BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
16060 masm.jump(&doStore);
16061 masm.bind(&notFixedSlot);
16063 #ifdef DEBUG
// In debug builds, assert the only remaining kind is Element.
16064 Label kindOkay;
16065 masm.branch32(Assembler::Equal, temp2,
16066 Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
16067 masm.assumeUnreachable("Invalid PropertyIndex::Kind");
16068 masm.bind(&kindOkay);
16069 #endif
16071 // Dense element
16072 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
// Debug-only bounds check against the initialized length.
16073 Label indexOkay;
16074 Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
16075 masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
16076 masm.assumeUnreachable("Dense element out of bounds");
16077 masm.bind(&indexOkay);
16079 BaseObjectElementIndex elementAddress(temp2, temp);
16080 masm.computeEffectiveAddress(elementAddress, temp);
// Shared store path: temp holds the destination Value address.
16082 masm.bind(&doStore);
16083 Address storeAddress(temp, 0);
16084 emitPreBarrier(storeAddress);
16085 masm.storeValue(value, storeAddress);
// Post-barrier only needed for a tenured object storing a nursery cell.
16087 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
16088 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);
16090 saveVolatile(temp2);
16091 emitPostWriteBarrier(object);
16092 restoreVolatile(temp2);
16094 masm.bind(&done);
// Lower LSetPropertyCache by attaching a SetProperty IC: the id and value
// operands may be constants or registers depending on their MIR types.
16097 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
16098 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
16099 Register objReg = ToRegister(ins->object());
16100 Register temp = ToRegister(ins->temp0());
16102 ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
16103 ins->mir()->idval()->type());
16104 ConstantOrRegister value = toConstantOrRegister(
16105 ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
16107 addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
16108 ins->mir()->strict());
// Lower LThrow: call the ThrowOperation VM function with the thrown value.
16111 void CodeGenerator::visitThrow(LThrow* lir) {
16112 pushArg(ToValue(lir, LThrow::ValueIndex));
16114 using Fn = bool (*)(JSContext*, HandleValue);
16115 callVM<Fn, js::ThrowOperation>(lir);
// Lower LThrowWithStack: like visitThrow but also passes a stack value.
// Arguments are pushed in reverse order (stack first, then value).
16118 void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
16119 pushArg(ToValue(lir, LThrowWithStack::StackIndex));
16120 pushArg(ToValue(lir, LThrowWithStack::ValueIndex));
16122 using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
16123 callVM<Fn, js::ThrowWithStackOperation>(lir);
// Out-of-line path for LTypeOfV: handles object inputs, which may be
// callable ("function") or may emulate undefined.
16126 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
16127 LTypeOfV* ins_;
16129 public:
16130 explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
16132 void accept(CodeGenerator* codegen) override {
16133 codegen->visitOutOfLineTypeOfV(this);
16135 LTypeOfV* ins() const { return ins_; }
// Move the JSType result of |typeof| for a given JSValueType into |output|.
// Only used for types whose typeof result is statically known (objects that
// might be callable or emulate undefined take the OOL path instead).
16138 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
16139 switch (type) {
16140 case JSVAL_TYPE_OBJECT:
16141 masm.move32(Imm32(JSTYPE_OBJECT), output);
16142 break;
16143 case JSVAL_TYPE_DOUBLE:
16144 case JSVAL_TYPE_INT32:
16145 masm.move32(Imm32(JSTYPE_NUMBER), output);
16146 break;
16147 case JSVAL_TYPE_BOOLEAN:
16148 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
16149 break;
16150 case JSVAL_TYPE_UNDEFINED:
16151 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16152 break;
16153 case JSVAL_TYPE_NULL:
// typeof null is "object" per the ECMAScript spec, so null maps to
// JSTYPE_OBJECT here, not a distinct "null" type.
16154 masm.move32(Imm32(JSTYPE_OBJECT), output);
16155 break;
16156 case JSVAL_TYPE_STRING:
16157 masm.move32(Imm32(JSTYPE_STRING), output);
16158 break;
16159 case JSVAL_TYPE_SYMBOL:
16160 masm.move32(Imm32(JSTYPE_SYMBOL), output);
16161 break;
16162 case JSVAL_TYPE_BIGINT:
16163 masm.move32(Imm32(JSTYPE_BIGINT), output);
16164 break;
16165 default:
16166 MOZ_CRASH("Unsupported JSValueType");
// Emit one tag check for |type|: on match, load the typeof result and jump
// to |done|; on mismatch, fall through. Object inputs jump to |oolObject|
// instead, since their typeof result can't be determined from the tag alone.
16170 void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
16171 Register output, Label* done,
16172 Label* oolObject) {
16173 Label notMatch;
16174 switch (type) {
16175 case JSVAL_TYPE_OBJECT:
16176 // The input may be a callable object (result is "function") or
16177 // may emulate undefined (result is "undefined"). Use an OOL path.
16178 masm.branchTestObject(Assembler::Equal, tag, oolObject);
16179 return;
16180 case JSVAL_TYPE_DOUBLE:
16181 case JSVAL_TYPE_INT32:
// Int32 and double both report "number"; test them with one branch.
16182 masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
16183 break;
16184 default:
16185 masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
16186 break;
16189 emitTypeOfJSType(type, output);
16190 masm.jump(done);
16191 masm.bind(&notMatch);
// Lower LTypeOfV (typeof on a boxed Value). Emits a chain of tag checks,
// ordered by observed-type frequency first, then the remaining types in a
// default order. Objects go through an OOL path.
16194 void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
16195 const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
16196 Register output = ToRegister(lir->output());
16197 Register tag = masm.extractTag(value, output);
16199 Label done;
16201 auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
16202 addOutOfLineCode(ool, lir->mir());
16204 const std::initializer_list<JSValueType> defaultOrder = {
16205 JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
16206 JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
16207 JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
16209 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
16211 // Generate checks for previously observed types first.
16212 // The TypeDataList is sorted by descending frequency.
16213 for (auto& observed : lir->mir()->observedTypes()) {
16214 JSValueType type = observed.type();
16216 // Unify number types.
16217 if (type == JSVAL_TYPE_INT32) {
16218 type = JSVAL_TYPE_DOUBLE;
16221 remaining -= type;
16223 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16226 // Generate checks for remaining types.
16227 for (auto type : defaultOrder) {
16228 if (!remaining.contains(type)) {
16229 continue;
16231 remaining -= type;
16233 if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
16234 // We can skip the check for the last remaining type, unless the type is
16235 // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
16236 #ifdef DEBUG
// Debug builds still emit the check so an unexpected tag is caught.
16237 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16238 masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
16239 #else
16240 emitTypeOfJSType(type, output);
16241 #endif
16242 } else {
16243 emitTypeOfCheck(type, tag, output, &done, ool->entry());
16246 MOZ_ASSERT(remaining.isEmpty());
16248 masm.bind(&done);
16249 masm.bind(ool->rejoin());
// Compute the JSType for an object: "function" for callables, "undefined"
// for objects that emulate undefined, "object" otherwise. Falls back to an
// ABI call to js::TypeOfObject for cases typeOfObject can't decide inline.
16252 void CodeGenerator::emitTypeOfObject(Register obj, Register output,
16253 Label* done) {
16254 Label slowCheck, isObject, isCallable, isUndefined;
16255 masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
16256 &isUndefined);
16258 masm.bind(&isCallable);
16259 masm.move32(Imm32(JSTYPE_FUNCTION), output);
16260 masm.jump(done);
16262 masm.bind(&isUndefined);
16263 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16264 masm.jump(done);
16266 masm.bind(&isObject);
16267 masm.move32(Imm32(JSTYPE_OBJECT), output);
16268 masm.jump(done);
// Slow path: C++ call; volatile registers (except output) are preserved.
16270 masm.bind(&slowCheck);
16272 saveVolatile(output);
16273 using Fn = JSType (*)(JSObject*);
16274 masm.setupAlignedABICall();
16275 masm.passABIArg(obj);
16276 masm.callWithABI<Fn, js::TypeOfObject>();
16277 masm.storeCallInt32Result(output);
16278 restoreVolatile(output);
// OOL continuation of visitTypeOfV for object inputs: unbox the object and
// compute its typeof result, then rejoin the inline path.
16281 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
16282 LTypeOfV* ins = ool->ins();
16284 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
16285 Register temp = ToTempUnboxRegister(ins->temp0());
16286 Register output = ToRegister(ins->output());
16288 Register obj = masm.extractObject(input, temp);
16289 emitTypeOfObject(obj, output, ool->rejoin());
16290 masm.jump(ool->rejoin());
// Lower LTypeOfO (typeof on a known-object input).
16293 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
16294 Register obj = ToRegister(lir->object());
16295 Register output = ToRegister(lir->output());
16297 Label done;
16298 emitTypeOfObject(obj, output, &done);
16299 masm.bind(&done);
// Map a JSType (integer in |input|) to the interned typeof-name string by
// indexing into the runtime's names table.
16302 void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
16303 Register input = ToRegister(lir->input());
16304 Register output = ToRegister(lir->output());
16306 #ifdef DEBUG
16307 Label ok;
16308 masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
16309 masm.assumeUnreachable("bad JSType");
16310 masm.bind(&ok);
16311 #endif
// The names are laid out contiguously starting at |undefined|, so the
// JSType value can be used directly as a pointer-scaled index.
16313 static_assert(JSTYPE_UNDEFINED == 0);
16315 masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
16316 masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
// Out-of-line path for LTypeOfIsNonPrimitiveV (typeof x == "...") when the
// object input needs the slow js::TypeOfObject call.
16319 class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
16320 LTypeOfIsNonPrimitiveV* ins_;
16322 public:
16323 explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
16324 : ins_(ins) {}
16326 void accept(CodeGenerator* codegen) override {
16327 codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
16329 auto* ins() const { return ins_; }
// Out-of-line path for LTypeOfIsNonPrimitiveO: same as the V variant above,
// but for an input that is already an unboxed object.
16332 class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
16333 LTypeOfIsNonPrimitiveO* ins_;
16335 public:
16336 explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
16337 : ins_(ins) {}
16339 void accept(CodeGenerator* codegen) override {
16340 codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
16342 auto* ins() const { return ins_; }
// Slow path shared by both OOL visitors: call js::TypeOfObject via the ABI,
// then compare the resulting JSType against the expected one and set
// |output| to the boolean result using the comparison's JSOp condition.
16345 void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
16346 Register output) {
16347 saveVolatile(output);
16348 using Fn = JSType (*)(JSObject*);
16349 masm.setupAlignedABICall();
16350 masm.passABIArg(obj);
16351 masm.callWithABI<Fn, js::TypeOfObject>();
16352 masm.storeCallInt32Result(output);
16353 restoreVolatile(output);
16355 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
16356 masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
// OOL continuation for the boxed-Value variant: unbox the object, run the
// slow typeof comparison, then rejoin.
16359 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
16360 OutOfLineTypeOfIsNonPrimitiveV* ool) {
16361 auto* ins = ool->ins();
16362 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
16363 Register output = ToRegister(ins->output());
16364 Register temp = ToTempUnboxRegister(ins->temp0());
16366 Register obj = masm.extractObject(input, temp);
16368 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
16370 masm.jump(ool->rejoin());
// OOL continuation for the unboxed-object variant.
16373 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
16374 OutOfLineTypeOfIsNonPrimitiveO* ool) {
16375 auto* ins = ool->ins();
16376 Register input = ToRegister(ins->input());
16377 Register output = ToRegister(ins->output());
16379 emitTypeOfIsObjectOOL(ins->mir(), input, output);
16381 masm.jump(ool->rejoin());
// Emit the inline fast path for |typeof obj ==/!= "<type>"|. Wires the
// three possible classifications (object / function / undefined-emulating)
// to either |success| or |fail| depending on the compared type, then
// materializes the boolean, honoring Eq/Ne polarity of the JSOp.
16384 void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
16385 Register output, Label* success,
16386 Label* fail, Label* slowCheck) {
// By default every classification fails; the compared type redirects
// exactly one of them to |success|.
16387 Label* isObject = fail;
16388 Label* isFunction = fail;
16389 Label* isUndefined = fail;
16391 switch (mir->jstype()) {
16392 case JSTYPE_UNDEFINED:
16393 isUndefined = success;
16394 break;
16396 case JSTYPE_OBJECT:
16397 isObject = success;
16398 break;
16400 case JSTYPE_FUNCTION:
16401 isFunction = success;
16402 break;
16404 case JSTYPE_STRING:
16405 case JSTYPE_NUMBER:
16406 case JSTYPE_BOOLEAN:
16407 case JSTYPE_SYMBOL:
16408 case JSTYPE_BIGINT:
16409 #ifdef ENABLE_RECORD_TUPLE
16410 case JSTYPE_RECORD:
16411 case JSTYPE_TUPLE:
16412 #endif
16413 case JSTYPE_LIMIT:
16414 MOZ_CRASH("Primitive type");
16417 masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);
16419 auto op = mir->jsop();
// fail => true for Ne/StrictNe; success => true for Eq/StrictEq.
16421 Label done;
16422 masm.bind(fail);
16423 masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
16424 masm.jump(&done);
16425 masm.bind(success);
16426 masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
16427 masm.bind(&done);
// Lower LTypeOfIsNonPrimitiveV: |typeof value ==/!= "<non-primitive type>"|
// on a boxed Value. Handles primitive tags directly, then dispatches the
// object case to emitTypeOfIsObject with an OOL slow path.
16430 void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
16431 ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
16432 Register output = ToRegister(lir->output());
16433 Register temp = ToTempUnboxRegister(lir->temp0());
16435 auto* mir = lir->mir();
16437 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
16438 addOutOfLineCode(ool, mir);
16440 Label success, fail;
16442 switch (mir->jstype()) {
16443 case JSTYPE_UNDEFINED: {
16444 ScratchTagScope tag(masm, input);
16445 masm.splitTagForTest(input, tag);
// The undefined tag matches outright; any non-object tag fails.
// Objects fall through (they may emulate undefined).
16447 masm.branchTestUndefined(Assembler::Equal, tag, &success);
16448 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
16449 break;
16452 case JSTYPE_OBJECT: {
16453 ScratchTagScope tag(masm, input);
16454 masm.splitTagForTest(input, tag);
// typeof null is "object", so the null tag succeeds immediately.
16456 masm.branchTestNull(Assembler::Equal, tag, &success);
16457 masm.branchTestObject(Assembler::NotEqual, tag, &fail);
16458 break;
16461 case JSTYPE_FUNCTION: {
16462 masm.branchTestObject(Assembler::NotEqual, input, &fail);
16463 break;
16466 case JSTYPE_STRING:
16467 case JSTYPE_NUMBER:
16468 case JSTYPE_BOOLEAN:
16469 case JSTYPE_SYMBOL:
16470 case JSTYPE_BIGINT:
16471 #ifdef ENABLE_RECORD_TUPLE
16472 case JSTYPE_RECORD:
16473 case JSTYPE_TUPLE:
16474 #endif
16475 case JSTYPE_LIMIT:
16476 MOZ_CRASH("Primitive type");
// Remaining case: the input is an object; classify it inline.
16479 Register obj = masm.extractObject(input, temp);
16481 emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());
16483 masm.bind(ool->rejoin());
// Lower LTypeOfIsNonPrimitiveO: same comparison as the V variant but the
// input is already known to be an object.
16486 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
16487 Register input = ToRegister(lir->input());
16488 Register output = ToRegister(lir->output());
16490 auto* mir = lir->mir();
16492 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
16493 addOutOfLineCode(ool, mir);
16495 Label success, fail;
16496 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
16498 masm.bind(ool->rejoin());
// Lower LTypeOfIsPrimitive: |typeof value ==/!= "<primitive type>"| reduces
// to a single tag test on the boxed Value, with the Eq/Ne polarity folded
// into the condition.
16501 void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
16502 ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
16503 Register output = ToRegister(lir->output());
16505 auto* mir = lir->mir();
16506 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
16508 switch (mir->jstype()) {
16509 case JSTYPE_STRING:
16510 masm.testStringSet(cond, input, output);
16511 break;
16512 case JSTYPE_NUMBER:
16513 masm.testNumberSet(cond, input, output);
16514 break;
16515 case JSTYPE_BOOLEAN:
16516 masm.testBooleanSet(cond, input, output);
16517 break;
16518 case JSTYPE_SYMBOL:
16519 masm.testSymbolSet(cond, input, output);
16520 break;
16521 case JSTYPE_BIGINT:
16522 masm.testBigIntSet(cond, input, output);
16523 break;
16525 case JSTYPE_UNDEFINED:
16526 case JSTYPE_OBJECT:
16527 case JSTYPE_FUNCTION:
16528 #ifdef ENABLE_RECORD_TUPLE
16529 case JSTYPE_RECORD:
16530 case JSTYPE_TUPLE:
16531 #endif
16532 case JSTYPE_LIMIT:
16533 MOZ_CRASH("Non-primitive type");
// Lower LToAsyncIter: call CreateAsyncFromSyncIterator with the sync
// iterator object and its next method (pushed in reverse order).
16537 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
16538 pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
16539 pushArg(ToRegister(lir->iterator()));
16541 using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
16542 callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
// Lower LToPropertyKeyCache by attaching a ToPropertyKey IC.
16545 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
16546 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
16547 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
16548 ValueOperand output = ToOutValue(lir);
16550 IonToPropertyKeyIC ic(liveRegs, input, output);
16551 addIC(lir, allocateIC(ic));
// Load a boxed Value from a dense-elements vector at a constant or register
// index, bailing out if the loaded value is the magic hole value.
16554 void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
16555 Register elements = ToRegister(load->elements());
16556 const ValueOperand out = ToOutValue(load);
16558 if (load->index()->isConstant()) {
16559 NativeObject::elementsSizeMustNotOverflow();
16560 int32_t offset = ToInt32(load->index()) * sizeof(Value);
16561 masm.loadValue(Address(elements, offset), out);
16562 } else {
16563 masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
16564 out);
// A magic value means a hole: deoptimize rather than expose it.
16567 Label testMagic;
16568 masm.branchTestMagic(Assembler::Equal, out, &testMagic);
16569 bailoutFrom(&testMagic, load->snapshot());
// Load a dense element, producing |undefined| for out-of-bounds reads and
// holes. Negative indices bail out when the MIR requires the check.
16572 void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
16573 Register elements = ToRegister(lir->elements());
16574 Register index = ToRegister(lir->index());
16575 Register initLength = ToRegister(lir->initLength());
16576 const ValueOperand out = ToOutValue(lir);
16578 const MLoadElementHole* mir = lir->mir();
16580 // If the index is out of bounds, load |undefined|. Otherwise, load the
16581 // value.
16582 Label outOfBounds, done;
// Spectre-safe bounds check against the initialized length.
16583 masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);
16585 masm.loadValue(BaseObjectElementIndex(elements, index), out);
16587 // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
16588 masm.branchTestMagic(Assembler::NotEqual, out, &done);
16590 if (mir->needsNegativeIntCheck()) {
16591 Label loadUndefined;
16592 masm.jump(&loadUndefined);
16594 masm.bind(&outOfBounds);
// Negative indices can shadow named properties; bail out to handle them.
16596 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
16598 masm.bind(&loadUndefined);
16599 } else {
16600 masm.bind(&outOfBounds);
16602 masm.moveValue(UndefinedValue(), out);
16604 masm.bind(&done);
// Load a scalar element from typed-array storage. The fail label is taken
// when the loaded value can't be represented in the output (and is only
// emitted/used when loadFromTypedArray actually needs it).
16607 void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
16608 Register elements = ToRegister(lir->elements());
16609 Register temp = ToTempRegisterOrInvalid(lir->temp0());
16610 AnyRegister out = ToAnyRegister(lir->output());
16612 const MLoadUnboxedScalar* mir = lir->mir();
16614 Scalar::Type storageType = mir->storageType();
16616 Label fail;
16617 if (lir->index()->isConstant()) {
16618 Address source =
16619 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
16620 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
16621 } else {
16622 BaseIndex source(elements, ToRegister(lir->index()),
16623 ScaleFromScalarType(storageType), mir->offsetAdjustment());
16624 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
16627 if (fail.used()) {
16628 bailoutFrom(&fail, lir->snapshot());
// Load a 64-bit element from BigInt64/BigUint64 typed-array storage and box
// it into a BigInt object.
16632 void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
16633 Register elements = ToRegister(lir->elements());
16634 Register temp = ToRegister(lir->temp());
16635 Register64 temp64 = ToRegister64(lir->temp64());
16636 Register out = ToRegister(lir->output());
16638 const MLoadUnboxedScalar* mir = lir->mir();
16640 Scalar::Type storageType = mir->storageType();
16642 if (lir->index()->isConstant()) {
16643 Address source =
16644 ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
16645 masm.load64(source, temp64);
16646 } else {
16647 BaseIndex source(elements, ToRegister(lir->index()),
16648 ScaleFromScalarType(storageType), mir->offsetAdjustment());
16649 masm.load64(source, temp64);
// Allocate/initialize the BigInt result from the raw 64-bit payload.
16652 emitCreateBigInt(lir, storageType, temp64, out, temp);
// Load an element through a DataView. DataView accesses are unaligned and
// may require a byte swap depending on the requested endianness (which can
// be a runtime value). Fast path: direct load when no swap is needed and
// unaligned FP loads are supported. Slow path: load raw bytes into a GPR
// (or GPR pair), conditionally byte-swap, then convert into the output.
16655 void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
16656 Register elements = ToRegister(lir->elements());
16657 const LAllocation* littleEndian = lir->littleEndian();
16658 Register temp = ToTempRegisterOrInvalid(lir->temp());
16659 Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
16660 AnyRegister out = ToAnyRegister(lir->output());
16662 const MLoadDataViewElement* mir = lir->mir();
16663 Scalar::Type storageType = mir->storageType();
16665 BaseIndex source(elements, ToRegister(lir->index()), TimesOne);
// No swap needed when the requested endianness is a constant matching the
// host's endianness.
16667 bool noSwap = littleEndian->isConstant() &&
16668 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
16670 // Directly load if no byte swap is needed and the platform supports unaligned
16671 // accesses for the access. (Such support is assumed for integer types.)
16672 if (noSwap && (!Scalar::isFloatingType(storageType) ||
16673 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
16674 if (!Scalar::isBigIntType(storageType)) {
16675 Label fail;
16676 masm.loadFromTypedArray(storageType, source, out, temp, &fail);
16678 if (fail.used()) {
16679 bailoutFrom(&fail, lir->snapshot());
16681 } else {
16682 masm.load64(source, temp64);
16684 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
16686 return;
16689 // Load the value into a gpr register.
16690 switch (storageType) {
16691 case Scalar::Int16:
16692 masm.load16UnalignedSignExtend(source, out.gpr());
16693 break;
16694 case Scalar::Uint16:
16695 masm.load16UnalignedZeroExtend(source, out.gpr());
16696 break;
16697 case Scalar::Int32:
16698 masm.load32Unaligned(source, out.gpr());
16699 break;
16700 case Scalar::Uint32:
// Uint32 may produce a double output; stage through temp in that case.
16701 masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
16702 break;
16703 case Scalar::Float32:
16704 masm.load32Unaligned(source, temp);
16705 break;
16706 case Scalar::Float64:
16707 case Scalar::BigInt64:
16708 case Scalar::BigUint64:
16709 masm.load64Unaligned(source, temp64);
16710 break;
16711 case Scalar::Int8:
16712 case Scalar::Uint8:
16713 case Scalar::Uint8Clamped:
16714 default:
16715 MOZ_CRASH("Invalid typed array type");
16718 if (!noSwap) {
16719 // Swap the bytes in the loaded value.
16720 Label skip;
// Runtime endianness: skip the swap when the requested order already
// matches the host byte order.
16721 if (!littleEndian->isConstant()) {
16722 masm.branch32(
16723 MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
16724 ToRegister(littleEndian), Imm32(0), &skip);
16727 switch (storageType) {
16728 case Scalar::Int16:
16729 masm.byteSwap16SignExtend(out.gpr());
16730 break;
16731 case Scalar::Uint16:
16732 masm.byteSwap16ZeroExtend(out.gpr());
16733 break;
16734 case Scalar::Int32:
16735 masm.byteSwap32(out.gpr());
16736 break;
16737 case Scalar::Uint32:
16738 masm.byteSwap32(out.isFloat() ? temp : out.gpr());
16739 break;
16740 case Scalar::Float32:
16741 masm.byteSwap32(temp);
16742 break;
16743 case Scalar::Float64:
16744 case Scalar::BigInt64:
16745 case Scalar::BigUint64:
16746 masm.byteSwap64(temp64);
16747 break;
16748 case Scalar::Int8:
16749 case Scalar::Uint8:
16750 case Scalar::Uint8Clamped:
16751 default:
16752 MOZ_CRASH("Invalid typed array type");
16755 if (skip.used()) {
16756 masm.bind(&skip);
16760 // Move the value into the output register.
16761 switch (storageType) {
16762 case Scalar::Int16:
16763 case Scalar::Uint16:
16764 case Scalar::Int32:
16765 break;
16766 case Scalar::Uint32:
16767 if (out.isFloat()) {
16768 masm.convertUInt32ToDouble(temp, out.fpu());
16769 } else {
16770 // Bail out if the value doesn't fit into a signed int32 value. This
16771 // is what allows MLoadDataViewElement to have a type() of
16772 // MIRType::Int32 for UInt32 array loads.
16773 bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
16775 break;
16776 case Scalar::Float32:
16777 masm.moveGPRToFloat32(temp, out.fpu());
16778 masm.canonicalizeFloat(out.fpu());
16779 break;
16780 case Scalar::Float64:
16781 masm.moveGPR64ToDouble(temp64, out.fpu());
16782 masm.canonicalizeDouble(out.fpu());
16783 break;
16784 case Scalar::BigInt64:
16785 case Scalar::BigUint64:
16786 emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
16787 break;
16788 case Scalar::Int8:
16789 case Scalar::Uint8:
16790 case Scalar::Uint8Clamped:
16791 default:
16792 MOZ_CRASH("Invalid typed array type");
// Load a typed-array element, producing |undefined| for out-of-bounds
// indices instead of bailing out.
16796 void CodeGenerator::visitLoadTypedArrayElementHole(
16797 LLoadTypedArrayElementHole* lir) {
16798 Register object = ToRegister(lir->object());
16799 const ValueOperand out = ToOutValue(lir);
16801 // Load the length.
16802 Register scratch = out.scratchReg();
16803 Register scratch2 = ToRegister(lir->temp0());
16804 Register index = ToRegister(lir->index());
16805 masm.loadArrayBufferViewLengthIntPtr(object, scratch);
16807 // Load undefined if index >= length.
16808 Label outOfBounds, done;
16809 masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);
16811 // Load the elements vector.
16812 masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
16814 Scalar::Type arrayType = lir->mir()->arrayType();
16815 Label fail;
16816 BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
// Uint32 loads that don't fit in int32 either get forced to double or
// trigger the fail/bailout path, per the MIR's forceDouble flag.
16817 MacroAssembler::Uint32Mode uint32Mode =
16818 lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
16819 : MacroAssembler::Uint32Mode::FailOnDouble;
16820 masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
16821 &fail);
16822 masm.jump(&done);
16824 masm.bind(&outOfBounds);
16825 masm.moveValue(UndefinedValue(), out);
16827 if (fail.used()) {
16828 bailoutFrom(&fail, lir->snapshot());
16831 masm.bind(&done);
// BigInt variant of visitLoadTypedArrayElementHole: loads 64 raw bits,
// boxes them into a BigInt, and yields |undefined| when out of bounds.
16834 void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
16835 LLoadTypedArrayElementHoleBigInt* lir) {
16836 Register object = ToRegister(lir->object());
16837 const ValueOperand out = ToOutValue(lir);
16839 // On x86 there are not enough registers. In that case reuse the output's
16840 // type register as temporary.
16841 #ifdef JS_CODEGEN_X86
16842 MOZ_ASSERT(lir->temp()->isBogusTemp());
16843 Register temp = out.typeReg();
16844 #else
16845 Register temp = ToRegister(lir->temp());
16846 #endif
16847 Register64 temp64 = ToRegister64(lir->temp64());
16849 // Load the length.
16850 Register scratch = out.scratchReg();
16851 Register index = ToRegister(lir->index());
16852 masm.loadArrayBufferViewLengthIntPtr(object, scratch);
16854 // Load undefined if index >= length.
16855 Label outOfBounds, done;
16856 masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);
16858 // Load the elements vector.
16859 masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
16861 Scalar::Type arrayType = lir->mir()->arrayType();
16862 BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
16863 masm.load64(source, temp64);
// Box the raw bits and tag the result as a BigInt value.
16865 Register bigInt = out.scratchReg();
16866 emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);
16868 masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
16869 masm.jump(&done);
16871 masm.bind(&outOfBounds);
16872 masm.moveValue(UndefinedValue(), out);
16874 masm.bind(&done);
// Helper for emitting a jump table, either inline (ARM, PC-relative) or as
// out-of-line data. Collects per-case labels and the code labels that will
// be patched with their absolute addresses after codegen.
16877 template <SwitchTableType tableType>
16878 class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
16879 using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
16880 using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
// Labels bound at each case's code entry.
16881 LabelsVector labels_;
// Code labels reserved in the table, patched to point at labels_.
16882 CodeLabelsVector codeLabels_;
// Start of the table itself (used as the base for indexed jumps).
16883 CodeLabel start_;
16884 bool isOutOfLine_;
16886 void accept(CodeGenerator* codegen) override {
16887 codegen->visitOutOfLineSwitch(this);
16890 public:
16891 explicit OutOfLineSwitch(TempAllocator& alloc)
16892 : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}
16894 CodeLabel* start() { return &start_; }
16896 CodeLabelsVector& codeLabels() { return codeLabels_; }
16897 LabelsVector& labels() { return labels_; }
// Emit the computed jump: table base + index * sizeof(void*).
16899 void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
16900 Register base;
16901 if (tableType == SwitchTableType::Inline) {
16902 #if defined(JS_CODEGEN_ARM)
16903 base = ::js::jit::pc;
16904 #else
16905 MOZ_CRASH("NYI: SwitchTableType::Inline");
16906 #endif
16907 } else {
16908 #if defined(JS_CODEGEN_ARM)
16909 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
16910 #else
16911 masm.mov(start(), temp);
16912 base = temp;
16913 #endif
16915 BaseIndex jumpTarget(base, index, ScalePointer);
16916 masm.branchToComputedAddress(jumpTarget);
16919 // Register an entry in the switch table.
16920 void addTableEntry(MacroAssembler& masm) {
// Only write the pointer when emitting in the phase that matches the
// table placement (inline vs. out-of-line).
16921 if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
16922 (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
16923 CodeLabel cl;
16924 masm.writeCodePointer(&cl);
16925 masm.propagateOOM(codeLabels_.append(std::move(cl)));
16928 // Register the code, to which the table will jump to.
16929 void addCodeEntry(MacroAssembler& masm) {
16930 Label entry;
16931 masm.bind(&entry);
16932 masm.propagateOOM(labels_.append(std::move(entry)));
16935 void setOutOfLine() { isOutOfLine_ = true; }
// Emit the out-of-line jump table data and patch each reserved code pointer
// to the offset of its case label. Explicitly instantiated for both table
// types below.
16938 template <SwitchTableType tableType>
16939 void CodeGenerator::visitOutOfLineSwitch(
16940 OutOfLineSwitch<tableType>* jumpTable) {
16941 jumpTable->setOutOfLine();
16942 auto& labels = jumpTable->labels();
16944 if (tableType == SwitchTableType::OutOfLine) {
16945 #if defined(JS_CODEGEN_ARM)
16946 MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
16947 #elif defined(JS_CODEGEN_NONE)
16948 MOZ_CRASH();
16949 #else
16951 # if defined(JS_CODEGEN_ARM64)
// ARM64: keep the table contiguous — no constant pools or nops inside.
16952 AutoForbidPoolsAndNops afp(
16953 &masm,
16954 (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
16955 # endif
16957 masm.haltingAlign(sizeof(void*));
16959 // Bind the address of the jump table and reserve the space for code
16960 // pointers to jump in the newly generated code.
16961 masm.bind(jumpTable->start());
16962 masm.addCodeLabel(*jumpTable->start());
16963 for (size_t i = 0, e = labels.length(); i < e; i++) {
16964 jumpTable->addTableEntry(masm);
16966 #endif
16969 // Register all reserved pointers of the jump table to target labels. The
16970 // entries of the jump table need to be absolute addresses and thus must be
16971 // patched after codegen is finished.
16972 auto& codeLabels = jumpTable->codeLabels();
16973 for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
16974 auto& cl = codeLabels[i];
16975 cl.target()->bind(labels[i].offset());
16976 masm.addCodeLabel(cl);
16980 template void CodeGenerator::visitOutOfLineSwitch(
16981 OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
16982 template void CodeGenerator::visitOutOfLineSwitch(
16983 OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
// Store |value| (float register, constant, or GPR) into typed-array storage
// at |dest|, dispatching on the scalar write type. T is Address or BaseIndex.
16985 template <typename T>
16986 static inline void StoreToTypedArray(MacroAssembler& masm,
16987 Scalar::Type writeType,
16988 const LAllocation* value, const T& dest) {
16989 if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
16990 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
16991 } else {
16992 if (value->isConstant()) {
16993 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
16994 } else {
16995 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
// Store a scalar value into typed-array storage at a constant or register
// index.
17000 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
17001 Register elements = ToRegister(lir->elements());
17002 const LAllocation* value = lir->value();
17004 const MStoreUnboxedScalar* mir = lir->mir();
17006 Scalar::Type writeType = mir->writeType();
17008 if (lir->index()->isConstant()) {
17009 Address dest = ToAddress(elements, lir->index(), writeType);
17010 StoreToTypedArray(masm, writeType, value, dest);
17011 } else {
17012 BaseIndex dest(elements, ToRegister(lir->index()),
17013 ScaleFromScalarType(writeType));
17014 StoreToTypedArray(masm, writeType, value, dest);
// Codegen for storing a BigInt into a BigInt64/BigUint64 typed array: the
// BigInt's 64-bit payload is first loaded into |temp|, then stored through
// either an Address (constant index) or a scaled BaseIndex.
17018 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
17019   Register elements = ToRegister(lir->elements());
17020   Register value = ToRegister(lir->value());
17021   Register64 temp = ToRegister64(lir->temp());
17023   Scalar::Type writeType = lir->mir()->writeType();
17025   masm.loadBigInt64(value, temp);
17027   if (lir->index()->isConstant()) {
17028     Address dest = ToAddress(elements, lir->index(), writeType);
17029     masm.storeToTypedBigIntArray(writeType, temp, dest);
17030   } else {
17031     BaseIndex dest(elements, ToRegister(lir->index()),
17032                    ScaleFromScalarType(writeType));
17033     masm.storeToTypedBigIntArray(writeType, temp, dest);
// Codegen for MStoreDataViewElement: a DataView store, which (unlike a plain
// typed-array store) has runtime-selectable endianness and no alignment
// guarantee. Fast path: when the requested endianness is statically known to
// match the host and unaligned FP access is supported, store directly.
// Slow path: move the value into GPR temp(s), byte-swap it if the requested
// endianness differs from the host's, then emit an unaligned store.
17037 void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
17038   Register elements = ToRegister(lir->elements());
17039   const LAllocation* value = lir->value();
17040   const LAllocation* littleEndian = lir->littleEndian();
17041   Register temp = ToTempRegisterOrInvalid(lir->temp());
17042   Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
17044   const MStoreDataViewElement* mir = lir->mir();
17045   Scalar::Type writeType = mir->writeType();
// DataView accesses are always byte-indexed, hence TimesOne scaling.
17047   BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);
// True iff the littleEndian flag is a compile-time constant that matches the
// host byte order, so no swap is ever required.
17049   bool noSwap = littleEndian->isConstant() &&
17050                 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
17052   // Directly store if no byte swap is needed and the platform supports
17053   // unaligned accesses for the access.  (Such support is assumed for integer
17054   // types.)
17055   if (noSwap && (!Scalar::isFloatingType(writeType) ||
17056                  MacroAssembler::SupportsFastUnalignedFPAccesses())) {
17057     if (!Scalar::isBigIntType(writeType)) {
17058       StoreToTypedArray(masm, writeType, value, dest);
17059     } else {
17060       masm.loadBigInt64(ToRegister(value), temp64);
17061       masm.storeToTypedBigIntArray(writeType, temp64, dest);
17063     return;
17066   // Load the value into a gpr register.
17067   switch (writeType) {
17068     case Scalar::Int16:
17069     case Scalar::Uint16:
17070     case Scalar::Int32:
17071     case Scalar::Uint32:
17072       if (value->isConstant()) {
17073         masm.move32(Imm32(ToInt32(value)), temp);
17074       } else {
17075         masm.move32(ToRegister(value), temp);
17077       break;
17078     case Scalar::Float32: {
17079       FloatRegister fvalue = ToFloatRegister(value);
// Canonicalize NaNs before the raw bit pattern becomes observable.
17080       masm.canonicalizeFloatIfDeterministic(fvalue);
17081       masm.moveFloat32ToGPR(fvalue, temp);
17082       break;
17084     case Scalar::Float64: {
17085       FloatRegister fvalue = ToFloatRegister(value);
17086       masm.canonicalizeDoubleIfDeterministic(fvalue);
17087       masm.moveDoubleToGPR64(fvalue, temp64);
17088       break;
17090     case Scalar::BigInt64:
17091     case Scalar::BigUint64:
17092       masm.loadBigInt64(ToRegister(value), temp64);
17093       break;
// Single-byte types never reach here: they need neither swap nor alignment
// handling, so MIR/Lowering does not produce them for this path.
17094     case Scalar::Int8:
17095     case Scalar::Uint8:
17096     case Scalar::Uint8Clamped:
17097     default:
17098       MOZ_CRASH("Invalid typed array type");
17101   if (!noSwap) {
17102     // Swap the bytes in the loaded value.
17103     Label skip;
// If the endianness is only known at runtime, branch around the swap when
// the requested order already matches the host order.
17104     if (!littleEndian->isConstant()) {
17105       masm.branch32(
17106           MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
17107           ToRegister(littleEndian), Imm32(0), &skip);
17110     switch (writeType) {
17111       case Scalar::Int16:
17112         masm.byteSwap16SignExtend(temp);
17113         break;
17114       case Scalar::Uint16:
17115         masm.byteSwap16ZeroExtend(temp);
17116         break;
17117       case Scalar::Int32:
17118       case Scalar::Uint32:
17119       case Scalar::Float32:
17120         masm.byteSwap32(temp);
17121         break;
17122       case Scalar::Float64:
17123       case Scalar::BigInt64:
17124       case Scalar::BigUint64:
17125         masm.byteSwap64(temp64);
17126         break;
17127       case Scalar::Int8:
17128       case Scalar::Uint8:
17129       case Scalar::Uint8Clamped:
17130       default:
17131         MOZ_CRASH("Invalid typed array type");
17134     if (skip.used()) {
17135       masm.bind(&skip);
17139   // Store the value into the destination.
17140   switch (writeType) {
17141     case Scalar::Int16:
17142     case Scalar::Uint16:
17143       masm.store16Unaligned(temp, dest);
17144       break;
17145     case Scalar::Int32:
17146     case Scalar::Uint32:
17147     case Scalar::Float32:
17148       masm.store32Unaligned(temp, dest);
17149       break;
17150     case Scalar::Float64:
17151     case Scalar::BigInt64:
17152     case Scalar::BigUint64:
17153       masm.store64Unaligned(temp64, dest);
17154       break;
17155     case Scalar::Int8:
17156     case Scalar::Uint8:
17157     case Scalar::Uint8Clamped:
17158     default:
17159       MOZ_CRASH("Invalid typed array type");
// Typed-array store with "hole" semantics: a Spectre-hardened bounds check
// first branches to |skip| when the index is out of bounds, so out-of-bounds
// stores are silently ignored rather than trapping or growing the array.
17163 void CodeGenerator::visitStoreTypedArrayElementHole(
17164     LStoreTypedArrayElementHole* lir) {
17165   Register elements = ToRegister(lir->elements());
17166   const LAllocation* value = lir->value();
17168   Scalar::Type arrayType = lir->mir()->arrayType();
17170   Register index = ToRegister(lir->index());
17171   const LAllocation* length = lir->length();
17172   Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
17174   Label skip;
// The length may live in a register or still be in memory.
17175   if (length->isRegister()) {
17176     masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17177   } else {
17178     masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
17181   BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17182   StoreToTypedArray(masm, arrayType, value, dest);
17184   masm.bind(&skip);
// BigInt64/BigUint64 variant of the typed-array store with hole semantics:
// bounds-check (out-of-bounds stores are ignored), then unpack the BigInt's
// 64-bit payload and store it.
17187 void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
17188     LStoreTypedArrayElementHoleBigInt* lir) {
17189   Register elements = ToRegister(lir->elements());
17190   Register value = ToRegister(lir->value());
17191   Register64 temp = ToRegister64(lir->temp());
17193   Scalar::Type arrayType = lir->mir()->arrayType();
17195   Register index = ToRegister(lir->index());
17196   const LAllocation* length = lir->length();
// The 64-bit temp's scratch register doubles as the Spectre index-masking
// temp; it is not live until loadBigInt64 below.
17197   Register spectreTemp = temp.scratchReg();
17199   Label skip;
17200   if (length->isRegister()) {
17201     masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17202   } else {
17203     masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
17206   masm.loadBigInt64(value, temp);
17208   BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17209   masm.storeToTypedBigIntArray(arrayType, temp, dest);
17211   masm.bind(&skip);
17214 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
17215 Register value = ToRegister(lir->value());
17216 Register output = ToRegister(lir->output());
17218 masm.atomicIsLockFreeJS(value, output);
// Clamp an int32 to the uint8 range. Lowering reuses the input register as
// the output (asserted below), so the clamp happens in place.
17221 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
17222   Register output = ToRegister(lir->output());
17223   MOZ_ASSERT(output == ToRegister(lir->input()));
17224   masm.clampIntToUint8(output);
17227 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
17228 FloatRegister input = ToFloatRegister(lir->input());
17229 Register output = ToRegister(lir->output());
17230 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary Value to the uint8 range. Numbers are handled inline by
// clampValueToUint8; strings take the out-of-line VM call to StringToNumber
// and rejoin; values that cannot be coerced branch to |fails| and bail out.
17233 void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
17234   ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
17235   FloatRegister tempFloat = ToFloatRegister(lir->temp0());
17236   Register output = ToRegister(lir->output());
17238   using Fn = bool (*)(JSContext*, JSString*, double*);
17239   OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
17240       lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
17241   Label* stringEntry = oolString->entry();
17242   Label* stringRejoin = oolString->rejoin();
17244   Label fails;
17245   masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
17246                          output, &fails);
17248   bailoutFrom(&fails, lir->snapshot());
// Codegen for the `in` operator via an inline cache: allocate an IonInIC
// that tests whether |key| is a property of |object| and writes the boolean
// result to |output|.
17251 void CodeGenerator::visitInCache(LInCache* ins) {
17252   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17254   ConstantOrRegister key =
17255       toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
17256   Register object = ToRegister(ins->rhs());
17257   Register output = ToRegister(ins->output());
17258   Register temp = ToRegister(ins->temp0());
17260   IonInIC cache(liveRegs, key, object, output, temp);
17261   addIC(ins, allocateIC(cache));
// Codegen for `index in array` on dense elements: the result is true iff the
// index is below the initialized length AND the element is not a hole
// (magic value). Negative indices either bail out immediately (constant
// case) or bail out via the negativeIntCheck path (register case).
17264 void CodeGenerator::visitInArray(LInArray* lir) {
17265   const MInArray* mir = lir->mir();
17266   Register elements = ToRegister(lir->elements());
17267   Register initLength = ToRegister(lir->initLength());
17268   Register output = ToRegister(lir->output());
17270   Label falseBranch, done, trueBranch;
17272   if (lir->index()->isConstant()) {
17273     int32_t index = ToInt32(lir->index());
// A constant negative index is known at compile time: bail unconditionally.
17275     if (index < 0) {
17276       MOZ_ASSERT(mir->needsNegativeIntCheck());
17277       bailout(lir->snapshot());
17278       return;
17281     masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
17282                   &falseBranch);
// The static offset computation below assumes index * sizeof(Value) cannot
// overflow.
17284     NativeObject::elementsSizeMustNotOverflow();
17285     Address address = Address(elements, index * sizeof(Value));
17286     masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17287   } else {
17288     Register index = ToRegister(lir->index());
// An unsigned BelowOrEqual compare treats a negative index as a huge
// unsigned value, so a failed length check may mean "negative index"; route
// it through negativeIntCheck when that distinction matters.
17290     Label negativeIntCheck;
17291     Label* failedInitLength = &falseBranch;
17292     if (mir->needsNegativeIntCheck()) {
17293       failedInitLength = &negativeIntCheck;
17296     masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
17298     BaseObjectElementIndex address(elements, index);
17299     masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17301     if (mir->needsNegativeIntCheck()) {
17302       masm.jump(&trueBranch);
17303       masm.bind(&negativeIntCheck);
// Bail for genuinely negative indices; otherwise it was just out of bounds.
17305       bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17307       masm.jump(&falseBranch);
17311   masm.bind(&trueBranch);
17312   masm.move32(Imm32(1), output);
17313   masm.jump(&done);
17315   masm.bind(&falseBranch);
17316   masm.move32(Imm32(0), output);
17317   masm.bind(&done);
// Guard that a dense element is not a hole (magic value); bail out of the
// compiled code if it is. Supports both constant and register indices.
17320 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
17321   Register elements = ToRegister(lir->elements());
17322   const LAllocation* index = lir->index();
17324   Label testMagic;
17325   if (index->isConstant()) {
17326     Address address(elements, ToInt32(index) * sizeof(js::Value));
17327     masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17328   } else {
17329     BaseObjectElementIndex address(elements, ToRegister(index));
17330     masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17332   bailoutFrom(&testMagic, lir->snapshot());
17335 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
17336 Register protoReg = ToRegister(ins->rhs());
17337 emitInstanceOf(ins, protoReg);
17340 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
17341 Register protoReg = ToRegister(ins->rhs());
17342 emitInstanceOf(ins, protoReg);
// Shared codegen for LInstanceOfO/LInstanceOfV. Walks the LHS's prototype
// chain inline looking for |protoReg|; falls back to a VM call
// (IsPrototypeOf) only when a lazy proto (e.g. a cross-compartment wrapper)
// is encountered. The boolean result lands in the instruction's def(0).
17345 void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
17346   // This path implements fun_hasInstance when the function's prototype is
17347   // known to be the object in protoReg
17349   Label done;
17350   Register output = ToRegister(ins->getDef(0));
17352   // If the lhs is a primitive, the result is false.
17353   Register objReg;
17354   if (ins->isInstanceOfV()) {
17355     Label isObject;
17356     ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17357     masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
17358     masm.mov(ImmWord(0), output);
17359     masm.jump(&done);
17360     masm.bind(&isObject);
// NOTE: extractObject may use |output| as scratch, clobbering the original
// lhs object; the regenerate path below reconstructs it for the VM call.
17361     objReg = masm.extractObject(lhsValue, output);
17362   } else {
17363     objReg = ToRegister(ins->toInstanceOfO()->lhs());
17366   // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
17367   // This follows the main loop of js::IsPrototypeOf, though additionally breaks
17368   // out of the loop on Proxy::LazyProto.
17370   // Load the lhs's prototype.
17371   masm.loadObjProto(objReg, output);
17373   Label testLazy;
17375     Label loopPrototypeChain;
17376     masm.bind(&loopPrototypeChain);
17378     // Test for the target prototype object.
17379     Label notPrototypeObject;
17380     masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
17381     masm.mov(ImmWord(1), output);
17382     masm.jump(&done);
17383     masm.bind(&notPrototypeObject);
17385     MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
17387     // Test for nullptr or Proxy::LazyProto
// nullptr (0) means "end of chain, result is the 0 already in |output|";
// LazyProto (1) is disambiguated at |testLazy| below.
17388     masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
17390     // Load the current object's prototype.
17391     masm.loadObjProto(output, output);
17393     masm.jump(&loopPrototypeChain);
17396   // Make a VM call if an object with a lazy proto was found on the prototype
17397   // chain. This currently occurs only for cross compartment wrappers, which
17398   // we do not expect to be compared with non-wrapper functions from this
17399   // compartment. Otherwise, we stopped on a nullptr prototype and the output
17400   // register is already correct.
17402   using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
17403   auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
17404                                            StoreRegisterTo(output));
17406   // Regenerate the original lhs object for the VM call.
// If extractObject above aliased objReg to |output| (now clobbered by the
// proto walk), re-extract the lhs before jumping to the OOL call.
17407   Label regenerate, *lazyEntry;
17408   if (objReg != output) {
17409     lazyEntry = ool->entry();
17410   } else {
17411     masm.bind(&regenerate);
17412     lazyEntry = &regenerate;
17413     if (ins->isInstanceOfV()) {
17414       ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17415       objReg = masm.extractObject(lhsValue, output);
17416     } else {
17417       objReg = ToRegister(ins->toInstanceOfO()->lhs());
17419     MOZ_ASSERT(objReg == output);
17420     masm.jump(ool->entry());
17423   masm.bind(&testLazy);
17424   masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
17426   masm.bind(&done);
17427   masm.bind(ool->rejoin());
// Codegen for the generic instanceof path: allocate an IonInstanceOfIC that
// computes |lhs instanceof rhs| and writes the boolean result to |output|.
17430 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
17431   // The Lowering ensures that RHS is an object, and that LHS is a value.
17432   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17433   TypedOrValueRegister lhs =
17434       TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
17435   Register rhs = ToRegister(ins->rhs());
17436   Register output = ToRegister(ins->output());
17438   IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
17439   addIC(ins, allocateIC(ic));
// Codegen for MGetDOMProperty: call a DOM getter through its JSJitGetterOp.
// Fast path: if the value may be cached in a reserved slot, load it and skip
// the call when it is not |undefined|. Slow path: build a fake IonDOMGetter
// exit frame, switch realms if the getter lives elsewhere, and invoke the
// getter via an aligned ABI call with an on-stack Value outparam.
17442 void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
17443   const Register JSContextReg = ToRegister(ins->getJSContextReg());
17444   const Register ObjectReg = ToRegister(ins->getObjectReg());
17445   const Register PrivateReg = ToRegister(ins->getPrivReg());
17446   const Register ValueReg = ToRegister(ins->getValueReg());
17448   Label haveValue;
17449   if (ins->mir()->valueMayBeInSlot()) {
17450     size_t slot = ins->mir()->domMemberSlotIndex();
17451     // It's a bit annoying to redo these slot calculations, which duplcate
17452     // LSlots and a few other things like that, but I'm not sure there's a
17453     // way to reuse those here.
17455     // If this ever gets fixed to work with proxies (by not assuming that
17456     // reserved slot indices, which is what domMemberSlotIndex() returns,
17457     // match fixed slot indices), we can reenable MGetDOMProperty for
17458     // proxies in IonBuilder.
17459     if (slot < NativeObject::MAX_FIXED_SLOTS) {
17460       masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
17461                      JSReturnOperand);
17462     } else {
17463       // It's a dynamic slot.
17464       slot -= NativeObject::MAX_FIXED_SLOTS;
17465       // Use PrivateReg as a scratch register for the slots pointer.
17466       masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
17467                    PrivateReg);
17468       masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
17469                      JSReturnOperand);
// |undefined| in the slot means "not cached": fall through to the getter.
17471     masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
17474   DebugOnly<uint32_t> initialStack = masm.framePushed();
17476   masm.checkStackAlignment();
17478   // Make space for the outparam.  Pre-initialize it to UndefinedValue so we
17479   // can trace it at GC time.
17480   masm.Push(UndefinedValue());
17481   // We pass the pointer to our out param as an instance of
17482   // JSJitGetterCallArgs, since on the binary level it's the same thing.
17483   static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
17484   masm.moveStackPtrTo(ValueReg);
17486   masm.Push(ObjectReg);
17488   LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17490   // Rooting will happen at GC time.
17491   masm.moveStackPtrTo(ObjectReg);
17493   Realm* getterRealm = ins->mir()->getterRealm();
17494   if (gen->realm->realmPtr() != getterRealm) {
17495     // We use JSContextReg as scratch register here.
17496     masm.switchToRealm(getterRealm, JSContextReg);
17499   uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17500   masm.loadJSContext(JSContextReg);
17501   masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17502                           ExitFrameType::IonDOMGetter);
17504   markSafepointAt(safepointOffset, ins);
17506   masm.setupAlignedABICall();
17507   masm.loadJSContext(JSContextReg);
17508   masm.passABIArg(JSContextReg);
17509   masm.passABIArg(ObjectReg);
17510   masm.passABIArg(PrivateReg);
17511   masm.passABIArg(ValueReg);
17512   ensureOsiSpace();
17513   masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
17514                    ABIType::General,
17515                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// Infallible getters skip the failure check; fallible ones jump to the
// exception handler when the getter returned false.
17517   if (ins->mir()->isInfallible()) {
17518     masm.loadValue(Address(masm.getStackPointer(),
17519                            IonDOMExitFrameLayout::offsetOfResult()),
17520                    JSReturnOperand);
17521   } else {
17522     masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17524     masm.loadValue(Address(masm.getStackPointer(),
17525                            IonDOMExitFrameLayout::offsetOfResult()),
17526                    JSReturnOperand);
17529   // Switch back to the current realm if needed. Note: if the getter threw an
17530   // exception, the exception handler will do this.
17531   if (gen->realm->realmPtr() != getterRealm) {
17532     static_assert(!JSReturnOperand.aliases(ReturnReg),
17533                   "Clobbering ReturnReg should not affect the return value");
17534     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17537   // Until C++ code is instrumented against Spectre, prevent speculative
17538   // execution from returning any private data.
17539   if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
17540     masm.speculationBarrier();
17543   masm.adjustStack(IonDOMExitFrameLayout::Size());
17545   masm.bind(&haveValue);
17547   MOZ_ASSERT(masm.framePushed() == initialStack);
// Codegen for MGetDOMMember with a boxed result: load the member's Value
// straight from the object's fixed slot (no getter call needed).
17550 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
17551   // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
17552   // use an LLoadFixedSlotV or some subclass of it for this case: that would
17553   // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17554   // we'd have to duplicate a bunch of stuff we now get for free from
17555   // MGetDOMProperty.
17557   // If this ever gets fixed to work with proxies (by not assuming that
17558   // reserved slot indices, which is what domMemberSlotIndex() returns,
17559   // match fixed slot indices), we can reenable MGetDOMMember for
17560   // proxies in IonBuilder.
17561   Register object = ToRegister(ins->object());
17562   size_t slot = ins->mir()->domMemberSlotIndex();
17563   ValueOperand result = ToOutValue(ins);
17565   masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17566                  result);
// Codegen for MGetDOMMember with an unboxed (typed) result: load and unbox
// the member's value from the object's fixed slot.
17569 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
17570   // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
17571   // use an LLoadFixedSlotT or some subclass of it for this case: that would
17572   // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17573   // we'd have to duplicate a bunch of stuff we now get for free from
17574   // MGetDOMProperty.
17576   // If this ever gets fixed to work with proxies (by not assuming that
17577   // reserved slot indices, which is what domMemberSlotIndex() returns,
17578   // match fixed slot indices), we can reenable MGetDOMMember for
17579   // proxies in IonBuilder.
17580   Register object = ToRegister(ins->object());
17581   size_t slot = ins->mir()->domMemberSlotIndex();
17582   AnyRegister result = ToAnyRegister(ins->getDef(0));
17583   MIRType type = ins->mir()->type();
17585   masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17586                         type, result);
// Codegen for MSetDOMProperty: call a DOM setter through its JSJitSetterOp.
// Mirrors visitGetDOMProperty's slow path: push the argument Value, build a
// fake IonDOMSetter exit frame, switch realms if needed, and make an aligned
// ABI call; a false return jumps to the exception handler.
17589 void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
17590   const Register JSContextReg = ToRegister(ins->getJSContextReg());
17591   const Register ObjectReg = ToRegister(ins->getObjectReg());
17592   const Register PrivateReg = ToRegister(ins->getPrivReg());
17593   const Register ValueReg = ToRegister(ins->getValueReg());
17595   DebugOnly<uint32_t> initialStack = masm.framePushed();
17597   masm.checkStackAlignment();
17599   // Push the argument. Rooting will happen at GC time.
17600   ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
17601   masm.Push(argVal);
17602   // We pass the pointer to our out param as an instance of
17603   // JSJitGetterCallArgs, since on the binary level it's the same thing.
17604   static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
17605   masm.moveStackPtrTo(ValueReg);
17607   masm.Push(ObjectReg);
17609   LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17611   // Rooting will happen at GC time.
17612   masm.moveStackPtrTo(ObjectReg);
17614   Realm* setterRealm = ins->mir()->setterRealm();
17615   if (gen->realm->realmPtr() != setterRealm) {
17616     // We use JSContextReg as scratch register here.
17617     masm.switchToRealm(setterRealm, JSContextReg);
17620   uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17621   masm.loadJSContext(JSContextReg);
17622   masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17623                           ExitFrameType::IonDOMSetter);
17625   markSafepointAt(safepointOffset, ins);
17627   masm.setupAlignedABICall();
17628   masm.loadJSContext(JSContextReg);
17629   masm.passABIArg(JSContextReg);
17630   masm.passABIArg(ObjectReg);
17631   masm.passABIArg(PrivateReg);
17632   masm.passABIArg(ValueReg);
17633   ensureOsiSpace();
17634   masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
17635                    ABIType::General,
17636                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
17638   masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17640   // Switch back to the current realm if needed. Note: if the setter threw an
17641   // exception, the exception handler will do this.
17642   if (gen->realm->realmPtr() != setterRealm) {
17643     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17646   masm.adjustStack(IonDOMExitFrameLayout::Size());
17648   MOZ_ASSERT(masm.framePushed() == initialStack);
// Load a DOM proxy's expando Value: follow the proxy's reserved-slots
// pointer, then read the private slot into the output ValueOperand.
17651 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
17652   Register proxy = ToRegister(ins->proxy());
17653   ValueOperand out = ToOutValue(ins);
17655   masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17656                out.scratchReg());
17657   masm.loadValue(Address(out.scratchReg(),
17658                          js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17659                  out);
// Load a DOM proxy's expando Value while guarding that the recorded
// ExpandoAndGeneration and its generation counter are unchanged; bail out
// (invalidating this compilation's assumption) otherwise.
17662 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
17663     LLoadDOMExpandoValueGuardGeneration* ins) {
17664   Register proxy = ToRegister(ins->proxy());
17665   ValueOperand out = ToOutValue(ins);
17667   Label bail;
17668   masm.loadDOMExpandoValueGuardGeneration(proxy, out,
17669                                           ins->mir()->expandoAndGeneration(),
17670                                           ins->mir()->generation(), &bail);
17671   bailoutFrom(&bail, ins->snapshot());
// Load a DOM proxy's expando Value through its ExpandoAndGeneration without
// checking the generation counter.
17674 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
17675     LLoadDOMExpandoValueIgnoreGeneration* ins) {
17676   Register proxy = ToRegister(ins->proxy());
17677   ValueOperand out = ToOutValue(ins);
17679   masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17680                out.scratchReg());
17682   // Load the ExpandoAndGeneration* from the PrivateValue.
17683   masm.loadPrivate(
17684       Address(out.scratchReg(),
17685               js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17686       out.scratchReg());
17688   // Load expandoAndGeneration->expando into the output Value register.
17689   masm.loadValue(
17690       Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
// Guard that a DOM expando Value is either absent (|undefined|) or is an
// object with the expected shape; bail out on any other shape.
17693 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
17694     LGuardDOMExpandoMissingOrGuardShape* ins) {
17695   Register temp = ToRegister(ins->temp0());
17696   ValueOperand input =
17697       ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
17699   Label done;
// Missing expando (undefined) always passes the guard.
17700   masm.branchTestUndefined(Assembler::Equal, input, &done);
17702   masm.debugAssertIsObject(input);
17703   masm.unboxObject(input, temp);
17704   // The expando object is not used in this case, so we don't need Spectre
17705   // mitigations.
17706   Label bail;
17707   masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
17708                                               ins->mir()->shape(), &bail);
17709   bailoutFrom(&bail, ins->snapshot());
17711   masm.bind(&done);
// Out-of-line path for the IsCallable instructions: records the object and
// output registers so visitOutOfLineIsCallable can emit the slow ABI call.
17714 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
17715   Register object_;
17716   Register output_;
17718  public:
17719   OutOfLineIsCallable(Register object, Register output)
17720       : object_(object), output_(output) {}
17722   void accept(CodeGenerator* codegen) override {
17723     codegen->visitOutOfLineIsCallable(this);
17725   Register object() const { return object_; }
17726   Register output() const { return output_; }
// IsCallable on a known object: the inline masm.isCallable check handles the
// common cases and branches to the OOL ABI call for the rest.
17729 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
17730   Register object = ToRegister(ins->object());
17731   Register output = ToRegister(ins->output());
17733   OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
17734   addOutOfLineCode(ool, ins->mir());
17736   masm.isCallable(object, output, ool->entry());
17738   masm.bind(ool->rejoin());
// IsCallable on a boxed Value: non-objects are false immediately; objects
// are unboxed into |temp| and tested like visitIsCallableO.
17741 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
17742   ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
17743   Register output = ToRegister(ins->output());
17744   Register temp = ToRegister(ins->temp0());
17746   Label notObject;
17747   masm.fallibleUnboxObject(val, temp, &notObject);
17749   OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
17750   addOutOfLineCode(ool, ins->mir());
17752   masm.isCallable(temp, output, ool->entry());
17753   masm.jump(ool->rejoin());
17755   masm.bind(&notObject);
17756   masm.move32(Imm32(0), output);
17758   masm.bind(ool->rejoin());
// Slow path for IsCallable: call ObjectIsCallable through the C++ ABI,
// saving/restoring volatile registers around the call (except the output,
// which receives the boolean result).
17761 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
17762   Register object = ool->object();
17763   Register output = ool->output();
17765   saveVolatile(output);
17766   using Fn = bool (*)(JSObject* obj);
17767   masm.setupAlignedABICall();
17768   masm.passABIArg(object);
17769   masm.callWithABI<Fn, ObjectIsCallable>();
17770   masm.storeCallBoolResult(output);
17771   restoreVolatile(output);
17772   masm.jump(ool->rejoin());
// Out-of-line path for LIsConstructor: keeps the LIR instruction so
// visitOutOfLineIsConstructor can re-derive its registers for the slow call.
17775 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
17776   LIsConstructor* ins_;
17778  public:
17779   explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
17781   void accept(CodeGenerator* codegen) override {
17782     codegen->visitOutOfLineIsConstructor(this);
17784   LIsConstructor* ins() const { return ins_; }
// IsConstructor on an object: inline masm.isConstructor check with an OOL
// ABI-call fallback for cases it cannot decide.
17787 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
17788   Register object = ToRegister(ins->object());
17789   Register output = ToRegister(ins->output());
17791   OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
17792   addOutOfLineCode(ool, ins->mir());
17794   masm.isConstructor(object, output, ool->entry());
17796   masm.bind(ool->rejoin());
// Slow path for IsConstructor: call ObjectIsConstructor through the C++ ABI,
// saving/restoring volatile registers around the call (except the output,
// which receives the boolean result).
17799 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
17800   LIsConstructor* ins = ool->ins();
17801   Register object = ToRegister(ins->object());
17802   Register output = ToRegister(ins->output());
17804   saveVolatile(output);
17805   using Fn = bool (*)(JSObject* obj);
17806   masm.setupAlignedABICall();
17807   masm.passABIArg(object);
17808   masm.callWithABI<Fn, ObjectIsConstructor>();
17809   masm.storeCallBoolResult(output);
17810   restoreVolatile(output);
17811   masm.jump(ool->rejoin());
17814 void CodeGenerator::visitIsCrossRealmArrayConstructor(
17815 LIsCrossRealmArrayConstructor* ins) {
17816 Register object = ToRegister(ins->object());
17817 Register output = ToRegister(ins->output());
17819 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared IsArray codegen: set |output| to 1 if |obj| has ArrayObject's class,
// 0 if it has any other non-proxy class; proxies take |ool|'s VM-call path.
// |notArray| (optional) is a caller-provided entry point for the false case.
// Note: |output| is clobbered with the class pointer during the test.
17822 static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
17823                               Register obj, Register output,
17824                               Label* notArray = nullptr) {
17825   masm.loadObjClassUnsafe(obj, output);
17827   Label isArray;
17828   masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
17829                  &isArray);
17831   // Branch to OOL path if it's a proxy.
17832   masm.branchTestClassIsProxy(true, output, ool->entry());
17834   if (notArray) {
17835     masm.bind(notArray);
17837   masm.move32(Imm32(0), output);
17838   masm.jump(ool->rejoin());
17840   masm.bind(&isArray);
17841   masm.move32(Imm32(1), output);
17843   masm.bind(ool->rejoin());
// IsArray on a known object: inline class check with a VM call
// (js::IsArrayFromJit) as the proxy fallback.
17846 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
17847   Register object = ToRegister(lir->object());
17848   Register output = ToRegister(lir->output());
17850   using Fn = bool (*)(JSContext*, HandleObject, bool*);
17851   OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17852       lir, ArgList(object), StoreRegisterTo(output));
17853   EmitObjectIsArray(masm, ool, object, output);
// IsArray on a boxed Value: non-objects are routed to the shared helper's
// false case via |notArray|; objects are unboxed and tested like IsArrayO.
17856 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
17857   ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
17858   Register output = ToRegister(lir->output());
17859   Register temp = ToRegister(lir->temp0());
17861   Label notArray;
17862   masm.fallibleUnboxObject(val, temp, &notArray);
17864   using Fn = bool (*)(JSContext*, HandleObject, bool*);
17865   OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17866       lir, ArgList(temp), StoreRegisterTo(output));
17867   EmitObjectIsArray(masm, ool, temp, output, &notArray);
// IsTypedArray via a class check. When the object may be a (cross-
// compartment) wrapper, proxies additionally take a VM call
// (IsPossiblyWrappedTypedArray) instead of returning false outright.
// Note: |output| temporarily holds the class pointer during the test.
17870 void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
17871   Register object = ToRegister(lir->object());
17872   Register output = ToRegister(lir->output());
17874   OutOfLineCode* ool = nullptr;
17875   if (lir->mir()->isPossiblyWrapped()) {
17876     using Fn = bool (*)(JSContext*, JSObject*, bool*);
17877     ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
17878         lir, ArgList(object), StoreRegisterTo(output));
17881   Label notTypedArray;
17882   Label done;
17884   masm.loadObjClassUnsafe(object, output);
17885   masm.branchIfClassIsNotTypedArray(output, &notTypedArray);
17887   masm.move32(Imm32(1), output);
17888   masm.jump(&done);
17889   masm.bind(&notTypedArray);
17890   if (ool) {
17891     masm.branchTestClassIsProxy(true, output, ool->entry());
17893   masm.move32(Imm32(0), output);
17894   masm.bind(&done);
17895   if (ool) {
17896     masm.bind(ool->rejoin());
17900 void CodeGenerator::visitIsObject(LIsObject* ins) {
17901 Register output = ToRegister(ins->output());
17902 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
17903 masm.testObjectSet(Assembler::Equal, value, output);
// Fused IsObject + branch: jump to ifTrue when the Value is an object,
// otherwise to ifFalse.
17906 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
17907   ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
17908   testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
// Set the output to 1 iff the Value is null or undefined: test null first,
// then fall back to a set-on-undefined test.
17911 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
17912   Register output = ToRegister(ins->output());
17913   ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
17915   Label isNotNull, done;
17916   masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
17918   masm.move32(Imm32(1), output);
17919   masm.jump(&done);
17921   masm.bind(&isNotNull);
17922   masm.testUndefinedSet(Assembler::Equal, value, output);
17924   masm.bind(&done);
// Fused IsNullOrUndefined + branch: split the Value's tag once and test it
// against both null and undefined; fall through (or jump) to ifFalse.
17927 void CodeGenerator::visitIsNullOrUndefinedAndBranch(
17928     LIsNullOrUndefinedAndBranch* ins) {
17929   Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
17930   Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
17931   ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);
17933   ScratchTagScope tag(masm, value);
17934   masm.splitTagForTest(value, tag);
17936   masm.branchTestNull(Assembler::Equal, tag, ifTrue);
17937   masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);
// Elide the final jump when the false block is laid out immediately next.
17939   if (!isNextBlock(ins->ifFalse()->lir())) {
17940     masm.jump(ifFalse);
// Load the script being compiled (the entry block's script) into |reg|.
17944 void CodeGenerator::loadOutermostJSScript(Register reg) {
17945   // The "outermost" JSScript means the script that we are compiling
17946   // basically; this is not always the script associated with the
17947   // current basic block, which might be an inlined script.
17949   MIRGraph& graph = current->mir()->graph();
17950   MBasicBlock* entryBlock = graph.entryBlock();
17951   masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
// Load the script associated with |block| (possibly an inlined script)
// into |reg|.
void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
  // The current JSScript means the script for the current
  // basic block. This may be an inlined script.

  JSScript* script = block->info().script();
  masm.movePtr(ImmGCPtr(script), reg);
}
// HasClass: output = (clasp(lhs) == mir->getClass()).
// Note: the clasp pointer is loaded into |output| first, then overwritten
// with the comparison result.
void CodeGenerator::visitHasClass(LHasClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());

  masm.loadObjClassUnsafe(lhs, output);
  masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
                 output);
}
// GuardToClass: bail out of Ion code if the object's class doesn't match.
void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
                          temp, spectreRegToZero, &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// GuardToFunction: bail out if the object is not a JSFunction
// (either of the two function classes).
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp0());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
                               &notEqual);

  // Can't return null-return here, so bail.
  bailoutFrom(&notEqual, ins->snapshot());
}
// ObjectClassToString: call the C++ helper js::ObjectClassToString via an
// ABI call, bailing out if it returns nullptr (e.g. OOM / needs VM path).
void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  Register obj = ToRegister(lir->lhs());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();

  // A null result means the helper could not produce a string: bail.
  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
}
// Wasm parameters are already in their ABI-assigned locations; no code needed.
void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
// 64-bit wasm parameters are likewise already in place; nothing to emit.
void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
// Wasm return (non-void): jump to the shared epilogue unless we're already
// at the end of the function.
void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Wasm return of an int64: same shared-epilogue jump as visitWasmReturn.
void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Wasm void return: same shared-epilogue jump as visitWasmReturn.
void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}
// Debug-only: emit runtime checks that an integer value in |input| lies
// within the range |r| computed by range analysis. Hits assumeUnreachable
// (a crash in debug builds) on violation.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
// Debug-only: emit runtime checks that the double in |input| satisfies the
// range |r| (bounds, negative-zero, exponent/NaN/infinity constraints).
// |temp| is clobbered as a scratch for constants and the -0.0 division test.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered with everything; accept it here.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
// Debug-only: crash (assumeUnreachable) if the object's class doesn't match
// the class the MIR claims it has.
void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  if (ins->mir()->getClass() == &FunctionClass) {
    // Allow both possible function classes here.
    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
                                                     temp, &success);
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  }
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}
// Debug-only: crash if the object's shape doesn't match the expected shape.
void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}
// LIR wrapper: assert an integer input is within its analyzed range.
void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  Register input = ToRegister(ins->input());
  const Range* r = ins->range();

  emitAssertRangeI(ins->mir()->input()->type(), r, input);
}
// LIR wrapper: assert a double input is within its analyzed range.
void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}
// LIR wrapper for float32: widen to double first, then run the double-range
// assertions (ranges are computed in double precision).
void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}
// LIR wrapper for boxed Values: dispatch on the value tag and assert the
// range for the int32 or double payload. Any other tag is a range violation.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        // Release the tag scratch while we unbox and check the payload.
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
// InterruptCheck: if the runtime's interrupt bits are set, call the
// InterruptCheck VM function via an out-of-line path.
void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}
// Out-of-line code for a wasm trap that execution can resume from
// (e.g. interrupt checks): emit the trap, record a safepoint, and rejoin.
void CodeGenerator::visitOutOfLineResumableWasmTrap(
    OutOfLineResumableWasmTrap* ool) {
  LInstruction* lir = ool->lir();
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);

  masm.jump(ool->rejoin());
}
// Out-of-line code for a wasm trap that aborts execution: no safepoint or
// rejoin is needed because control never returns.
void CodeGenerator::visitOutOfLineAbortingWasmTrap(
    OutOfLineAbortingWasmTrap* ool) {
  masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
}
// Wasm interrupt check: if the per-instance interrupt flag is set, take the
// resumable CheckInterrupt trap out-of-line.
void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
      lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
      wasm::Trap::CheckInterrupt);
  addOutOfLineCode(ool, lir->mir());
  masm.branch32(
      Assembler::NotEqual,
      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
      Imm32(0), ool->entry());
  masm.bind(ool->rejoin());
}
// Unconditional wasm trap.
void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrap* mir = lir->mir();

  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
}
// Trap if the wasm reference in |ref| is null; otherwise fall through.
void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrapIfNull* mir = lir->mir();
  Label nonNull;
  Register ref = ToRegister(lir->ref());

  // branchWasmAnyRefIsNull(false, ...) branches when the ref is NOT null.
  masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
  masm.bind(&nonNull);
}
// Emit a branch to |label| if |ref| (statically of |sourceType|) is a
// subtype of |destType|, dispatching on the destination's type hierarchy
// (any / func / extern). |superSTV| and the scratches are only needed for
// concrete-type checks and may be Invalid otherwise.
static void BranchWasmRefIsSubtype(MacroAssembler& masm, Register ref,
                                   const wasm::RefType& sourceType,
                                   const wasm::RefType& destType, Label* label,
                                   Register superSTV, Register scratch1,
                                   Register scratch2) {
  if (destType.isAnyHierarchy()) {
    masm.branchWasmRefIsSubtypeAny(ref, sourceType, destType, label,
                                   /*onSuccess=*/true, superSTV, scratch1,
                                   scratch2);
  } else if (destType.isFuncHierarchy()) {
    masm.branchWasmRefIsSubtypeFunc(ref, sourceType, destType, label,
                                    /*onSuccess=*/true, superSTV, scratch1,
                                    scratch2);
  } else if (destType.isExternHierarchy()) {
    masm.branchWasmRefIsSubtypeExtern(ref, sourceType, destType, label,
                                      /*onSuccess=*/true);
  } else {
    MOZ_CRASH("could not generate casting code for unknown type hierarchy");
  }
}
// ref.test against an abstract (non-concrete) heap type: produce 0/1 in
// |result|. No supertype vector is needed for abstract checks.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
    LWasmRefIsSubtypeOfAbstract* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
  MOZ_ASSERT(!mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = Register::Invalid();
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Register scratch2 = Register::Invalid();
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label onFail;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.bind(&onFail);
  masm.xor32(result, result);  // result = 0
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// ref.test against a concrete type: produce 0/1 in |result| using the
// supertype vector in |superSTV|.
void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
    LWasmRefIsSubtypeOfConcrete* ins) {
  MOZ_ASSERT(gen->compilingWasm());

  const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
  MOZ_ASSERT(mir->destType().isTypeRef());

  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Register result = ToRegister(ins->output());
  Label onSuccess;
  Label join;
  BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
                         &onSuccess, superSTV, scratch1, scratch2);
  masm.move32(Imm32(0), result);
  masm.jump(&join);
  masm.bind(&onSuccess);
  masm.move32(Imm32(1), result);
  masm.bind(&join);
}
// Fused abstract ref.test + branch: jump to ifTrue on subtype match,
// otherwise fall through to the explicit jump to ifFalse.
void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
    LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, Register::Invalid(), scratch1,
                         Register::Invalid());
  masm.jump(onFail);
}
// Fused concrete ref.test + branch, using the supertype vector.
void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
    LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
  MOZ_ASSERT(gen->compilingWasm());
  Register ref = ToRegister(ins->ref());
  Register superSTV = ToRegister(ins->superSTV());
  Register scratch1 = ToRegister(ins->temp0());
  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
                         onSuccess, superSTV, scratch1, scratch2);
  masm.jump(onFail);
}
// Call a wasm struct-allocation builtin (instance call) with live registers
// saved, recording a codegen-call safepoint. The allocated object pointer is
// moved into |output|.
void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
                                           wasm::SymbolicAddress fun,
                                           Register typeDefData,
                                           Register output) {
  // Preserve the instance register across the call; it's restored below.
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefData);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(wasm::BytecodeOffset(0), fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // ARM64 keeps a pseudo stack pointer; resync the real SP after the pop.
  masm.syncStackPtr();
#endif
}
// Out-of-line path to allocate wasm GC structs. Captures everything the
// slow path needs to call the allocation builtin: the LIR instruction (for
// safepoints/live regs), the builtin address, and the registers holding the
// type definition data and the output.
class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  wasm::SymbolicAddress fun_;
  Register typeDefData_;
  Register output_;

 public:
  OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
                         Register typeDefData, Register output)
      : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmNewStruct(this);
  }

  // Accessors used by the visitor when emitting the slow path.
  LInstruction* lir() const { return lir_; }
  wasm::SymbolicAddress fun() const { return fun_; }
  Register typeDefData() const { return typeDefData_; }
  Register output() const { return output_; }
};
// Slow path for wasm struct allocation: call the builtin, then rejoin.
void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
  callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
                         ool->output());
  masm.jump(ool->rejoin());
}
// struct.new: out-of-line (large) structs always call the builtin; inline
// structs attempt an in-line nursery allocation and fall back to the
// out-of-line builtin path on failure.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();

  Register typeDefData = ToRegister(lir->typeDefData());
  Register output = ToRegister(lir->output());

  if (mir->isOutline()) {
    // Pick the zeroing or non-zeroing variant of the OOL builtin.
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;
    callWasmStructAllocFun(lir, fun, typeDefData, output);
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    auto ool =
        new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
    addOutOfLineCode(ool, lir->mir());

    Register temp1 = ToRegister(lir->temp0());
    Register temp2 = ToRegister(lir->temp1());
    // Fast in-line allocation; jumps to ool->entry() if it fails.
    masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
                             ool->entry(), mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
// Expose the wasm heap base register to LIR; only valid on platforms that
// reserve a dedicated HeapReg.
void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
#ifdef WASM_HAS_HEAPREG
  masm.movePtr(HeapReg, ToRegister(ins->output()));
#else
  MOZ_CRASH();
#endif
}
// 32-bit wasm memory bounds check: trap on ptr >= limit.
void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  // When there are no spectre mitigations in place, branching out-of-line to
  // the trap is a big performance win, but with mitigations it's trickier. See
  // bug 1680243.
  if (JitOptions.spectreIndexMasking) {
    // Inline trap: branch over it on the in-bounds path.
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    // Out-of-line trap: branch to it on the out-of-bounds path.
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// 64-bit wasm memory bounds check; mirrors visitWasmBoundsCheck.
void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
    masm.bind(&ok);
  } else {
    OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
        mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
                           ool->entry());
  }
}
// Range bounds check: verify [index, index+length) fits within |limit|.
void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
  const MWasmBoundsCheckRange32* mir = ins->mir();
  Register index = ToRegister(ins->index());
  Register length = ToRegister(ins->length());
  Register limit = ToRegister(ins->limit());
  Register tmp = ToRegister(ins->temp0());

  masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
}
// Alignment check: trap (out-of-line) if any low bits of the 32-bit pointer
// are set relative to the access size (byteSize must be a power of two).
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
// 64-bit alignment check. Only the low word can carry misalignment bits,
// so on 32-bit platforms it suffices to test ptr.low.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
      mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
// Load a field from the wasm Instance at a static offset; the load width
// is selected by the MIR result type.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
// Load a 64-bit field from the wasm Instance at a static offset.
void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
  MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
  masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
              ToOutRegister64(ins));
}
// Emit a non-atomic load/add/store increment of a script's warm-up counter.
// In debug builds, first verify the JitScript pointer is still the one we
// baked in at compile time.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
// LIR wrapper: bump the warm-up counter embedded in the script's JitScript.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
// LexicalCheck: bail out on the uninitialized-lexical magic value (TDZ).
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Throw a lexical error (e.g. TDZ access) via the VM with a JS error number.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
// Throw a pre-defined message (ThrowMsgKind) via the VM.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
// Instantiate global declarations (var/function hoisting) via the VM.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  // Args pushed in reverse order: pc, then script.
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
// debugger; statement: ask the C++ helper whether a live Debugger wants the
// onDebuggerStatement hook; if so, bail out to the interpreter to run it.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// new.target: when constructing, load argv[max(numActualArgs, numFormalArgs)]
// from the frame (new.target is passed after the arguments); otherwise the
// result is undefined.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // More actuals than formals: new.target is at argv[numActualArgs].
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // Underflow case: new.target sits after the formal-argument slots.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
// Derived-class constructor return check: an object return value is used
// as-is; a non-undefined primitive, or an uninitialized |this|, throws via
// the OOL VM call; otherwise return |this|.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  // Magic |this| means it was never initialized: also throw.
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
// CheckIsObj: unbox the value as an object, or throw a kind-specific
// "not an object" error via the OOL VM call.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
// CheckObjCoercible: throw if the value is null or undefined (i.e. not
// coercible to an object); other values pass through.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
// CheckClassHeritage: a class heritage must be null or a constructor.
// Null passes straight to the rejoin; anything that isn't an object, or is
// an object but not a constructor, goes to the OOL VM call which throws.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  // isConstructor writes a 0/1 result into temp1; zero means "throw".
  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
// CheckThis: throw if |this| is still the uninitialized-lexical magic value
// (derived constructor used |this| before calling super()).
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// CheckThisReinit: throw if |this| is already initialized (i.e. NOT the
// magic value) — super() may only initialize |this| once.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
// Create a generator object for the current frame via the CreateGenerator
// VM function (args pushed in reverse of the Fn signature order).
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
// Resolve an async function's result promise with |value| via a VM call.
18833 void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
18834 Register generator = ToRegister(lir->generator());
18835 ValueOperand value = ToValue(lir, LAsyncResolve::ValueIndex);
18837 pushArg(value);
18838 pushArg(generator);
18840 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
18841 HandleValue);
18842 callVM<Fn, js::AsyncFunctionResolve>(lir);
// Reject an async function's result promise with |reason| (and the saved
// |stack| for error reporting) via a VM call.
18845 void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
18846 Register generator = ToRegister(lir->generator());
18847 ValueOperand reason = ToValue(lir, LAsyncReject::ReasonIndex);
18848 ValueOperand stack = ToValue(lir, LAsyncReject::StackIndex);
18850 pushArg(stack);
18851 pushArg(reason);
18852 pushArg(generator);
18854 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
18855 HandleValue, HandleValue);
18856 callVM<Fn, js::AsyncFunctionReject>(lir);
// Perform the await operation for an async function via a VM call.
18859 void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
18860 ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
18861 Register generator = ToRegister(lir->generator());
18863 pushArg(value);
18864 pushArg(generator);
18866 using Fn =
18867 JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
18868 HandleValue value);
18869 callVM<Fn, js::AsyncFunctionAwait>(lir);
// Ask the VM whether awaiting |value| can be skipped (the "optimized await"
// fast path); the boolean result is consumed by MaybeExtractAwaitValue.
18872 void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
18873 ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);
18875 pushArg(value);
18877 using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
18878 callVM<Fn, js::CanSkipAwait>(lir);
// If the preceding CanSkipAwait said the await can be skipped, extract the
// resolved value via a VM call; otherwise just pass |value| through
// unchanged to |output|.
18881 void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
18882 ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
18883 ValueOperand output = ToOutValue(lir);
18884 Register canSkip = ToRegister(lir->canSkip());
18886 Label cantExtract, finished;
18887 masm.branchIfFalseBool(canSkip, &cantExtract);
18889 pushArg(value);
18891 using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
18892 callVM<Fn, js::ExtractAwaitValue>(lir);
18893 masm.jump(&finished);
18894 masm.bind(&cantExtract);
// Slow-path not needed: forward the input value as-is.
18896 masm.moveValue(value, output);
18898 masm.bind(&finished);
// Debug-only VM call validating a value used from self-hosted code.
18901 void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
18902 ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
18903 pushArg(checkValue);
18904 using Fn = bool (*)(JSContext*, HandleValue);
18905 callVM<Fn, js::Debug_CheckSelfHosted>(ins);
// Math.random(): generate a double in [0, 1) inline from the realm's
// xorshift128+ RNG state. Under differential testing the result is forced
// to 0.0 so runs are reproducible.
18908 void CodeGenerator::visitRandom(LRandom* ins) {
18909 using mozilla::non_crypto::XorShift128PlusRNG;
18911 FloatRegister output = ToFloatRegister(ins->output());
18912 Register rngReg = ToRegister(ins->temp0());
18914 Register64 temp1 = ToRegister64(ins->temp1());
18915 Register64 temp2 = ToRegister64(ins->temp2());
// The RNG state lives at a fixed address in the realm; bake it in.
18917 const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
18918 masm.movePtr(ImmPtr(rng), rngReg);
18920 masm.randomDouble(rngReg, output, temp1, temp2);
18921 if (js::SupportDifferentialTesting()) {
18922 masm.loadConstantDouble(0.0, output);
// Sign-extend the low 8 or 16 bits of |input| to a full int32 in |output|.
18926 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
18927 Register input = ToRegister(ins->input());
18928 Register output = ToRegister(ins->output());
18930 switch (ins->mode()) {
18931 case MSignExtendInt32::Byte:
18932 masm.move8SignExtend(input, output);
18933 break;
18934 case MSignExtendInt32::Half:
18935 masm.move16SignExtend(input, output);
18936 break;
// 32-bit rotate left/right. A constant count is masked to 0..31 and emitted
// as an immediate-form rotate; a variable count uses the register form.
18940 void CodeGenerator::visitRotate(LRotate* ins) {
18941 MRotate* mir = ins->mir();
18942 Register input = ToRegister(ins->input());
18943 Register dest = ToRegister(ins->output());
18945 const LAllocation* count = ins->count();
18946 if (count->isConstant()) {
// Only the low 5 bits of the rotate count are meaningful for 32-bit ops.
18947 int32_t c = ToInt32(count) & 0x1F;
18948 if (mir->isLeftRotate()) {
18949 masm.rotateLeft(Imm32(c), input, dest);
18950 } else {
18951 masm.rotateRight(Imm32(c), input, dest);
18953 } else {
18954 Register creg = ToRegister(count);
18955 if (mir->isLeftRotate()) {
18956 masm.rotateLeft(creg, input, dest);
18957 } else {
18958 masm.rotateRight(creg, input, dest);
// Out-of-line path for LNaNToZero: holds the LIR node so the OOL visitor
// can locate the output register (see visitOutOfLineNaNToZero).
18963 class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
18964 LNaNToZero* lir_;
18966 public:
18967 explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}
18969 void accept(CodeGenerator* codegen) override {
18970 codegen->visitOutOfLineNaNToZero(this);
18972 LNaNToZero* lir() const { return lir_; }
// OOL path for NaNToZero: replace the output with +0.0 and rejoin.
18975 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
18976 FloatRegister output = ToFloatRegister(ool->lir()->output());
18977 masm.loadConstantDouble(0.0, output);
18978 masm.jump(ool->rejoin());
// Convert NaN (and, unless provably absent, -0.0) to +0.0. When the operand
// can never be -0, a single unordered compare detects NaN; otherwise compare
// against 0.0 with EqualOrUnordered to catch both -0 and NaN in one branch.
18981 void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
18982 FloatRegister input = ToFloatRegister(lir->input());
18984 OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
18985 addOutOfLineCode(ool, lir->mir());
18987 if (lir->mir()->operandIsNeverNegativeZero()) {
18988 masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
18989 } else {
18990 FloatRegister scratch = ToFloatRegister(lir->temp0());
18991 masm.loadConstantDouble(0.0, scratch);
18992 masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
18993 ool->entry());
18995 masm.bind(ool->rejoin());
// Set |output| to whether |obj| is a packed (no-holes) array.
18998 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
18999 Register obj = ToRegister(lir->object());
19000 Register output = ToRegister(lir->output());
19001 Register temp = ToRegister(lir->temp0());
19003 masm.setIsPackedArray(obj, output, temp);
// Bail out of Ion code if |array| is not a packed array.
19006 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
19007 Register array = ToRegister(lir->array());
19008 Register temp0 = ToRegister(lir->temp0());
19009 Register temp1 = ToRegister(lir->temp1());
19011 Label bail;
19012 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
19013 bailoutFrom(&bail, lir->snapshot());
// Object.getPrototypeOf fast path: load the proto slot inline. A real proto
// (pointer value > 1) is boxed directly; a null proto (0) produces null; the
// lazy-proto sentinel (1) falls back to the GetPrototypeOf VM call.
19016 void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
19017 Register target = ToRegister(lir->target());
19018 ValueOperand out = ToOutValue(lir);
19019 Register scratch = out.scratchReg();
19021 using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
19022 OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
19023 StoreValueTo(out));
// The branches below hard-code 1 as the lazy-proto sentinel.
19025 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19027 masm.loadObjProto(target, scratch);
19029 Label hasProto;
19030 masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
19032 // Call into the VM for lazy prototypes.
19033 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
// Remaining case is a null proto.
19035 masm.moveValue(NullValue(), out);
19036 masm.jump(ool->rejoin());
19038 masm.bind(&hasProto);
19039 masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
19041 masm.bind(ool->rejoin());
// Create a plain object with the given prototype value via a VM call.
19044 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
19045 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
19047 using Fn = PlainObject* (*)(JSContext*, HandleValue);
19048 callVM<Fn, js::ObjectWithProtoOperation>(lir);
// Load an object's prototype directly from its proto slot. The MIR guarantees
// the proto is neither null (0) nor lazy (1); debug builds verify that.
19051 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
19052 Register obj = ToRegister(lir->input());
19053 Register output = ToRegister(lir->output());
19055 masm.loadObjProto(obj, output);
19057 #ifdef DEBUG
19058 // We shouldn't encounter a null or lazy proto.
19059 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19061 Label done;
19062 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
19063 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
19064 masm.bind(&done);
19065 #endif
// Fetch a well-known builtin object (identified by BuiltinObjectKind) via a
// VM call.
19068 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
19069 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
19071 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
19072 callVM<Fn, js::BuiltinObjectOperation>(lir);
// JSOp::SuperFun: the super constructor is the prototype of the current
// callee. Load callee's proto; box it as an object, or produce null when the
// proto is null. Debug builds assert the callee is a JSFunction and that the
// proto is never the lazy sentinel (functions are not proxies).
19075 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
19076 Register callee = ToRegister(lir->callee());
19077 ValueOperand out = ToOutValue(lir);
19078 Register temp = ToRegister(lir->temp0());
19080 #ifdef DEBUG
19081 Label classCheckDone;
19082 masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
19083 &classCheckDone);
19084 masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
19085 masm.bind(&classCheckDone);
19086 #endif
19088 // Load prototype of callee
19089 masm.loadObjProto(callee, temp);
19091 #ifdef DEBUG
19092 // We won't encounter a lazy proto, because |callee| is guaranteed to be a
19093 // JSFunction and only proxy objects can have a lazy proto.
19094 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
19096 Label proxyCheckDone;
19097 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
19098 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
19099 masm.bind(&proxyCheckDone);
19100 #endif
19102 Label nullProto, done;
19103 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
19105 // Box prototype and return
19106 masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
19107 masm.jump(&done);
19109 masm.bind(&nullProto);
19110 masm.moveValue(NullValue(), out);
19112 masm.bind(&done);
// Store the home object into an extended function's reserved method slot,
// emitting the GC pre-barrier on the old slot contents first.
19115 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
19116 Register func = ToRegister(lir->function());
19117 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
19119 masm.assertFunctionIsExtended(func);
19121 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
19123 emitPreBarrier(addr);
19124 masm.storeValue(homeObject, addr);
// Set |output| to whether |object| is definitely a TypedArray constructor.
19127 void CodeGenerator::visitIsTypedArrayConstructor(
19128 LIsTypedArrayConstructor* lir) {
19129 Register object = ToRegister(lir->object());
19130 Register output = ToRegister(lir->output());
19132 masm.setIsDefinitelyTypedArrayConstructor(object, output);
// Extract a Value's type tag into |output|. extractTag may return either
// |output| or another register depending on the boxing format, so copy only
// when needed.
19135 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
19136 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
19137 Register output = ToRegister(lir->output());
19139 Register tag = masm.extractTag(value, output);
19140 if (tag != output) {
19141 masm.mov(tag, output);
// Guard that two value tags differ. Bails if the tags are equal, and also
// bails when both tags are number tags: int32 and double have different tags
// but are the same "number" type, so tag inequality proves nothing there.
19145 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
19146 Register lhs = ToRegister(lir->lhs());
19147 Register rhs = ToRegister(lir->rhs());
19149 bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
19151 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
19152 // comparison
19153 Label done;
19154 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
19155 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
19156 bailout(lir->snapshot());
19158 masm.bind(&done);
// Load a proxy wrapper's target object: follow the reserved-slots pointer,
// then unbox the object stored in the private slot.
19161 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
19162 Register object = ToRegister(lir->object());
19163 Register output = ToRegister(lir->output());
19165 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
19166 masm.unboxObject(
19167 Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
19168 output);
// Guard that |object| still has the expected GetterSetter for the guarded
// property id. Calls the pure (non-GC) C++ helper through the ABI and bails
// out when it returns false.
19171 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
19172 Register object = ToRegister(lir->object());
19173 Register temp0 = ToRegister(lir->temp0());
19174 Register temp1 = ToRegister(lir->temp1());
19175 Register temp2 = ToRegister(lir->temp2());
19177 masm.movePropertyKey(lir->mir()->propId(), temp1);
19178 masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
19180 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
19181 GetterSetter* getterSetter);
19182 masm.setupAlignedABICall();
19183 masm.loadJSContext(temp0);
19184 masm.passABIArg(temp0);
19185 masm.passABIArg(object);
19186 masm.passABIArg(temp1);
19187 masm.passABIArg(temp2);
19188 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
19190 bailoutIfFalseBool(ReturnReg, lir->snapshot());
// Bail out if |object| is not extensible.
19193 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
19194 Register object = ToRegister(lir->object());
19195 Register temp = ToRegister(lir->temp0());
19197 Label bail;
19198 masm.branchIfObjectNotExtensible(object, temp, &bail);
19199 bailoutFrom(&bail, lir->snapshot());
// Bail out if the int32 |index| is negative.
19202 void CodeGenerator::visitGuardInt32IsNonNegative(
19203 LGuardInt32IsNonNegative* lir) {
19204 Register index = ToRegister(lir->index());
19206 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
// Bail out unless minimum() <= input <= maximum() (inclusive bounds).
19209 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
19210 Register input = ToRegister(lir->input());
19212 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
19213 lir->snapshot());
19214 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
19215 lir->snapshot());
// Bail out if |index| refers to an existing dense element of |object|:
// i.e. index < initializedLength AND the slot is not a hole (magic value).
19218 void CodeGenerator::visitGuardIndexIsNotDenseElement(
19219 LGuardIndexIsNotDenseElement* lir) {
19220 Register object = ToRegister(lir->object());
19221 Register index = ToRegister(lir->index());
19222 Register temp = ToRegister(lir->temp0());
19223 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19225 // Load obj->elements.
19226 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19228 // Ensure index >= initLength or the element is a hole.
19229 Label notDense;
19230 Address capacity(temp, ObjectElements::offsetOfInitializedLength());
19231 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
19233 BaseValueIndex element(temp, index);
19234 masm.branchTestMagic(Assembler::Equal, element, &notDense);
// In-bounds and not a hole: the element is dense, so the guard fails.
19236 bailout(lir->snapshot());
19238 masm.bind(&notDense);
// Guard that storing at |index| is a valid dense update or append. If the
// array length is writable any index is fine; otherwise the index must be
// within the current length, else we bail.
19241 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
19242 LGuardIndexIsValidUpdateOrAdd* lir) {
19243 Register object = ToRegister(lir->object());
19244 Register index = ToRegister(lir->index());
19245 Register temp = ToRegister(lir->temp0());
19246 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19248 // Load obj->elements.
19249 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19251 Label success;
19253 // If length is writable, branch to &success. All indices are writable.
19254 Address flags(temp, ObjectElements::offsetOfFlags());
19255 masm.branchTest32(Assembler::Zero, flags,
19256 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
19257 &success);
19259 // Otherwise, ensure index is in bounds.
19260 Label bail;
19261 Address length(temp, ObjectElements::offsetOfLength());
19262 masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
19263 masm.bind(&success);
19265 bailoutFrom(&bail, lir->snapshot());
// VM call to add or update a sparse (non-dense) element on a native object.
19268 void CodeGenerator::visitCallAddOrUpdateSparseElement(
19269 LCallAddOrUpdateSparseElement* lir) {
19270 Register object = ToRegister(lir->object());
19271 Register index = ToRegister(lir->index());
19272 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
19274 pushArg(Imm32(lir->mir()->strict()));
19275 pushArg(value);
19276 pushArg(index);
19277 pushArg(object);
19279 using Fn =
19280 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
19281 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
// VM call to read a sparse element from a native object.
19284 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
19285 Register object = ToRegister(lir->object());
19286 Register index = ToRegister(lir->index());
19288 pushArg(index);
19289 pushArg(object);
19291 using Fn =
19292 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
19293 callVM<Fn, js::GetSparseElementHelper>(lir);
// VM call implementing a native [[Get]] by element index; the receiver is
// the object itself, boxed as a Value.
19296 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
19297 Register object = ToRegister(lir->object());
19298 Register index = ToRegister(lir->index());
19300 pushArg(index);
19301 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
19302 pushArg(object);
19304 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19305 MutableHandleValue);
19306 callVM<Fn, js::NativeGetElement>(lir);
// Same as visitCallNativeGetElement, but for super[index] accesses where a
// separate receiver value is supplied.
19309 void CodeGenerator::visitCallNativeGetElementSuper(
19310 LCallNativeGetElementSuper* lir) {
19311 Register object = ToRegister(lir->object());
19312 Register index = ToRegister(lir->index());
19313 ValueOperand receiver =
19314 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
19316 pushArg(index);
19317 pushArg(receiver);
19318 pushArg(object);
19320 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19321 MutableHandleValue);
19322 callVM<Fn, js::NativeGetElement>(lir);
// Test for a sparse element via the pure ABI helper HasNativeElementPure.
// A Value-sized stack slot serves as the out-parameter; on helper failure
// the slot is popped and we bail out, on success the boolean result is
// unboxed from the slot. Stack depth is identical on both paths.
19325 void CodeGenerator::visitCallObjectHasSparseElement(
19326 LCallObjectHasSparseElement* lir) {
19327 Register object = ToRegister(lir->object());
19328 Register index = ToRegister(lir->index());
19329 Register temp0 = ToRegister(lir->temp0());
19330 Register temp1 = ToRegister(lir->temp1());
19331 Register output = ToRegister(lir->output());
// Reserve an out-param slot on the stack; temp1 points at it.
19333 masm.reserveStack(sizeof(Value));
19334 masm.moveStackPtrTo(temp1);
19336 using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
19337 masm.setupAlignedABICall();
19338 masm.loadJSContext(temp0);
19339 masm.passABIArg(temp0);
19340 masm.passABIArg(object);
19341 masm.passABIArg(index);
19342 masm.passABIArg(temp1);
19343 masm.callWithABI<Fn, HasNativeElementPure>();
19344 masm.storeCallPointerResult(temp0);
19346 Label bail, ok;
19347 uint32_t framePushed = masm.framePushed();
19348 masm.branchIfTrueBool(temp0, &ok);
// Failure path: release the slot before bailing.
19349 masm.adjustStack(sizeof(Value));
19350 masm.jump(&bail);
19352 masm.bind(&ok);
// Restore the bookkeeping clobbered by the failure path's adjustStack.
19353 masm.setFramePushed(framePushed);
19354 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
19355 masm.adjustStack(sizeof(Value));
19357 bailoutFrom(&bail, lir->snapshot());
// BigInt.asIntN with a non-constant bit count: always a VM call.
19360 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
19361 Register bits = ToRegister(ins->bits());
19362 Register input = ToRegister(ins->input());
19364 pushArg(bits);
19365 pushArg(input);
19367 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19368 callVM<Fn, jit::BigIntAsIntN>(ins);
// BigInt.asIntN(64, x) inline path. If the input already fits in an int64
// with matching sign, it is returned unchanged (output was preloaded with
// the input); otherwise a new BigInt is created from the truncated int64.
19371 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
19372 Register input = ToRegister(ins->input());
19373 Register temp = ToRegister(ins->temp());
19374 Register64 temp64 = ToRegister64(ins->temp64());
19375 Register output = ToRegister(ins->output());
19377 Label done, create;
// Optimistically return the input itself.
19379 masm.movePtr(input, output);
19381 // Load the BigInt value as an int64.
19382 masm.loadBigInt64(input, temp64);
19384 // Create a new BigInt when the input exceeds the int64 range.
19385 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19386 Imm32(64 / BigInt::DigitBits), &create);
19388 // And create a new BigInt when the value and the BigInt have different signs.
19389 Label nonNegative;
19390 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19391 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
19392 masm.jump(&done);
19394 masm.bind(&nonNegative);
19395 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
19397 masm.bind(&create);
19398 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19400 masm.bind(&done);
// BigInt.asIntN(32, x) inline path. Single-digit BigInts whose absolute
// value fits in int32 are returned unchanged; otherwise the value is
// sign-extended from 32 bits and a new BigInt is created.
19403 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
19404 Register input = ToRegister(ins->input());
19405 Register temp = ToRegister(ins->temp());
19406 Register64 temp64 = ToRegister64(ins->temp64());
19407 Register output = ToRegister(ins->output());
19409 Label done, create;
// Optimistically return the input itself.
19411 masm.movePtr(input, output);
19413 // Load the absolute value of the first digit.
19414 masm.loadFirstBigIntDigitOrZero(input, temp);
19416 // If the absolute value exceeds the int32 range, create a new BigInt.
19417 masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
19419 // Also create a new BigInt if we have more than one digit.
19420 masm.branch32(Assembler::BelowOrEqual,
19421 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
19423 masm.bind(&create);
19425 // |temp| stores the absolute value, negate it when the sign flag is set.
19426 Label nonNegative;
19427 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19428 masm.negPtr(temp);
19429 masm.bind(&nonNegative);
19431 masm.move32To64SignExtend(temp, temp64);
19432 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19434 masm.bind(&done);
// BigInt.asUintN with a non-constant bit count: always a VM call.
19437 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
19438 Register bits = ToRegister(ins->bits());
19439 Register input = ToRegister(ins->input());
19441 pushArg(bits);
19442 pushArg(input);
19444 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19445 callVM<Fn, jit::BigIntAsUintN>(ins);
// BigInt.asUintN(64, x) inline path. A non-negative input that fits in
// uint64 is returned unchanged; otherwise a new BigInt is created from the
// truncated uint64 value.
19448 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
19449 Register input = ToRegister(ins->input());
19450 Register temp = ToRegister(ins->temp());
19451 Register64 temp64 = ToRegister64(ins->temp64());
19452 Register output = ToRegister(ins->output());
19454 Label done, create;
// Optimistically return the input itself.
19456 masm.movePtr(input, output);
19458 // Load the BigInt value as an uint64.
19459 masm.loadBigInt64(input, temp64);
19461 // Create a new BigInt when the input exceeds the uint64 range.
19462 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19463 Imm32(64 / BigInt::DigitBits), &create);
19465 // And create a new BigInt when the input has the sign flag set.
19466 masm.branchIfBigIntIsNonNegative(input, &done);
19468 masm.bind(&create);
19469 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19471 masm.bind(&done);
// BigInt.asUintN(32, x) inline path. A non-negative single-digit input that
// fits in uint32 is returned unchanged; otherwise the value is zero-extended
// from 32 bits and a new BigInt is created.
19474 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
19475 Register input = ToRegister(ins->input());
19476 Register temp = ToRegister(ins->temp());
19477 Register64 temp64 = ToRegister64(ins->temp64());
19478 Register output = ToRegister(ins->output());
19480 Label done, create;
// Optimistically return the input itself.
19482 masm.movePtr(input, output);
19484 // Load the absolute value of the first digit.
19485 masm.loadFirstBigIntDigitOrZero(input, temp);
19487 // If the absolute value exceeds the uint32 range, create a new BigInt.
// (On 32-bit platforms a digit cannot exceed UINT32_MAX, so no check.)
19488 #if JS_PUNBOX64
19489 masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
19490 #endif
19492 // Also create a new BigInt if we have more than one digit.
19493 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19494 Imm32(1), &create);
19496 // And create a new BigInt when the input has the sign flag set.
19497 masm.branchIfBigIntIsNonNegative(input, &done);
19499 masm.bind(&create);
19501 // |temp| stores the absolute value, negate it when the sign flag is set.
19502 Label nonNegative;
19503 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19504 masm.negPtr(temp);
19505 masm.bind(&nonNegative);
19507 masm.move32To64ZeroExtend(temp, temp64);
19508 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19510 masm.bind(&done);
// Bail out if the value is a GC thing (object/string/symbol/bigint).
19513 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
19514 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
19516 Label bail;
19517 masm.branchTestGCThing(Assembler::Equal, input, &bail);
19518 bailoutFrom(&bail, ins->snapshot());
// Normalize a non-GC-thing value into its canonical hashable form
// (delegated entirely to the MacroAssembler helper).
19521 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
19522 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
19523 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19524 ValueOperand output = ToOutValue(ins);
19526 masm.toHashableNonGCThing(input, output, tempFloat);
// Atomize a string for hashing: strings that are already atoms pass through
// on the fast path; others take the OOL AtomizeString VM call.
19529 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
19530 Register input = ToRegister(ins->input());
19531 Register output = ToRegister(ins->output());
19533 using Fn = JSAtom* (*)(JSContext*, JSString*);
19534 auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
19535 StoreRegisterTo(output));
19537 masm.branchTest32(Assembler::Zero, Address(input, JSString::offsetOfFlags()),
19538 Imm32(JSString::ATOM_BIT), ool->entry());
19539 masm.movePtr(input, output);
19540 masm.bind(ool->rejoin());
// Normalize an arbitrary value into its canonical hashable form. String
// inputs may need atomizing; the masm helper jumps to the OOL AtomizeString
// call (with the string in |str|) and rejoins afterwards.
19543 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
19544 ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
19545 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19546 ValueOperand output = ToOutValue(ins);
19548 Register str = output.scratchReg();
19550 using Fn = JSAtom* (*)(JSContext*, JSString*);
19551 auto* ool =
19552 oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
19554 masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
// Compute the hash of a non-GC-thing value into |output|.
19557 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
19558 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
19559 Register temp = ToRegister(ins->temp0());
19560 Register output = ToRegister(ins->output());
19562 masm.prepareHashNonGCThing(input, output, temp);
// Compute a string's hash into |output|.
19565 void CodeGenerator::visitHashString(LHashString* ins) {
19566 Register input = ToRegister(ins->input());
19567 Register temp = ToRegister(ins->temp0());
19568 Register output = ToRegister(ins->output());
19570 masm.prepareHashString(input, output, temp);
// Compute a symbol's hash into |output|.
19573 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
19574 Register input = ToRegister(ins->input());
19575 Register output = ToRegister(ins->output());
19577 masm.prepareHashSymbol(input, output);
// Compute a BigInt's hash into |output|.
19580 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
19581 Register input = ToRegister(ins->input());
19582 Register temp0 = ToRegister(ins->temp0());
19583 Register temp1 = ToRegister(ins->temp1());
19584 Register temp2 = ToRegister(ins->temp2());
19585 Register output = ToRegister(ins->output());
19587 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
// Compute an object value's hash for use with |setObj|'s hash table.
19590 void CodeGenerator::visitHashObject(LHashObject* ins) {
19591 Register setObj = ToRegister(ins->setObject());
19592 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
19593 Register temp0 = ToRegister(ins->temp0());
19594 Register temp1 = ToRegister(ins->temp1());
19595 Register temp2 = ToRegister(ins->temp2());
19596 Register temp3 = ToRegister(ins->temp3());
19597 Register output = ToRegister(ins->output());
19599 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
// Compute an arbitrary value's hash for use with |setObj|'s hash table.
19602 void CodeGenerator::visitHashValue(LHashValue* ins) {
19603 Register setObj = ToRegister(ins->setObject());
19604 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
19605 Register temp0 = ToRegister(ins->temp0());
19606 Register temp1 = ToRegister(ins->temp1());
19607 Register temp2 = ToRegister(ins->temp2());
19608 Register temp3 = ToRegister(ins->temp3());
19609 Register output = ToRegister(ins->output());
19611 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
// Set.prototype.has fast path for non-BigInt keys, using a precomputed hash.
19614 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
19615 Register setObj = ToRegister(ins->setObject());
19616 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
19617 Register hash = ToRegister(ins->hash());
19618 Register temp0 = ToRegister(ins->temp0());
19619 Register temp1 = ToRegister(ins->temp1());
19620 Register output = ToRegister(ins->output());
19622 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
// Set.prototype.has fast path for BigInt keys, using a precomputed hash.
19625 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
19626 Register setObj = ToRegister(ins->setObject());
19627 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
19628 Register hash = ToRegister(ins->hash());
19629 Register temp0 = ToRegister(ins->temp0());
19630 Register temp1 = ToRegister(ins->temp1());
19631 Register temp2 = ToRegister(ins->temp2());
19632 Register temp3 = ToRegister(ins->temp3());
19633 Register output = ToRegister(ins->output());
19635 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
19636 temp3);
// Set.prototype.has fast path for arbitrary values, using a precomputed hash.
19639 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
19640 Register setObj = ToRegister(ins->setObject());
19641 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
19642 Register hash = ToRegister(ins->hash());
19643 Register temp0 = ToRegister(ins->temp0());
19644 Register temp1 = ToRegister(ins->temp1());
19645 Register temp2 = ToRegister(ins->temp2());
19646 Register temp3 = ToRegister(ins->temp3());
19647 Register output = ToRegister(ins->output());
19649 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
19650 temp3);
// Set.prototype.has slow path: VM call.
19653 void CodeGenerator::visitSetObjectHasValueVMCall(
19654 LSetObjectHasValueVMCall* ins) {
19655 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
19656 pushArg(ToRegister(ins->setObject()));
19658 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19659 callVM<Fn, jit::SetObjectHas>(ins);
// Load a Set object's size into |output|.
19662 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
19663 Register setObj = ToRegister(ins->setObject());
19664 Register output = ToRegister(ins->output());
19666 masm.loadSetObjectSize(setObj, output);
// Map.prototype.has fast path for non-BigInt keys, using a precomputed hash.
19669 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
19670 Register mapObj = ToRegister(ins->mapObject());
19671 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
19672 Register hash = ToRegister(ins->hash());
19673 Register temp0 = ToRegister(ins->temp0());
19674 Register temp1 = ToRegister(ins->temp1());
19675 Register output = ToRegister(ins->output());
19677 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
// Map.prototype.has fast path for BigInt keys, using a precomputed hash.
19680 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
19681 Register mapObj = ToRegister(ins->mapObject());
19682 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
19683 Register hash = ToRegister(ins->hash());
19684 Register temp0 = ToRegister(ins->temp0());
19685 Register temp1 = ToRegister(ins->temp1());
19686 Register temp2 = ToRegister(ins->temp2());
19687 Register temp3 = ToRegister(ins->temp3());
19688 Register output = ToRegister(ins->output());
19690 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19691 temp3);
// Map.prototype.has fast path for arbitrary keys, using a precomputed hash.
19694 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
19695 Register mapObj = ToRegister(ins->mapObject());
19696 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
19697 Register hash = ToRegister(ins->hash());
19698 Register temp0 = ToRegister(ins->temp0());
19699 Register temp1 = ToRegister(ins->temp1());
19700 Register temp2 = ToRegister(ins->temp2());
19701 Register temp3 = ToRegister(ins->temp3());
19702 Register output = ToRegister(ins->output());
19704 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
19705 temp3);
// Map.prototype.has slow path: VM call.
19708 void CodeGenerator::visitMapObjectHasValueVMCall(
19709 LMapObjectHasValueVMCall* ins) {
19710 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
19711 pushArg(ToRegister(ins->mapObject()));
19713 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19714 callVM<Fn, jit::MapObjectHas>(ins);
// Map.prototype.get fast path for non-BigInt keys, using a precomputed hash.
// The output's scratch register doubles as an extra temp.
19717 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
19718 Register mapObj = ToRegister(ins->mapObject());
19719 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
19720 Register hash = ToRegister(ins->hash());
19721 Register temp0 = ToRegister(ins->temp0());
19722 Register temp1 = ToRegister(ins->temp1());
19723 ValueOperand output = ToOutValue(ins);
19725 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
19726 output.scratchReg());
// Map.prototype.get fast path for BigInt keys, using a precomputed hash.
// The output's scratch register doubles as an extra temp.
19729 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
19730 Register mapObj = ToRegister(ins->mapObject());
19731 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
19732 Register hash = ToRegister(ins->hash());
19733 Register temp0 = ToRegister(ins->temp0());
19734 Register temp1 = ToRegister(ins->temp1());
19735 Register temp2 = ToRegister(ins->temp2());
19736 Register temp3 = ToRegister(ins->temp3());
19737 ValueOperand output = ToOutValue(ins);
19739 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19740 temp3, output.scratchReg());
// Map.prototype.get fast path for arbitrary keys, using a precomputed hash.
// The output's scratch register doubles as an extra temp.
19743 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
19744 Register mapObj = ToRegister(ins->mapObject());
19745 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
19746 Register hash = ToRegister(ins->hash());
19747 Register temp0 = ToRegister(ins->temp0());
19748 Register temp1 = ToRegister(ins->temp1());
19749 Register temp2 = ToRegister(ins->temp2());
19750 Register temp3 = ToRegister(ins->temp3());
19751 ValueOperand output = ToOutValue(ins);
19753 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
19754 temp3, output.scratchReg());
19757 void CodeGenerator::visitMapObjectGetValueVMCall(
19758 LMapObjectGetValueVMCall* ins) {
19759 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
19760 pushArg(ToRegister(ins->mapObject()));
19762 using Fn =
19763 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
19764 callVM<Fn, jit::MapObjectGet>(ins);
19767 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
19768 Register mapObj = ToRegister(ins->mapObject());
19769 Register output = ToRegister(ins->output());
19771 masm.loadMapObjectSize(mapObj, output);
// Emit a direct Ion -> wasm call for an inlined wasm function export.
//
// Marshals the LIR operands into the wasm JIT-call ABI (register args are
// assumed to already sit in the right registers; stack args are collected in
// |stackArgs|), asserts the expected result-register convention, then emits
// the call via GenerateDirectCallFromJit and records a safepoint at it.
template <size_t NumDefs>
void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
  wasm::JitCallStackArgVector stackArgs;
  masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
  if (masm.oom()) {
    return;
  }

  MIonToWasmCall* mir = lir->mir();
  const wasm::FuncExport& funcExport = mir->funcExport();
  const wasm::FuncType& sig =
      mir->instance()->metadata().getFuncExportType(funcExport);

  // Walk the signature and the LIR operands in lockstep, classifying each
  // argument with the wasm ABI generator.
  WasmABIArgGenerator abi;
  for (size_t i = 0; i < lir->numOperands(); i++) {
    MIRType argMir;
    switch (sig.args()[i].kind()) {
      case wasm::ValType::I32:
      case wasm::ValType::I64:
      case wasm::ValType::F32:
      case wasm::ValType::F64:
        argMir = sig.args()[i].toMIRType();
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected argument type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // temporarilyUnsupportedReftypeForEntry() restricts args to externref
        MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
        // Argument is boxed on the JS side to an anyref, so passed as a
        // pointer here.
        argMir = sig.args()[i].toMIRType();
        break;
    }

    ABIArg arg = abi.next(argMir);
    switch (arg.kind()) {
      case ABIArg::GPR:
      case ABIArg::FPU: {
        // Register arguments were placed by regalloc; only a placeholder
        // entry is needed so stackArgs stays index-aligned with operands.
        MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
        stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
        break;
      }
      case ABIArg::Stack: {
        const LAllocation* larg = lir->getOperand(i);
        if (larg->isConstant()) {
          stackArgs.infallibleEmplaceBack(ToInt32(larg));
        } else if (larg->isGeneralReg()) {
          stackArgs.infallibleEmplaceBack(ToRegister(larg));
        } else if (larg->isFloatReg()) {
          stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
        } else {
          // Always use the stack pointer here because GenerateDirectCallFromJit
          // depends on this.
          Address addr = ToAddress<BaseRegForAddress::SP>(larg);
          stackArgs.infallibleEmplaceBack(addr);
        }
        break;
      }
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        MOZ_CRASH(
            "no way to pass i64, and wasm uses hardfp for function calls");
      }
#endif
      case ABIArg::Uninitialized: {
        MOZ_CRASH("Uninitialized ABIArg kind");
      }
    }
  }

  // Sanity-check that the result convention (MIR type and fixed return
  // register) matches the wasm signature. No code is emitted here.
  const wasm::ValTypeVector& results = sig.results();
  if (results.length() == 0) {
    MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case wasm::ValType::I32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
        MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
        break;
      case wasm::ValType::I64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
        MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
        break;
      case wasm::ValType::F32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
        break;
      case wasm::ValType::F64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
        break;
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      case wasm::ValType::Ref:
        // The wasm stubs layer unboxes anything that needs to be unboxed
        // and leaves it in a Value. A FuncRef/EqRef we could in principle
        // leave it as a raw object pointer but for now it complicates the
        // API to do so.
        MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
        break;
    }
  }

  WasmInstanceObject* instObj = lir->mir()->instanceObject();

  Register scratch = ToRegister(lir->temp());

  uint32_t callOffset;
  // Make sure there is room to patch an OSI point before the call.
  ensureOsiSpace();
  GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
                            scratch, &callOffset);

  // Add the instance object to the constant pool, so it is transferred to
  // the owning IonScript and so that it gets traced as long as the IonScript
  // lives.

  uint32_t unused;
  masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));

  markSafepointAt(callOffset, lir);
}
19897 void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
19898 emitIonToWasmCallBase(lir);
19900 void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
19901 emitIonToWasmCallBase(lir);
19903 void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
19904 emitIonToWasmCallBase(lir);
19907 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
19908 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
19911 void CodeGenerator::visitWasmFence(LWasmFence* lir) {
19912 MOZ_ASSERT(gen->compilingWasm());
19913 masm.memoryBarrier(MembarFull);
19916 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
19917 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
19918 Register output = ToRegister(lir->output());
19919 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
19921 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
19922 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
19923 lir, ArgList(input), StoreRegisterTo(output));
19924 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
19925 masm.bind(oolBoxValue->rejoin());
19928 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
19929 Register input = ToRegister(lir->input());
19930 Register output = ToRegister(lir->output());
19931 masm.convertObjectToWasmAnyRef(input, output);
19934 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
19935 Register input = ToRegister(lir->input());
19936 Register output = ToRegister(lir->output());
19937 masm.convertStringToWasmAnyRef(input, output);
19940 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
19941 Register value = ToRegister(lir->value());
19942 Register output = ToRegister(lir->output());
19943 masm.truncate32ToWasmI31Ref(value, output);
19946 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
19947 Register value = ToRegister(lir->value());
19948 Register output = ToRegister(lir->output());
19949 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
19950 masm.convertWasmI31RefTo32Signed(value, output);
19951 } else {
19952 masm.convertWasmI31RefTo32Unsigned(value, output);
19956 #ifdef FUZZING_JS_FUZZILLI
// Fold a double's raw 64-bit pattern into a 32-bit hash in |output|:
// output = low32(bits) + high32(bits). |scratch| is clobbered.
void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
                                           Register scratch, Register output) {
#  ifdef JS_PUNBOX64
  // 64-bit: move the bits into scratch, copy to output, shift output down to
  // its high half, then add the low half (still in scratch).
  Register64 reg64_1(scratch);
  Register64 reg64_2(output);
  masm.moveDoubleToGPR64(floatDouble, reg64_1);
  masm.move64(reg64_1, reg64_2);
  masm.rshift64(Imm32(32), reg64_2);
  masm.add32(scratch, output);
#  else
  // 32-bit: the register pair receives low bits in |scratch| and high bits in
  // |output|, so a single add combines the two halves.
  Register64 reg64(scratch, output);
  masm.moveDoubleToGPR64(floatDouble, reg64);
  masm.add32(scratch, output);
#  endif
}
// Hash a JSObject* into |output| for Fuzzilli by calling into the VM.
// Note: the jump to the OOL entry is unconditional — there is no inline fast
// path; the OOL stub performs the VM call and rejoins immediately after.
void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
                                           Register output) {
  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
      lir, ArgList(obj), StoreRegisterTo(output));

  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
19983 void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
19984 LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
19985 FloatRegisterSet::All());
19986 volatileRegs.takeUnchecked(output);
19987 masm.PushRegsInMask(volatileRegs);
19989 using Fn = uint32_t (*)(BigInt* bigInt);
19990 masm.setupUnalignedABICall(output);
19991 masm.passABIArg(bigInt);
19992 masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
19993 masm.storeCallInt32Result(output);
19995 masm.PopRegsInMask(volatileRegs);
// Compute the Fuzzilli hash of a boxed Value. Dispatches on the value's tag:
// BigInts and objects take dedicated helper paths; int32/null/undefined/
// boolean are mapped to a double (null -> 1, undefined -> 2, bool -> b+3) and
// hashed via emitFuzzilliHashDouble; any other tag hashes to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

#  ifdef JS_PUNBOX64
  // On 64-bit the tag shares the payload word; extract it into a register.
  // Note: tagReg aliases |scratch|, so the tag is only valid until the first
  // unbox below clobbers it — each test happens before its unbox.
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
#  else
  // On 32-bit the type tag already lives in its own register.
  Register tagReg = value.typeReg();
#  endif

  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  // null hashes as the double 1.0.
  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  // undefined hashes as the double 2.0.
  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  // Booleans hash as the double (b + 3), i.e. 3.0 or 4.0.
  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  // Canonicalize NaNs so the hash is deterministic across runs.
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);

  // Any remaining tag (e.g. strings/symbols) hashes to 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
// Compute the Fuzzilli hash of an unboxed (typed) operand. Mirrors the tagged
// paths in visitFuzzilliHashV: objects and BigInts use their helpers; numeric
// and singleton types are first converted to a double (null -> 1,
// undefined -> 2, bool -> b+3) and hashed via emitFuzzilliHashDouble.
void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
  const LAllocation* value = ins->value();
  MIRType mirType = ins->mir()->getOperand(0)->type();

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  if (mirType == MIRType::Object) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashObject(ins, scratch, output);
  } else if (mirType == MIRType::BigInt) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashBigInt(scratch, output);
  } else if (mirType == MIRType::Double) {
    MOZ_ASSERT(value->isFloatReg());
    masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
    // Canonicalize NaNs so the hash is deterministic across runs.
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Float32) {
    MOZ_ASSERT(value->isFloatReg());
    // Widen to double so float32 hashes the same as its double value.
    masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Int32) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Null) {
    // null has no payload register; hash as the double 1.0.
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(1), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Undefined) {
    // undefined has no payload register; hash as the double 2.0.
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(2), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Boolean) {
    // Booleans hash as the double (b + 3), i.e. 3.0 or 4.0.
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else {
    MOZ_CRASH("unexpected type");
  }
}
// Mix an int32 hash contribution into the per-context Fuzzilli execution
// hash: bump cx->executionHashInputs, then set
// cx->executionHash = rotl(executionHash + value, 1).
void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
  const LAllocation* value = ins->value();
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
  MOZ_ASSERT(value->isGeneralReg());

  Register scratchJSContext = ToRegister(ins->getTemp(0));
  Register scratch = ToRegister(ins->getTemp(1));

  masm.loadJSContext(scratchJSContext);

  // stats: count how many inputs have been folded into the hash.
  Address addrExecHashInputs(scratchJSContext,
                             offsetof(JSContext, executionHashInputs));
  masm.load32(addrExecHashInputs, scratch);
  masm.add32(Imm32(1), scratch);
  masm.store32(scratch, addrExecHashInputs);

  // Fold the value in: hash = rotl(hash + value, 1).
  Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
  masm.load32(addrExecHash, scratch);
  masm.add32(value->toGeneralReg()->reg(), scratch);
  masm.rotateLeft(Imm32(1), scratch, scratch);
  masm.store32(scratch, addrExecHash);
}
20158 #endif
20160 static_assert(!std::is_polymorphic_v<CodeGenerator>,
20161 "CodeGenerator should not have any virtual methods");
20163 } // namespace jit
20164 } // namespace js