Bug 1875096 - Part 1: Only apply StepBackToLeadSurrogate for atom matches. r=iain
[gecko.git] / js / src / jit / CodeGenerator.cpp
blob0592aa0b5a50570a5dc2732ffb6d1f20280bbb92
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/CodeGenerator.h"
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/EnumSet.h"
16 #include "mozilla/IntegerTypeTraits.h"
17 #include "mozilla/Latin1.h"
18 #include "mozilla/MathAlgorithms.h"
19 #include "mozilla/ScopeExit.h"
20 #include "mozilla/SIMD.h"
22 #include <limits>
23 #include <type_traits>
24 #include <utility>
26 #include "jslibmath.h"
27 #include "jsmath.h"
28 #include "jsnum.h"
30 #include "builtin/MapObject.h"
31 #include "builtin/RegExp.h"
32 #include "builtin/String.h"
33 #include "irregexp/RegExpTypes.h"
34 #include "jit/ABIArgGenerator.h"
35 #include "jit/CompileInfo.h"
36 #include "jit/InlineScriptTree.h"
37 #include "jit/Invalidation.h"
38 #include "jit/IonGenericCallStub.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRuntime.h"
44 #include "jit/JitSpewer.h"
45 #include "jit/JitZone.h"
46 #include "jit/Linker.h"
47 #include "jit/MIRGenerator.h"
48 #include "jit/MoveEmitter.h"
49 #include "jit/RangeAnalysis.h"
50 #include "jit/RegExpStubConstants.h"
51 #include "jit/SafepointIndex.h"
52 #include "jit/SharedICHelpers.h"
53 #include "jit/SharedICRegisters.h"
54 #include "jit/VMFunctions.h"
55 #include "jit/WarpSnapshot.h"
56 #include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
57 #include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h" // JS::RegExpFlag
60 #include "js/ScalarType.h" // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "proxy/ScriptedProxyHandler.h"
63 #include "util/CheckedArithmetic.h"
64 #include "util/Unicode.h"
65 #include "vm/ArrayBufferViewObject.h"
66 #include "vm/AsyncFunction.h"
67 #include "vm/AsyncIteration.h"
68 #include "vm/BuiltinObjectKind.h"
69 #include "vm/FunctionFlags.h" // js::FunctionFlags
70 #include "vm/Interpreter.h"
71 #include "vm/JSAtomUtils.h" // AtomizeString
72 #include "vm/MatchPairs.h"
73 #include "vm/RegExpObject.h"
74 #include "vm/RegExpStatics.h"
75 #include "vm/StaticStrings.h"
76 #include "vm/StringObject.h"
77 #include "vm/StringType.h"
78 #include "vm/TypedArrayObject.h"
79 #include "wasm/WasmCodegenConstants.h"
80 #include "wasm/WasmValType.h"
81 #ifdef MOZ_VTUNE
82 # include "vtune/VTuneWrapper.h"
83 #endif
84 #include "wasm/WasmBinary.h"
85 #include "wasm/WasmGC.h"
86 #include "wasm/WasmGcObject.h"
87 #include "wasm/WasmStubs.h"
89 #include "builtin/Boolean-inl.h"
90 #include "jit/MacroAssembler-inl.h"
91 #include "jit/shared/CodeGenerator-shared-inl.h"
92 #include "jit/TemplateObject-inl.h"
93 #include "jit/VMFunctionList-inl.h"
94 #include "vm/JSScript-inl.h"
95 #include "wasm/WasmInstance-inl.h"
97 using namespace js;
98 using namespace js::jit;
100 using JS::GenericNaN;
101 using mozilla::AssertedCast;
102 using mozilla::DebugOnly;
103 using mozilla::FloatingPoint;
104 using mozilla::Maybe;
105 using mozilla::NegativeInfinity;
106 using mozilla::PositiveInfinity;
108 using JS::ExpandoAndGeneration;
110 namespace js {
111 namespace jit {
113 #ifdef CHECK_OSIPOINT_REGISTERS
114 template <class Op>
115 static void HandleRegisterDump(Op op, MacroAssembler& masm,
116 LiveRegisterSet liveRegs, Register activation,
117 Register scratch) {
118 const size_t baseOffset = JitActivation::offsetOfRegs();
120 // Handle live GPRs.
121 for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
122 Register reg = *iter;
123 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
125 if (reg == activation) {
126 // To use the original value of the activation register (that's
127 // now on top of the stack), we need the scratch register.
128 masm.push(scratch);
129 masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
130 op(scratch, dump);
131 masm.pop(scratch);
132 } else {
133 op(reg, dump);
137 // Handle live FPRs.
138 for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
139 FloatRegister reg = *iter;
140 Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
141 op(reg, dump);
145 class StoreOp {
146 MacroAssembler& masm;
148 public:
149 explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
151 void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
152 void operator()(FloatRegister reg, Address dump) {
153 if (reg.isDouble()) {
154 masm.storeDouble(reg, dump);
155 } else if (reg.isSingle()) {
156 masm.storeFloat32(reg, dump);
157 } else if (reg.isSimd128()) {
158 MOZ_CRASH("Unexpected case for SIMD");
159 } else {
160 MOZ_CRASH("Unexpected register type.");
165 class VerifyOp {
166 MacroAssembler& masm;
167 Label* failure_;
169 public:
170 VerifyOp(MacroAssembler& masm, Label* failure)
171 : masm(masm), failure_(failure) {}
173 void operator()(Register reg, Address dump) {
174 masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
176 void operator()(FloatRegister reg, Address dump) {
177 if (reg.isDouble()) {
178 ScratchDoubleScope scratch(masm);
179 masm.loadDouble(dump, scratch);
180 masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
181 } else if (reg.isSingle()) {
182 ScratchFloat32Scope scratch(masm);
183 masm.loadFloat32(dump, scratch);
184 masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
185 } else if (reg.isSimd128()) {
186 MOZ_CRASH("Unexpected case for SIMD");
187 } else {
188 MOZ_CRASH("Unexpected register type.");
193 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
194 // Ensure the live registers stored by callVM did not change between
195 // the call and this OsiPoint. Try-catch relies on this invariant.
197 // Load pointer to the JitActivation in a scratch register.
198 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
199 Register scratch = allRegs.takeAny();
200 masm.push(scratch);
201 masm.loadJitActivation(scratch);
203 // If we should not check registers (because the instruction did not call
204 // into the VM, or a GC happened), we're done.
205 Label failure, done;
206 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
207 masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
209 // Having more than one VM function call made in one visit function at
210 // runtime is a sec-ciritcal error, because if we conservatively assume that
211 // one of the function call can re-enter Ion, then the invalidation process
212 // will potentially add a call at a random location, by patching the code
213 // before the return address.
214 masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
216 // Set checkRegs to 0, so that we don't try to verify registers after we
217 // return from this script to the caller.
218 masm.store32(Imm32(0), checkRegs);
220 // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
221 // temps after calling into the VM. This is fine because no other
222 // instructions (including this OsiPoint) will depend on them. Also
223 // backtracking can also use the same register for an input and an output.
224 // These are marked as clobbered and shouldn't get checked.
225 LiveRegisterSet liveRegs;
226 liveRegs.set() = RegisterSet::Intersect(
227 safepoint->liveRegs().set(),
228 RegisterSet::Not(safepoint->clobberedRegs().set()));
230 VerifyOp op(masm, &failure);
231 HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
233 masm.jump(&done);
235 // Do not profile the callWithABI that occurs below. This is to avoid a
236 // rare corner case that occurs when profiling interacts with itself:
238 // When slow profiling assertions are turned on, FunctionBoundary ops
239 // (which update the profiler pseudo-stack) may emit a callVM, which
240 // forces them to have an osi point associated with them. The
241 // FunctionBoundary for inline function entry is added to the caller's
242 // graph with a PC from the caller's code, but during codegen it modifies
243 // Gecko Profiler instrumentation to add the callee as the current top-most
244 // script. When codegen gets to the OSIPoint, and the callWithABI below is
245 // emitted, the codegen thinks that the current frame is the callee, but
246 // the PC it's using from the OSIPoint refers to the caller. This causes
247 // the profiler instrumentation of the callWithABI below to ASSERT, since
248 // the script and pc are mismatched. To avoid this, we simply omit
249 // instrumentation for these callWithABIs.
251 // Any live register captured by a safepoint (other than temp registers)
252 // must remain unchanged between the call and the OsiPoint instruction.
253 masm.bind(&failure);
254 masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
256 masm.bind(&done);
257 masm.pop(scratch);
260 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
261 if (!checkOsiPointRegisters) {
262 return false;
265 if (safepoint->liveRegs().emptyGeneral() &&
266 safepoint->liveRegs().emptyFloat()) {
267 return false; // No registers to check.
270 return true;
273 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
274 if (!shouldVerifyOsiPointRegs(safepoint)) {
275 return;
278 // Set checkRegs to 0. If we perform a VM call, the instruction
279 // will set it to 1.
280 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
281 Register scratch = allRegs.takeAny();
282 masm.push(scratch);
283 masm.loadJitActivation(scratch);
284 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
285 masm.store32(Imm32(0), checkRegs);
286 masm.pop(scratch);
289 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
290 // Store a copy of all live registers before performing the call.
291 // When we reach the OsiPoint, we can use this to check nothing
292 // modified them in the meantime.
294 // Load pointer to the JitActivation in a scratch register.
295 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
296 Register scratch = allRegs.takeAny();
297 masm.push(scratch);
298 masm.loadJitActivation(scratch);
300 Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
301 masm.add32(Imm32(1), checkRegs);
303 StoreOp op(masm);
304 HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
306 masm.pop(scratch);
308 #endif // CHECK_OSIPOINT_REGISTERS
310 // Before doing any call to Cpp, you should ensure that volatile
311 // registers are evicted by the register allocator.
312 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
313 TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
314 const VMFunctionData& fun = GetVMFunction(id);
316 // Stack is:
317 // ... frame ...
318 // [args]
319 #ifdef DEBUG
320 MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
321 pushedArgs_ = 0;
322 #endif
324 #ifdef CHECK_OSIPOINT_REGISTERS
325 if (shouldVerifyOsiPointRegs(ins->safepoint())) {
326 StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
328 #endif
330 #ifdef DEBUG
331 if (ins->mirRaw()) {
332 MOZ_ASSERT(ins->mirRaw()->isInstruction());
333 MInstruction* mir = ins->mirRaw()->toInstruction();
334 MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
336 // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
337 // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
338 // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
339 // interrupt callbacks can call JS (chrome JS or shell testing functions).
340 bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
341 if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
342 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
343 masm.move32(Imm32(1), ReturnReg);
344 masm.store32(ReturnReg, AbsoluteAddress(addr));
347 #endif
349 // Push an exit frame descriptor.
350 masm.PushFrameDescriptor(FrameType::IonJS);
352 // Call the wrapper function. The wrapper is in charge to unwind the stack
353 // when returning from the call. Failures are handled with exceptions based
354 // on the return value of the C functions. To guard the outcome of the
355 // returned value, use another LIR instruction.
356 ensureOsiSpace();
357 uint32_t callOffset = masm.callJit(code);
358 markSafepointAt(callOffset, ins);
360 #ifdef DEBUG
361 // Reset the disallowArbitraryCode flag after the call.
363 const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
364 masm.push(ReturnReg);
365 masm.move32(Imm32(0), ReturnReg);
366 masm.store32(ReturnReg, AbsoluteAddress(addr));
367 masm.pop(ReturnReg);
369 #endif
371 // Pop rest of the exit frame and the arguments left on the stack.
372 int framePop =
373 sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
374 masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
376 // Stack is:
377 // ... frame ...
380 template <typename Fn, Fn fn>
381 void CodeGenerator::callVM(LInstruction* ins) {
382 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
383 callVMInternal(id, ins);
// ArgSeq stores the arguments for an OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of that function is an instance of a class which provides a
// "generate" method in charge of pushing the arguments, with "pushArg", for a
// VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq", where the types of the arguments
// are inferred from the types of the arguments passed in.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
//   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
402 template <typename... ArgTypes>
403 class ArgSeq {
404 std::tuple<std::remove_reference_t<ArgTypes>...> args_;
406 template <std::size_t... ISeq>
407 inline void generate(CodeGenerator* codegen,
408 std::index_sequence<ISeq...>) const {
409 // Arguments are pushed in reverse order, from last argument to first
410 // argument.
411 (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
414 public:
415 explicit ArgSeq(ArgTypes&&... args)
416 : args_(std::forward<ArgTypes>(args)...) {}
418 inline void generate(CodeGenerator* codegen) const {
419 generate(codegen, std::index_sequence_for<ArgTypes...>{});
422 #ifdef DEBUG
423 static constexpr size_t numArgs = sizeof...(ArgTypes);
424 #endif
427 template <typename... ArgTypes>
428 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
429 return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
432 // Store wrappers, to generate the right move of data after the VM call.
434 struct StoreNothing {
435 inline void generate(CodeGenerator* codegen) const {}
436 inline LiveRegisterSet clobbered() const {
437 return LiveRegisterSet(); // No register gets clobbered
441 class StoreRegisterTo {
442 private:
443 Register out_;
445 public:
446 explicit StoreRegisterTo(Register out) : out_(out) {}
448 inline void generate(CodeGenerator* codegen) const {
449 // It's okay to use storePointerResultTo here - the VMFunction wrapper
450 // ensures the upper bytes are zero for bool/int32 return values.
451 codegen->storePointerResultTo(out_);
453 inline LiveRegisterSet clobbered() const {
454 LiveRegisterSet set;
455 set.add(out_);
456 return set;
460 class StoreFloatRegisterTo {
461 private:
462 FloatRegister out_;
464 public:
465 explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
467 inline void generate(CodeGenerator* codegen) const {
468 codegen->storeFloatResultTo(out_);
470 inline LiveRegisterSet clobbered() const {
471 LiveRegisterSet set;
472 set.add(out_);
473 return set;
477 template <typename Output>
478 class StoreValueTo_ {
479 private:
480 Output out_;
482 public:
483 explicit StoreValueTo_(const Output& out) : out_(out) {}
485 inline void generate(CodeGenerator* codegen) const {
486 codegen->storeResultValueTo(out_);
488 inline LiveRegisterSet clobbered() const {
489 LiveRegisterSet set;
490 set.add(out_);
491 return set;
495 template <typename Output>
496 StoreValueTo_<Output> StoreValueTo(const Output& out) {
497 return StoreValueTo_<Output>(out);
500 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
501 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
502 private:
503 LInstruction* lir_;
504 ArgSeq args_;
505 StoreOutputTo out_;
507 public:
508 OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
509 const StoreOutputTo& out)
510 : lir_(lir), args_(args), out_(out) {}
512 void accept(CodeGenerator* codegen) override {
513 codegen->visitOutOfLineCallVM(this);
516 LInstruction* lir() const { return lir_; }
517 const ArgSeq& args() const { return args_; }
518 const StoreOutputTo& out() const { return out_; }
521 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
522 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
523 const StoreOutputTo& out) {
524 MOZ_ASSERT(lir->mirRaw());
525 MOZ_ASSERT(lir->mirRaw()->isInstruction());
527 #ifdef DEBUG
528 VMFunctionId id = VMFunctionToId<Fn, fn>::id;
529 const VMFunctionData& fun = GetVMFunction(id);
530 MOZ_ASSERT(fun.explicitArgs == args.numArgs);
531 MOZ_ASSERT(fun.returnsData() !=
532 (std::is_same_v<StoreOutputTo, StoreNothing>));
533 #endif
535 OutOfLineCode* ool = new (alloc())
536 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
537 addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
538 return ool;
541 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
542 void CodeGenerator::visitOutOfLineCallVM(
543 OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
544 LInstruction* lir = ool->lir();
546 #ifdef JS_JITSPEW
547 JitSpewStart(JitSpew_Codegen, " # LIR=%s",
548 lir->opName());
549 if (const char* extra = lir->getExtraName()) {
550 JitSpewCont(JitSpew_Codegen, ":%s", extra);
552 JitSpewFin(JitSpew_Codegen);
553 #endif
554 perfSpewer_.recordInstruction(masm, lir);
555 saveLive(lir);
556 ool->args().generate(this);
557 callVM<Fn, fn>(lir);
558 ool->out().generate(this);
559 restoreLiveIgnore(lir, ool->out().clobbered());
560 masm.jump(ool->rejoin());
563 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
564 private:
565 LInstruction* lir_;
566 size_t cacheIndex_;
567 size_t cacheInfoIndex_;
569 public:
570 OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
571 size_t cacheInfoIndex)
572 : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
574 void bind(MacroAssembler* masm) override {
575 // The binding of the initial jump is done in
576 // CodeGenerator::visitOutOfLineICFallback.
579 size_t cacheIndex() const { return cacheIndex_; }
580 size_t cacheInfoIndex() const { return cacheInfoIndex_; }
581 LInstruction* lir() const { return lir_; }
583 void accept(CodeGenerator* codegen) override {
584 codegen->visitOutOfLineICFallback(this);
588 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
589 if (cacheIndex == SIZE_MAX) {
590 masm.setOOM();
591 return;
594 DataPtr<IonIC> cache(this, cacheIndex);
595 MInstruction* mir = lir->mirRaw()->toInstruction();
596 cache->setScriptedLocation(mir->block()->info().script(),
597 mir->resumePoint()->pc());
599 Register temp = cache->scratchRegisterForEntryJump();
600 icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
601 masm.jump(Address(temp, 0));
603 MOZ_ASSERT(!icInfo_.empty());
605 OutOfLineICFallback* ool =
606 new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
607 addOutOfLineCode(ool, mir);
609 masm.bind(ool->rejoin());
610 cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
613 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
614 LInstruction* lir = ool->lir();
615 size_t cacheIndex = ool->cacheIndex();
616 size_t cacheInfoIndex = ool->cacheInfoIndex();
618 DataPtr<IonIC> ic(this, cacheIndex);
620 // Register the location of the OOL path in the IC.
621 ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
623 switch (ic->kind()) {
624 case CacheKind::GetProp:
625 case CacheKind::GetElem: {
626 IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
628 saveLive(lir);
630 pushArg(getPropIC->id());
631 pushArg(getPropIC->value());
632 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
633 pushArg(ImmGCPtr(gen->outerInfo().script()));
635 using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
636 HandleValue, HandleValue, MutableHandleValue);
637 callVM<Fn, IonGetPropertyIC::update>(lir);
639 StoreValueTo(getPropIC->output()).generate(this);
640 restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
642 masm.jump(ool->rejoin());
643 return;
645 case CacheKind::GetPropSuper:
646 case CacheKind::GetElemSuper: {
647 IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
649 saveLive(lir);
651 pushArg(getPropSuperIC->id());
652 pushArg(getPropSuperIC->receiver());
653 pushArg(getPropSuperIC->object());
654 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
655 pushArg(ImmGCPtr(gen->outerInfo().script()));
657 using Fn =
658 bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
659 HandleValue, HandleValue, MutableHandleValue);
660 callVM<Fn, IonGetPropSuperIC::update>(lir);
662 StoreValueTo(getPropSuperIC->output()).generate(this);
663 restoreLiveIgnore(lir,
664 StoreValueTo(getPropSuperIC->output()).clobbered());
666 masm.jump(ool->rejoin());
667 return;
669 case CacheKind::SetProp:
670 case CacheKind::SetElem: {
671 IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
673 saveLive(lir);
675 pushArg(setPropIC->rhs());
676 pushArg(setPropIC->id());
677 pushArg(setPropIC->object());
678 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
679 pushArg(ImmGCPtr(gen->outerInfo().script()));
681 using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
682 HandleObject, HandleValue, HandleValue);
683 callVM<Fn, IonSetPropertyIC::update>(lir);
685 restoreLive(lir);
687 masm.jump(ool->rejoin());
688 return;
690 case CacheKind::GetName: {
691 IonGetNameIC* getNameIC = ic->asGetNameIC();
693 saveLive(lir);
695 pushArg(getNameIC->environment());
696 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
697 pushArg(ImmGCPtr(gen->outerInfo().script()));
699 using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
700 MutableHandleValue);
701 callVM<Fn, IonGetNameIC::update>(lir);
703 StoreValueTo(getNameIC->output()).generate(this);
704 restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
706 masm.jump(ool->rejoin());
707 return;
709 case CacheKind::BindName: {
710 IonBindNameIC* bindNameIC = ic->asBindNameIC();
712 saveLive(lir);
714 pushArg(bindNameIC->environment());
715 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
716 pushArg(ImmGCPtr(gen->outerInfo().script()));
718 using Fn =
719 JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
720 callVM<Fn, IonBindNameIC::update>(lir);
722 StoreRegisterTo(bindNameIC->output()).generate(this);
723 restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
725 masm.jump(ool->rejoin());
726 return;
728 case CacheKind::GetIterator: {
729 IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
731 saveLive(lir);
733 pushArg(getIteratorIC->value());
734 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
735 pushArg(ImmGCPtr(gen->outerInfo().script()));
737 using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
738 HandleValue);
739 callVM<Fn, IonGetIteratorIC::update>(lir);
741 StoreRegisterTo(getIteratorIC->output()).generate(this);
742 restoreLiveIgnore(lir,
743 StoreRegisterTo(getIteratorIC->output()).clobbered());
745 masm.jump(ool->rejoin());
746 return;
748 case CacheKind::OptimizeSpreadCall: {
749 auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
751 saveLive(lir);
753 pushArg(optimizeSpreadCallIC->value());
754 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
755 pushArg(ImmGCPtr(gen->outerInfo().script()));
757 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
758 HandleValue, MutableHandleValue);
759 callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
761 StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
762 restoreLiveIgnore(
763 lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
765 masm.jump(ool->rejoin());
766 return;
768 case CacheKind::In: {
769 IonInIC* inIC = ic->asInIC();
771 saveLive(lir);
773 pushArg(inIC->object());
774 pushArg(inIC->key());
775 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
776 pushArg(ImmGCPtr(gen->outerInfo().script()));
778 using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
779 HandleObject, bool*);
780 callVM<Fn, IonInIC::update>(lir);
782 StoreRegisterTo(inIC->output()).generate(this);
783 restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
785 masm.jump(ool->rejoin());
786 return;
788 case CacheKind::HasOwn: {
789 IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
791 saveLive(lir);
793 pushArg(hasOwnIC->id());
794 pushArg(hasOwnIC->value());
795 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
796 pushArg(ImmGCPtr(gen->outerInfo().script()));
798 using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
799 HandleValue, int32_t*);
800 callVM<Fn, IonHasOwnIC::update>(lir);
802 StoreRegisterTo(hasOwnIC->output()).generate(this);
803 restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
805 masm.jump(ool->rejoin());
806 return;
808 case CacheKind::CheckPrivateField: {
809 IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
811 saveLive(lir);
813 pushArg(checkPrivateFieldIC->id());
814 pushArg(checkPrivateFieldIC->value());
816 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
817 pushArg(ImmGCPtr(gen->outerInfo().script()));
819 using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
820 HandleValue, HandleValue, bool*);
821 callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
823 StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
824 restoreLiveIgnore(
825 lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
827 masm.jump(ool->rejoin());
828 return;
830 case CacheKind::InstanceOf: {
831 IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
833 saveLive(lir);
835 pushArg(hasInstanceOfIC->rhs());
836 pushArg(hasInstanceOfIC->lhs());
837 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
838 pushArg(ImmGCPtr(gen->outerInfo().script()));
840 using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
841 HandleValue lhs, HandleObject rhs, bool* res);
842 callVM<Fn, IonInstanceOfIC::update>(lir);
844 StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
845 restoreLiveIgnore(lir,
846 StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
848 masm.jump(ool->rejoin());
849 return;
851 case CacheKind::UnaryArith: {
852 IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
854 saveLive(lir);
856 pushArg(unaryArithIC->input());
857 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
858 pushArg(ImmGCPtr(gen->outerInfo().script()));
860 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
861 IonUnaryArithIC* stub, HandleValue val,
862 MutableHandleValue res);
863 callVM<Fn, IonUnaryArithIC::update>(lir);
865 StoreValueTo(unaryArithIC->output()).generate(this);
866 restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
868 masm.jump(ool->rejoin());
869 return;
871 case CacheKind::ToPropertyKey: {
872 IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
874 saveLive(lir);
876 pushArg(toPropertyKeyIC->input());
877 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
878 pushArg(ImmGCPtr(gen->outerInfo().script()));
880 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
881 IonToPropertyKeyIC* ic, HandleValue val,
882 MutableHandleValue res);
883 callVM<Fn, IonToPropertyKeyIC::update>(lir);
885 StoreValueTo(toPropertyKeyIC->output()).generate(this);
886 restoreLiveIgnore(lir,
887 StoreValueTo(toPropertyKeyIC->output()).clobbered());
889 masm.jump(ool->rejoin());
890 return;
892 case CacheKind::BinaryArith: {
893 IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
895 saveLive(lir);
897 pushArg(binaryArithIC->rhs());
898 pushArg(binaryArithIC->lhs());
899 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
900 pushArg(ImmGCPtr(gen->outerInfo().script()));
902 using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
903 IonBinaryArithIC* stub, HandleValue lhs,
904 HandleValue rhs, MutableHandleValue res);
905 callVM<Fn, IonBinaryArithIC::update>(lir);
907 StoreValueTo(binaryArithIC->output()).generate(this);
908 restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
910 masm.jump(ool->rejoin());
911 return;
913 case CacheKind::Compare: {
914 IonCompareIC* compareIC = ic->asCompareIC();
916 saveLive(lir);
918 pushArg(compareIC->rhs());
919 pushArg(compareIC->lhs());
920 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
921 pushArg(ImmGCPtr(gen->outerInfo().script()));
923 using Fn =
924 bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
925 HandleValue lhs, HandleValue rhs, bool* res);
926 callVM<Fn, IonCompareIC::update>(lir);
928 StoreRegisterTo(compareIC->output()).generate(this);
929 restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
931 masm.jump(ool->rejoin());
932 return;
934 case CacheKind::CloseIter: {
935 IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
937 saveLive(lir);
939 pushArg(closeIterIC->iter());
940 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
941 pushArg(ImmGCPtr(gen->outerInfo().script()));
943 using Fn =
944 bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
945 callVM<Fn, IonCloseIterIC::update>(lir);
947 restoreLive(lir);
949 masm.jump(ool->rejoin());
950 return;
952 case CacheKind::OptimizeGetIterator: {
953 auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
955 saveLive(lir);
957 pushArg(optimizeGetIteratorIC->value());
958 icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
959 pushArg(ImmGCPtr(gen->outerInfo().script()));
961 using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
962 HandleValue, bool* res);
963 callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
965 StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
966 restoreLiveIgnore(
967 lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
969 masm.jump(ool->rejoin());
970 return;
972 case CacheKind::Call:
973 case CacheKind::TypeOf:
974 case CacheKind::ToBool:
975 case CacheKind::GetIntrinsic:
976 case CacheKind::NewArray:
977 case CacheKind::NewObject:
978 MOZ_CRASH("Unsupported IC");
980 MOZ_CRASH();
983 StringObject* MNewStringObject::templateObj() const {
984 return &templateObj_->as<StringObject>();
987 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
988 MacroAssembler* masm)
989 : CodeGeneratorSpecific(gen, graph, masm),
990 ionScriptLabels_(gen->alloc()),
991 ionNurseryObjectLabels_(gen->alloc()),
992 scriptCounts_(nullptr),
993 zoneStubsToReadBarrier_(0) {}
995 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
997 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
998 ValueOperand operand = ToValue(lir, LValueToInt32::Input);
999 Register output = ToRegister(lir->output());
1000 FloatRegister temp = ToFloatRegister(lir->tempFloat());
1002 Label fails;
1003 if (lir->mode() == LValueToInt32::TRUNCATE) {
1004 OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
1006 // We can only handle strings in truncation contexts, like bitwise
1007 // operations.
1008 Register stringReg = ToRegister(lir->temp());
1009 using Fn = bool (*)(JSContext*, JSString*, double*);
1010 auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
1011 StoreFloatRegisterTo(temp));
1012 Label* stringEntry = oolString->entry();
1013 Label* stringRejoin = oolString->rejoin();
1015 masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
1016 oolDouble->entry(), stringReg, temp, output,
1017 &fails);
1018 masm.bind(oolDouble->rejoin());
1019 } else {
1020 MOZ_ASSERT(lir->mode() == LValueToInt32::NORMAL);
1021 masm.convertValueToInt32(operand, temp, output, &fails,
1022 lir->mirNormal()->needsNegativeZeroCheck(),
1023 lir->mirNormal()->conversion());
1026 bailoutFrom(&fails, lir->snapshot());
1029 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1030 ValueOperand operand = ToValue(lir, LValueToDouble::InputIndex);
1031 FloatRegister output = ToFloatRegister(lir->output());
1033 // Set if we can handle other primitives beside strings, as long as they're
1034 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1035 // booleans, undefined, and null.
1036 bool hasNonStringPrimitives =
1037 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1039 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1042 ScratchTagScope tag(masm, operand);
1043 masm.splitTagForTest(operand, tag);
1045 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1046 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1048 if (hasNonStringPrimitives) {
1049 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1050 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1051 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1055 bailout(lir->snapshot());
1057 if (hasNonStringPrimitives) {
1058 masm.bind(&isNull);
1059 masm.loadConstantDouble(0.0, output);
1060 masm.jump(&done);
1063 if (hasNonStringPrimitives) {
1064 masm.bind(&isUndefined);
1065 masm.loadConstantDouble(GenericNaN(), output);
1066 masm.jump(&done);
1069 if (hasNonStringPrimitives) {
1070 masm.bind(&isBool);
1071 masm.boolValueToDouble(operand, output);
1072 masm.jump(&done);
1075 masm.bind(&isInt32);
1076 masm.int32ValueToDouble(operand, output);
1077 masm.jump(&done);
1079 masm.bind(&isDouble);
1080 masm.unboxDouble(operand, output);
1081 masm.bind(&done);
1084 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1085 ValueOperand operand = ToValue(lir, LValueToFloat32::InputIndex);
1086 FloatRegister output = ToFloatRegister(lir->output());
1088 // Set if we can handle other primitives beside strings, as long as they're
1089 // guaranteed to never throw. This rules out symbols and BigInts, but allows
1090 // booleans, undefined, and null.
1091 bool hasNonStringPrimitives =
1092 lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1094 Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1097 ScratchTagScope tag(masm, operand);
1098 masm.splitTagForTest(operand, tag);
1100 masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1101 masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1103 if (hasNonStringPrimitives) {
1104 masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1105 masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1106 masm.branchTestNull(Assembler::Equal, tag, &isNull);
1110 bailout(lir->snapshot());
1112 if (hasNonStringPrimitives) {
1113 masm.bind(&isNull);
1114 masm.loadConstantFloat32(0.0f, output);
1115 masm.jump(&done);
1118 if (hasNonStringPrimitives) {
1119 masm.bind(&isUndefined);
1120 masm.loadConstantFloat32(float(GenericNaN()), output);
1121 masm.jump(&done);
1124 if (hasNonStringPrimitives) {
1125 masm.bind(&isBool);
1126 masm.boolValueToFloat32(operand, output);
1127 masm.jump(&done);
1130 masm.bind(&isInt32);
1131 masm.int32ValueToFloat32(operand, output);
1132 masm.jump(&done);
1134 masm.bind(&isDouble);
1135 // ARM and MIPS may not have a double register available if we've
1136 // allocated output as a float32.
1137 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
1138 ScratchDoubleScope fpscratch(masm);
1139 masm.unboxDouble(operand, fpscratch);
1140 masm.convertDoubleToFloat32(fpscratch, output);
1141 #else
1142 masm.unboxDouble(operand, output);
1143 masm.convertDoubleToFloat32(output, output);
1144 #endif
1145 masm.bind(&done);
1148 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
1149 ValueOperand operand = ToValue(lir, LValueToBigInt::InputIndex);
1150 Register output = ToRegister(lir->output());
1152 using Fn = BigInt* (*)(JSContext*, HandleValue);
1153 auto* ool =
1154 oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
1156 Register tag = masm.extractTag(operand, output);
1158 Label notBigInt, done;
1159 masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
1160 masm.unboxBigInt(operand, output);
1161 masm.jump(&done);
1162 masm.bind(&notBigInt);
1164 masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
1165 masm.branchTestString(Assembler::Equal, tag, ool->entry());
1167 // ToBigInt(object) can have side-effects; all other types throw a TypeError.
1168 bailout(lir->snapshot());
1170 masm.bind(ool->rejoin());
1171 masm.bind(&done);
1174 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1175 masm.convertInt32ToDouble(ToRegister(lir->input()),
1176 ToFloatRegister(lir->output()));
1179 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1180 masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1181 ToFloatRegister(lir->output()));
1184 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1185 masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1186 ToFloatRegister(lir->output()));
1189 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1190 masm.convertInt32ToFloat32(ToRegister(lir->input()),
1191 ToFloatRegister(lir->output()));
1194 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1195 Label fail;
1196 FloatRegister input = ToFloatRegister(lir->input());
1197 Register output = ToRegister(lir->output());
1198 masm.convertDoubleToInt32(input, output, &fail,
1199 lir->mir()->needsNegativeZeroCheck());
1200 bailoutFrom(&fail, lir->snapshot());
1203 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1204 Label fail;
1205 FloatRegister input = ToFloatRegister(lir->input());
1206 Register output = ToRegister(lir->output());
1207 masm.convertFloat32ToInt32(input, output, &fail,
1208 lir->mir()->needsNegativeZeroCheck());
1209 bailoutFrom(&fail, lir->snapshot());
1212 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1213 #ifdef JS_64BIT
1214 // This LIR instruction is only used if the input can be negative.
1215 MOZ_ASSERT(lir->mir()->canBeNegative());
1217 Register output = ToRegister(lir->output());
1218 const LAllocation* input = lir->input();
1219 if (input->isRegister()) {
1220 masm.move32SignExtendToPtr(ToRegister(input), output);
1221 } else {
1222 masm.load32SignExtendToPtr(ToAddress(input), output);
1224 #else
1225 MOZ_CRASH("Not used on 32-bit platforms");
1226 #endif
1229 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1230 LNonNegativeIntPtrToInt32* lir) {
1231 #ifdef JS_64BIT
1232 Register output = ToRegister(lir->output());
1233 MOZ_ASSERT(ToRegister(lir->input()) == output);
1235 Label bail;
1236 masm.guardNonNegativeIntPtrToInt32(output, &bail);
1237 bailoutFrom(&bail, lir->snapshot());
1238 #else
1239 MOZ_CRASH("Not used on 32-bit platforms");
1240 #endif
1243 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1244 Register input = ToRegister(lir->input());
1245 FloatRegister output = ToFloatRegister(lir->output());
1246 masm.convertIntPtrToDouble(input, output);
1249 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
1250 Register output = ToRegister(lir->output());
1251 MOZ_ASSERT(ToRegister(lir->input()) == output);
1253 uint32_t byteSize = lir->mir()->byteSize();
1255 #ifdef DEBUG
1256 Label ok;
1257 masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
1258 masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
1259 masm.bind(&ok);
1260 #endif
1262 Label bail;
1263 masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
1264 bailoutFrom(&bail, lir->snapshot());
1267 void CodeGenerator::emitOOLTestObject(Register objreg,
1268 Label* ifEmulatesUndefined,
1269 Label* ifDoesntEmulateUndefined,
1270 Register scratch) {
1271 saveVolatile(scratch);
1272 using Fn = bool (*)(JSObject* obj);
1273 masm.setupAlignedABICall();
1274 masm.passABIArg(objreg);
1275 masm.callWithABI<Fn, js::EmulatesUndefined>();
1276 masm.storeCallPointerResult(scratch);
1277 restoreVolatile(scratch);
1279 masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
1280 masm.jump(ifDoesntEmulateUndefined);
1283 // Base out-of-line code generator for all tests of the truthiness of an
1284 // object, where the object might not be truthy. (Recall that per spec all
1285 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1286 // flag to permit objects to look like |undefined| in certain contexts,
1287 // including in object truthiness testing.) We check truthiness inline except
1288 // when we're testing it on a proxy, in which case out-of-line code will call
1289 // EmulatesUndefined for a conclusive answer.
1290 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
1291 Register objreg_;
1292 Register scratch_;
1294 Label* ifEmulatesUndefined_;
1295 Label* ifDoesntEmulateUndefined_;
1297 #ifdef DEBUG
1298 bool initialized() { return ifEmulatesUndefined_ != nullptr; }
1299 #endif
1301 public:
1302 OutOfLineTestObject()
1303 : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
1305 void accept(CodeGenerator* codegen) final {
1306 MOZ_ASSERT(initialized());
1307 codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
1308 ifDoesntEmulateUndefined_, scratch_);
1311 // Specify the register where the object to be tested is found, labels to
1312 // jump to if the object is truthy or falsy, and a scratch register for
1313 // use in the out-of-line path.
1314 void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
1315 Label* ifDoesntEmulateUndefined, Register scratch) {
1316 MOZ_ASSERT(!initialized());
1317 MOZ_ASSERT(ifEmulatesUndefined);
1318 objreg_ = objreg;
1319 scratch_ = scratch;
1320 ifEmulatesUndefined_ = ifEmulatesUndefined;
1321 ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
1325 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1326 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1327 // code. The user should bind these labels in inline code, and specify them as
1328 // targets via setInputAndTargets, as appropriate.
1329 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
1330 Label label1_;
1331 Label label2_;
1333 public:
1334 OutOfLineTestObjectWithLabels() = default;
1336 Label* label1() { return &label1_; }
1337 Label* label2() { return &label2_; }
1340 void CodeGenerator::testObjectEmulatesUndefinedKernel(
1341 Register objreg, Label* ifEmulatesUndefined,
1342 Label* ifDoesntEmulateUndefined, Register scratch,
1343 OutOfLineTestObject* ool) {
1344 ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
1345 scratch);
1347 // Perform a fast-path check of the object's class flags if the object's
1348 // not a proxy. Let out-of-line code handle the slow cases that require
1349 // saving registers, making a function call, and restoring registers.
1350 masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
1351 ifEmulatesUndefined);
1354 void CodeGenerator::branchTestObjectEmulatesUndefined(
1355 Register objreg, Label* ifEmulatesUndefined,
1356 Label* ifDoesntEmulateUndefined, Register scratch,
1357 OutOfLineTestObject* ool) {
1358 MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
1359 "ifDoesntEmulateUndefined will be bound to the fallthrough path");
1361 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1362 ifDoesntEmulateUndefined, scratch, ool);
1363 masm.bind(ifDoesntEmulateUndefined);
1366 void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
1367 Label* ifEmulatesUndefined,
1368 Label* ifDoesntEmulateUndefined,
1369 Register scratch,
1370 OutOfLineTestObject* ool) {
1371 testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1372 ifDoesntEmulateUndefined, scratch, ool);
1373 masm.jump(ifDoesntEmulateUndefined);
1376 void CodeGenerator::testValueTruthyForType(
1377 JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
1378 Register tempToUnbox, Register temp, FloatRegister floatTemp,
1379 Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
1380 bool skipTypeTest) {
1381 #ifdef DEBUG
1382 if (skipTypeTest) {
1383 Label expected;
1384 masm.branchTestType(Assembler::Equal, tag, type, &expected);
1385 masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
1386 masm.bind(&expected);
1388 #endif
1390 // Handle irregular types first.
1391 switch (type) {
1392 case JSVAL_TYPE_UNDEFINED:
1393 case JSVAL_TYPE_NULL:
1394 // Undefined and null are falsy.
1395 if (!skipTypeTest) {
1396 masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
1397 } else {
1398 masm.jump(ifFalsy);
1400 return;
1401 case JSVAL_TYPE_SYMBOL:
1402 // Symbols are truthy.
1403 if (!skipTypeTest) {
1404 masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
1405 } else {
1406 masm.jump(ifTruthy);
1408 return;
1409 case JSVAL_TYPE_OBJECT: {
1410 Label notObject;
1411 if (!skipTypeTest) {
1412 masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
1414 ScratchTagScopeRelease _(&tag);
1415 Register objreg = masm.extractObject(value, tempToUnbox);
1416 testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
1417 masm.bind(&notObject);
1418 return;
1420 default:
1421 break;
1424 // Check the type of the value (unless this is the last possible type).
1425 Label differentType;
1426 if (!skipTypeTest) {
1427 masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
1430 // Branch if the value is falsy.
1431 ScratchTagScopeRelease _(&tag);
1432 switch (type) {
1433 case JSVAL_TYPE_BOOLEAN: {
1434 masm.branchTestBooleanTruthy(false, value, ifFalsy);
1435 break;
1437 case JSVAL_TYPE_INT32: {
1438 masm.branchTestInt32Truthy(false, value, ifFalsy);
1439 break;
1441 case JSVAL_TYPE_STRING: {
1442 masm.branchTestStringTruthy(false, value, ifFalsy);
1443 break;
1445 case JSVAL_TYPE_BIGINT: {
1446 masm.branchTestBigIntTruthy(false, value, ifFalsy);
1447 break;
1449 case JSVAL_TYPE_DOUBLE: {
1450 masm.unboxDouble(value, floatTemp);
1451 masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
1452 break;
1454 default:
1455 MOZ_CRASH("Unexpected value type");
1458 // If we reach this point, the value is truthy. We fall through for
1459 // truthy on the last test; otherwise, branch.
1460 if (!skipTypeTest) {
1461 masm.jump(ifTruthy);
1464 masm.bind(&differentType);
1467 void CodeGenerator::testValueTruthy(const ValueOperand& value,
1468 Register tempToUnbox, Register temp,
1469 FloatRegister floatTemp,
1470 const TypeDataList& observedTypes,
1471 Label* ifTruthy, Label* ifFalsy,
1472 OutOfLineTestObject* ool) {
1473 ScratchTagScope tag(masm, value);
1474 masm.splitTagForTest(value, tag);
1476 const std::initializer_list<JSValueType> defaultOrder = {
1477 JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
1478 JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
1479 JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
1481 mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
1483 // Generate tests for previously observed types first.
1484 // The TypeDataList is sorted by descending frequency.
1485 for (auto& observed : observedTypes) {
1486 JSValueType type = observed.type();
1487 remaining -= type;
1489 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1490 ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
1493 // Generate tests for remaining types.
1494 for (auto type : defaultOrder) {
1495 if (!remaining.contains(type)) {
1496 continue;
1498 remaining -= type;
1500 // We don't need a type test for the last possible type.
1501 bool skipTypeTest = remaining.isEmpty();
1502 testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
1503 ifTruthy, ifFalsy, ool, skipTypeTest);
1505 MOZ_ASSERT(remaining.isEmpty());
1507 // We fall through if the final test is truthy.
1510 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
1511 Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
1512 Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
1513 Register input = ToRegister(lir->input());
1515 if (isNextBlock(lir->ifFalse()->lir())) {
1516 masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
1517 } else if (isNextBlock(lir->ifTrue()->lir())) {
1518 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1519 } else {
1520 masm.branchIfBigIntIsZero(input, ifFalseLabel);
1521 jumpToBlock(lir->ifTrue());
1525 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1526 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1527 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1528 Register input = ToRegister(lir->input());
1530 auto* ool = new (alloc()) OutOfLineTestObject();
1531 addOutOfLineCode(ool, lir->mir());
1533 testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1534 ool);
1537 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
1538 auto* ool = new (alloc()) OutOfLineTestObject();
1539 addOutOfLineCode(ool, lir->mir());
1541 Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1542 Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1544 ValueOperand input = ToValue(lir, LTestVAndBranch::Input);
1545 Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
1546 Register temp = ToRegister(lir->temp2());
1547 FloatRegister floatTemp = ToFloatRegister(lir->tempFloat());
1548 const TypeDataList& observedTypes = lir->mir()->observedTypes();
1550 testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
1551 falsy, ool);
1552 masm.jump(truthy);
1555 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1556 Register input = ToRegister(lir->input());
1557 Register output = ToRegister(lir->output());
1558 const JSAtomState& names = gen->runtime->names();
1559 Label true_, done;
1561 masm.branchTest32(Assembler::NonZero, input, input, &true_);
1562 masm.movePtr(ImmGCPtr(names.false_), output);
1563 masm.jump(&done);
1565 masm.bind(&true_);
1566 masm.movePtr(ImmGCPtr(names.true_), output);
1568 masm.bind(&done);
1571 void CodeGenerator::visitIntToString(LIntToString* lir) {
1572 Register input = ToRegister(lir->input());
1573 Register output = ToRegister(lir->output());
1575 using Fn = JSLinearString* (*)(JSContext*, int);
1576 OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1577 lir, ArgList(input), StoreRegisterTo(output));
1579 masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
1580 ool->entry());
1582 masm.bind(ool->rejoin());
1585 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
1586 FloatRegister input = ToFloatRegister(lir->input());
1587 Register temp = ToRegister(lir->temp0());
1588 Register output = ToRegister(lir->output());
1590 using Fn = JSString* (*)(JSContext*, double);
1591 OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
1592 lir, ArgList(input), StoreRegisterTo(output));
1594 // Try double to integer conversion and run integer to string code.
1595 masm.convertDoubleToInt32(input, temp, ool->entry(), false);
1596 masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
1597 ool->entry());
1599 masm.bind(ool->rejoin());
1602 void CodeGenerator::visitValueToString(LValueToString* lir) {
1603 ValueOperand input = ToValue(lir, LValueToString::InputIndex);
1604 Register output = ToRegister(lir->output());
1606 using Fn = JSString* (*)(JSContext*, HandleValue);
1607 OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
1608 lir, ArgList(input), StoreRegisterTo(output));
1610 Label done;
1611 Register tag = masm.extractTag(input, output);
1612 const JSAtomState& names = gen->runtime->names();
1614 // String
1616 Label notString;
1617 masm.branchTestString(Assembler::NotEqual, tag, &notString);
1618 masm.unboxString(input, output);
1619 masm.jump(&done);
1620 masm.bind(&notString);
1623 // Integer
1625 Label notInteger;
1626 masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
1627 Register unboxed = ToTempUnboxRegister(lir->temp0());
1628 unboxed = masm.extractInt32(input, unboxed);
1629 masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
1630 ool->entry());
1631 masm.jump(&done);
1632 masm.bind(&notInteger);
1635 // Double
1637 // Note: no fastpath. Need two extra registers and can only convert doubles
1638 // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
1639 masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
1642 // Undefined
1644 Label notUndefined;
1645 masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1646 masm.movePtr(ImmGCPtr(names.undefined), output);
1647 masm.jump(&done);
1648 masm.bind(&notUndefined);
1651 // Null
1653 Label notNull;
1654 masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1655 masm.movePtr(ImmGCPtr(names.null), output);
1656 masm.jump(&done);
1657 masm.bind(&notNull);
1660 // Boolean
1662 Label notBoolean, true_;
1663 masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
1664 masm.branchTestBooleanTruthy(true, input, &true_);
1665 masm.movePtr(ImmGCPtr(names.false_), output);
1666 masm.jump(&done);
1667 masm.bind(&true_);
1668 masm.movePtr(ImmGCPtr(names.true_), output);
1669 masm.jump(&done);
1670 masm.bind(&notBoolean);
1673 // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
1674 if (lir->mir()->mightHaveSideEffects()) {
1675 // Object
1676 if (lir->mir()->supportSideEffects()) {
1677 masm.branchTestObject(Assembler::Equal, tag, ool->entry());
1678 } else {
1679 // Bail.
1680 MOZ_ASSERT(lir->mir()->needsSnapshot());
1681 Label bail;
1682 masm.branchTestObject(Assembler::Equal, tag, &bail);
1683 bailoutFrom(&bail, lir->snapshot());
1686 // Symbol
1687 if (lir->mir()->supportSideEffects()) {
1688 masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
1689 } else {
1690 // Bail.
1691 MOZ_ASSERT(lir->mir()->needsSnapshot());
1692 Label bail;
1693 masm.branchTestSymbol(Assembler::Equal, tag, &bail);
1694 bailoutFrom(&bail, lir->snapshot());
1698 // BigInt
1700 // No fastpath currently implemented.
1701 masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
1704 masm.assumeUnreachable("Unexpected type for LValueToString.");
1706 masm.bind(&done);
1707 masm.bind(ool->rejoin());
1710 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
1712 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
1713 size_t offset, Register buffer,
1714 LiveGeneralRegisterSet& liveVolatiles,
1715 StoreBufferMutationFn fun) {
1716 Label callVM;
1717 Label exit;
1719 // Call into the VM to barrier the write. The only registers that need to
1720 // be preserved are those in liveVolatiles, so once they are saved on the
1721 // stack all volatile registers are available for use.
1722 masm.bind(&callVM);
1723 masm.PushRegsInMask(liveVolatiles);
1725 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
1726 regs.takeUnchecked(buffer);
1727 regs.takeUnchecked(holder);
1728 Register addrReg = regs.takeAny();
1730 masm.computeEffectiveAddress(Address(holder, offset), addrReg);
1732 bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
1733 if (needExtraReg) {
1734 masm.push(holder);
1735 masm.setupUnalignedABICall(holder);
1736 } else {
1737 masm.setupUnalignedABICall(regs.takeAny());
1739 masm.passABIArg(buffer);
1740 masm.passABIArg(addrReg);
1741 masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
1742 ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
1744 if (needExtraReg) {
1745 masm.pop(holder);
1747 masm.PopRegsInMask(liveVolatiles);
1748 masm.bind(&exit);
1751 // Warning: this function modifies prev and next.
1752 static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
1753 size_t offset, Register prev, Register next,
1754 LiveGeneralRegisterSet& liveVolatiles) {
1755 Label exit;
1756 Label checkRemove, putCell;
1758 // if (next && (buffer = next->storeBuffer()))
1759 // but we never pass in nullptr for next.
1760 Register storebuffer = next;
1761 masm.loadStoreBuffer(next, storebuffer);
1762 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
1764 // if (prev && prev->storeBuffer())
1765 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
1766 masm.loadStoreBuffer(prev, prev);
1767 masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
1769 // buffer->putCell(cellp)
1770 masm.bind(&putCell);
1771 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1772 JSString::addCellAddressToStoreBuffer);
1773 masm.jump(&exit);
1775 // if (prev && (buffer = prev->storeBuffer()))
1776 masm.bind(&checkRemove);
1777 masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
1778 masm.loadStoreBuffer(prev, storebuffer);
1779 masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
1780 EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1781 JSString::removeCellAddressFromStoreBuffer);
1783 masm.bind(&exit);
1786 void CodeGenerator::visitRegExp(LRegExp* lir) {
1787 Register output = ToRegister(lir->output());
1788 Register temp = ToRegister(lir->temp0());
1789 JSObject* source = lir->mir()->source();
1791 using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
1792 OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
1793 lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
1794 if (lir->mir()->hasShared()) {
1795 TemplateObject templateObject(source);
1796 masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
1797 ool->entry());
1798 } else {
1799 masm.jump(ool->entry());
1801 masm.bind(ool->rejoin());
1804 static constexpr int32_t RegExpPairsVectorStartOffset(
1805 int32_t inputOutputDataStartOffset) {
1806 return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
1807 int32_t(sizeof(MatchPairs));
1810 static Address RegExpPairCountAddress(MacroAssembler& masm,
1811 int32_t inputOutputDataStartOffset) {
1812 return Address(FramePointer, inputOutputDataStartOffset +
1813 int32_t(InputOutputDataSize) +
1814 MatchPairs::offsetOfPairCount());
1817 static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
1818 Register input, Register lastIndex,
1819 Register staticsReg, Register temp1,
1820 Register temp2, gc::Heap initialStringHeap,
1821 LiveGeneralRegisterSet& volatileRegs) {
1822 Address pendingInputAddress(staticsReg,
1823 RegExpStatics::offsetOfPendingInput());
1824 Address matchesInputAddress(staticsReg,
1825 RegExpStatics::offsetOfMatchesInput());
1826 Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
1827 Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
1829 masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
1830 masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
1831 masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
1833 if (initialStringHeap == gc::Heap::Default) {
1834 // Writing into RegExpStatics tenured memory; must post-barrier.
1835 if (staticsReg.volatile_()) {
1836 volatileRegs.add(staticsReg);
1839 masm.loadPtr(pendingInputAddress, temp1);
1840 masm.storePtr(input, pendingInputAddress);
1841 masm.movePtr(input, temp2);
1842 EmitPostWriteBarrierS(masm, staticsReg,
1843 RegExpStatics::offsetOfPendingInput(),
1844 temp1 /* prev */, temp2 /* next */, volatileRegs);
1846 masm.loadPtr(matchesInputAddress, temp1);
1847 masm.storePtr(input, matchesInputAddress);
1848 masm.movePtr(input, temp2);
1849 EmitPostWriteBarrierS(masm, staticsReg,
1850 RegExpStatics::offsetOfMatchesInput(),
1851 temp1 /* prev */, temp2 /* next */, volatileRegs);
1852 } else {
1853 masm.debugAssertGCThingIsTenured(input, temp1);
1854 masm.storePtr(input, pendingInputAddress);
1855 masm.storePtr(input, matchesInputAddress);
1858 masm.storePtr(lastIndex,
1859 Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
1860 masm.store32(
1861 Imm32(1),
1862 Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
1864 masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
1865 RegExpObject::SHARED_SLOT)),
1866 temp1, JSVAL_TYPE_PRIVATE_GCTHING);
1867 masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
1868 masm.storePtr(temp2, lazySourceAddress);
1869 static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
1870 masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
1871 masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
1874 // Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough.
// Otherwise, jump to notFound or failure.
//
// inputOutputDataStartOffset is the offset relative to the frame pointer
// register. This offset is negative for the RegExpExecTest stub.
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2,
                                    Register temp3,
                                    int32_t inputOutputDataStartOffset,
                                    gc::Heap initialStringHeap, Label* notFound,
                                    Label* failure) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  using irregexp::InputOutputData;

  /*
   * [SMDOC] Stack layout for PrepareAndExecuteRegExp
   *
   * Before this function is called, the caller is responsible for
   * allocating enough stack space for the following data:
   *
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *          inputStartAddress +---------->  inputStart|
   *            inputEndAddress +---------->    inputEnd|
   *          startIndexAddress +---------->  startIndex|
   *             matchesAddress +---------->     matches|-----+
   *                                    +---------------+     |
   * matchPairs(Address|Offset) +-----> +---------------+ <--+
   *                                    |  MatchPairs   |
   *           pairCountAddress +---------->      count |
   *        pairsPointerAddress +---------->      pairs |-----+
   *                                    +---------------+     |
   * pairsArray(Address|Offset) +-----> +---------------+ <--+
   *                                    |   MatchPair   |
   *     firstMatchStartAddress +---------->      start | <--+
   *                                    |         limit |    |
   *                                    +---------------+    |
   *                                           .             |
   *                                           .  Reserved space for
   *                                           .  RegExpObject::MaxPairCount
   *                                           .  MatchPair objects
   *                                           .             |
   *                                    +---------------+    |
   *                                    |   MatchPair   |    |
   *                                    |         start |    |
   *                                    |         limit | <--+
   *                                    +---------------+
   */

  // Compute the (frame-pointer-relative) offsets of the three regions
  // described in the SMDOC above: they are laid out back-to-back.
  int32_t ioOffset = inputOutputDataStartOffset;
  int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
  int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));

  Address inputStartAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfInputStart());
  Address inputEndAddress(FramePointer,
                          ioOffset + InputOutputData::offsetOfInputEnd());
  Address startIndexAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfStartIndex());
  Address matchesAddress(FramePointer,
                         ioOffset + InputOutputData::offsetOfMatches());

  Address matchPairsAddress(FramePointer, matchPairsOffset);
  Address pairCountAddress(FramePointer,
                           matchPairsOffset + MatchPairs::offsetOfPairCount());
  Address pairsPointerAddress(FramePointer,
                              matchPairsOffset + MatchPairs::offsetOfPairs());

  Address pairsArrayAddress(FramePointer, pairsArrayOffset);
  Address firstMatchStartAddress(FramePointer,
                                 pairsArrayOffset + MatchPair::offsetOfStart());

  // First, fill in a skeletal MatchPairs instance on the stack. This will be
  // passed to the OOL stub in the caller if we aren't able to execute the
  // RegExp inline, and that stub needs to be able to determine whether the
  // execution finished successfully.

  // Initialize MatchPairs::pairCount to 1. The correct value can only
  // be determined after loading the RegExpShared. If the RegExpShared
  // has Kind::Atom, this is the correct pairCount.
  masm.store32(Imm32(1), pairCountAddress);

  // Initialize MatchPairs::pairs pointer
  masm.computeEffectiveAddress(pairsArrayAddress, temp1);
  masm.storePtr(temp1, pairsPointerAddress);

  // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
  masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);

  // Determine the set of volatile inputs to save when calling into C++ or
  // regexp code.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

  // Ensure the input string is not a rope.
  Label isLinear;
  masm.branchIfNotRope(input, &isLinear);
  {
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallPointerResult(temp1);
    masm.PopRegsInMask(volatileRegs);

    // Linearization failed (returned nullptr): bail to the OOL path.
    masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
  }
  masm.bind(&isLinear);

  // Load the RegExpShared. An undefined slot means the regexp has not been
  // compiled yet; defer to the OOL path in that case.
  Register regexpReg = temp1;
  Address sharedSlot = Address(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
  masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
  masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);

  // Handle Atom matches
  Label notAtom, checkSuccess;
  masm.branchPtr(Assembler::Equal,
                 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
                 ImmWord(0), &notAtom);
  {
    masm.computeEffectiveAddress(matchPairsAddress, temp3);

    masm.PushRegsInMask(volatileRegs);
    using Fn = RegExpRunStatus (*)(RegExpShared* re, JSLinearString* input,
                                   size_t start, MatchPairs* matchPairs);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(regexpReg);
    masm.passABIArg(input);
    masm.passABIArg(lastIndex);
    masm.passABIArg(temp3);
    masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallInt32Result(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.jump(&checkSuccess);
  }
  masm.bind(&notAtom);

  // Don't handle regexps with too many capture pairs.
  masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
  masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
                failure);

  // Fill in the pair count in the MatchPairs on the stack.
  masm.store32(temp2, pairCountAddress);

  // Load code pointer and length of input (in bytes).
  // Store the input start in the InputOutputData.
  Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
  Register byteLength = temp3;
  {
    Label isLatin1, done;
    masm.loadStringLength(input, byteLength);

    masm.branchLatin1String(input, &isLatin1);

    // Two-byte input
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
        codePointer);
    // Two bytes per char: double the length to get the byte count.
    masm.lshiftPtr(Imm32(1), byteLength);
    masm.jump(&done);

    // Latin1 input
    masm.bind(&isLatin1);
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
        codePointer);

    masm.bind(&done);

    // Store end pointer
    masm.addPtr(byteLength, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Guard that the RegExpShared has been compiled for this type of input.
  // If it has not been compiled, we fall back to the OOL case, which will
  // do a VM call into the interpreter.
  // TODO: add an interpreter trampoline?
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack
  masm.computeEffectiveAddress(matchPairsAddress, temp2);
  masm.storePtr(temp2, matchesAddress);
  masm.storePtr(lastIndex, startIndexAddress);

  // Execute the RegExp.
  masm.computeEffectiveAddress(
      Address(FramePointer, inputOutputDataStartOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.storeCallInt32Result(temp1);
  masm.PopRegsInMask(volatileRegs);

  // Here temp1 holds the RegExpRunStatus produced by either the atom path
  // (ExecuteRegExpAtomRaw) or the compiled regexp code above.
  masm.bind(&checkSuccess);
  masm.branch32(Assembler::Equal, temp1,
                Imm32(RegExpRunStatus_Success_NotFound), notFound);
  masm.branch32(Assembler::Equal, temp1, Imm32(RegExpRunStatus_Error), failure);

  // Lazily update the RegExpStatics.
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfRegExpStatics();
  masm.loadGlobalObjectData(temp1);
  masm.loadPtr(Address(temp1, offset), temp1);
  UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
                      initialStringHeap, volatileRegs);

  return true;
}
// Forward declaration: copies |len| characters from |from| to |to| in the
// given encoding, clobbering |byteOpScratch|. Defined later in this file.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding,
                            size_t maximumLength = SIZE_MAX);
// Helper that emits code to create a string covering a [start, limit) range
// of a base string, choosing between the empty string, the base string
// itself, a static string, an inline string, or a dependent string.
class CreateDependentString {
  CharEncoding encoding_;  // Character width of the base string's chars.
  Register string_;        // Receives the newly created string.
  Register temp1_;
  Register temp2_;
  Label* failure_;  // Jumped to when the fallback allocation returns null.

  // Each allocation site gets a fallback label (bound in generateFallback)
  // and a join label to resume the inline path afterwards.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
      joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
// Emit the inline path for creating the substring described by
// |startIndexAddress|/|limitIndexAddress| of |base|. On the fast path the
// result lands in string_; allocation failures branch to the per-kind
// fallback labels (see generateFallback).
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a string of the given kind and initialize its flags word.
  // Allocation failure jumps to fallbacks_[kind]; generateFallback binds
  // that label and jumps back to joins_[kind].
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length: temp1_ = limit - start (temp2_ keeps start).
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.lookupStaticString(temp1_, string_, runtime->staticStrings());

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Free up |string_| and |base| as scratch registers while copying;
    // their values are restored from the stack afterwards.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
                      &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}
// Emit the out-of-line allocation paths: for each FallbackKind, call into
// the VM to allocate the string, then rejoin the inline path at the
// corresponding joins_ label. A null result branches to failure_.
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // string_ doubles as scratch/result here, so keep it (and temp2_) out of
  // the save set.
  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext* cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
//
// The stub returns (in JSReturnOperand) the match-result array object, null
// for no match, or the undefined value to request the OOL VM-call fallback.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
                                              gc::Heap initialStringHeap,
                                              bool isExecMatch) {
  if (isExecMatch) {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
  } else {
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
  }

  // |initialStringHeap| could be stale after a GC.
  JS::AutoCheckCannotGC nogc(cx);

  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register maybeTemp4 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp4 = regs.takeAny();
  }
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  Label notFoundZeroLastIndex;
  if (isExecMatch) {
    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
  }

  // The InputOutputData is placed above the frame pointer and return address on
  // the stack.
  int32_t inputOutputDataStartOffset = 2 * sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               initialStringHeap, &notFound, &oolEntry)) {
    return nullptr;
  }

  // If a regexp has named captures, fall back to the OOL stub, which
  // will end up calling CreateRegExpMatchResults.
  Register shared = temp2;
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.branchPtr(Assembler::NotEqual,
                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
                 ImmWord(0), &oolEntry);

  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
  masm.branchTest32(Assembler::NonZero,
                    Address(shared, RegExpShared::offsetOfFlags()),
                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  // Construct the result.
  Register object = temp1;
  {
    // In most cases, the array will have just 1-2 elements, so we optimize for
    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
    // because two slots are used for the elements header).

    // Load the array length in temp2 and the shape in temp3.
    Label allocated;
    masm.load32(pairCountAddress, temp2);
    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                    RegExpRealm::offsetOfNormalMatchResultShape();
    masm.loadGlobalObjectData(temp3);
    masm.loadPtr(Address(temp3, offset), temp3);

    auto emitAllocObject = [&](size_t elementCapacity) {
      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
      MOZ_ASSERT(CanChangeToBackgroundAllocKind(kind, &ArrayObject::class_));
      kind = ForegroundToBackgroundAllocKind(kind);

#ifdef DEBUG
      // Assert all of the available slots are used for |elementCapacity|
      // elements.
      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif

      constexpr size_t numUsedDynamicSlots =
          RegExpRealm::MatchResultObjectSlotSpan;
      constexpr size_t numDynamicSlots =
          RegExpRealm::MatchResultObjectNumDynamicSlots;
      constexpr size_t arrayLength = 1;
      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
                                        arrayLength, elementCapacity,
                                        numUsedDynamicSlots, numDynamicSlots,
                                        kind, gc::Heap::Default, &oolEntry);
    };

    Label moreThan2;
    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
    emitAllocObject(2);
    masm.jump(&allocated);

    Label moreThan6;
    masm.bind(&moreThan2);
    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
    emitAllocObject(6);
    masm.jump(&allocated);

    masm.bind(&moreThan6);
    static_assert(RegExpObject::MaxPairCount == 14);
    emitAllocObject(RegExpObject::MaxPairCount);

    masm.bind(&allocated);
  }

  // clang-format off
  /*
   * [SMDOC] Stack layout for the RegExpMatcher stub
   *
   *                                    +---------------+
   *               FramePointer +-----> |Caller-FramePtr|
   *                                    +---------------+
   *                                    |Return-Address |
   *                                    +---------------+
   * inputOutputDataStartOffset +-----> +---------------+
   *                                    |InputOutputData|
   *                                    +---------------+
   *                                    +---------------+
   *                                    |  MatchPairs   |
   *            pairsCountAddress +----------->  count  |
   *                                    |       pairs   |
   *                                    |               |
   *                                    +---------------+
   *     pairsVectorStartOffset +-----> +---------------+
   *                                    |   MatchPair   |
   *             matchPairStart +------------>  start   |  <-------+
   *             matchPairLimit +------------>  limit   |          | Reserved space for
   *                                    +---------------+          | `RegExpObject::MaxPairCount`
   *                                           .                   | MatchPair objects.
   *                                           .                   |
   *                                           .                   | `count` objects will be
   *                                    +---------------+          | initialized and can be
   *                                    |   MatchPair   |          | accessed below.
   *                                    |  start        |  <-------+
   *                                    |  limit        |
   *                                    +---------------+
   */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                "and the end offset of the match");

  int32_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfStart());
  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
                           pairsVectorStartOffset + MatchPair::offsetOfLimit());

  Label* depStrFailure = &oolEntry;
  Label restoreRegExpAndLastIndex;

  Register temp4;
  if (maybeTemp4 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fourth temporary. Reuse |regexp|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(regexp);
    temp4 = regexp;
  } else {
    temp4 = maybeTemp4;
  }

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    depStrFailure = &restoreRegExpAndLastIndex;

    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
    masm.push(lastIndex);
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  // Pops must mirror the pushes above (lastIndex was pushed last).
  auto maybeRestoreRegExpAndLastIndex = [&]() {
    if (maybeTemp5 == InvalidReg) {
      masm.pop(lastIndex);
    }
    if (maybeTemp4 == InvalidReg) {
      masm.pop(regexp);
    }
  };

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        initialStringHeap);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  maybeRestoreRegExpAndLastIndex();

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  Address firstMatchPairStartAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address firstMatchPairLimitAddress(
      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());

  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within nursery object.)
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // For the ExecMatch stub, if the regular expression is global or sticky, we
  // have to update its .lastIndex slot.
  if (isExecMatch) {
    MOZ_ASSERT(object != lastIndex);
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.load32(firstMatchPairLimitAddress, lastIndex);
    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.pop(FramePointer);
  masm.ret();

  // No-match exit: return null (and for ExecMatch, zero .lastIndex for
  // global/sticky regexps).
  masm.bind(&notFound);
  if (isExecMatch) {
    Label notGlobalOrSticky;
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                      &notGlobalOrSticky);
    masm.bind(&notFoundZeroLastIndex);
    masm.storeValue(Int32Value(0), lastIndexSlot);
    masm.bind(&notGlobalOrSticky);
  }
  masm.moveValue(NullValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fall-through to the ool entry after restoring the registers.
  masm.bind(&restoreRegExpAndLastIndex);
  maybeRestoreRegExpAndLastIndex();

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
  CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, name);
#endif

  return code;
}
2743 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
2744 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2745 /* isExecMatch = */ false);
2748 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
2749 return GenerateRegExpMatchStubShared(cx, initialStringHeap,
2750 /* isExecMatch = */ true);
2753 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2754 LRegExpMatcher* lir_;
2756 public:
2757 explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2759 void accept(CodeGenerator* codegen) override {
2760 codegen->visitOutOfLineRegExpMatcher(this);
2763 LRegExpMatcher* lir() const { return lir_; }
// OOL fallback for LRegExpMatcher: perform the match through a VM call
// (RegExpMatcherRaw) using the MatchPairs already reserved on the stack.
void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs allocation sits just above the InputOutputData that
  // visitRegExpMatcher reserved on the stack.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call, and that live
  // registers are already saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, HandleObject regexp, HandleString input,
               int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}
// Emit the inline call to the RegExpMatcher stub, falling back to the OOL
// VM-call path when the stub returns the undefined value.
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  // The stub expects its operands in these fixed registers.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The operand registers must not alias the return value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Reserve space for the InputOutputData/MatchPairs used by the stub.
  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* regExpMatcherStub =
      jitZone->regExpMatcherStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
  // The stub returns undefined to request the OOL VM call.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
2830 class OutOfLineRegExpExecMatch : public OutOfLineCodeBase<CodeGenerator> {
2831 LRegExpExecMatch* lir_;
2833 public:
2834 explicit OutOfLineRegExpExecMatch(LRegExpExecMatch* lir) : lir_(lir) {}
2836 void accept(CodeGenerator* codegen) override {
2837 codegen->visitOutOfLineRegExpExecMatch(this);
2840 LRegExpExecMatch* lir() const { return lir_; }
// OOL fallback for LRegExpExecMatch: perform the match through a VM call
// (RegExpBuiltinExecMatchFromJit). Note: unlike the matcher OOL path, no
// lastIndex argument is passed; the callee handles .lastIndex itself.
void CodeGenerator::visitOutOfLineRegExpExecMatch(
    OutOfLineRegExpExecMatch* ool) {
  LRegExpExecMatch* lir = ool->lir();
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  // The MatchPairs allocation sits just above the InputOutputData that
  // visitRegExpExecMatch reserved on the stack.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), InputOutputDataSize), temp);

  pushArg(temp);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and live registers have
  // already been saved by the register allocator.
  using Fn =
      bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
               MatchPairs* pairs, MutableHandleValue output);
  callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
  masm.jump(ool->rejoin());
}
2870 void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
2871 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2872 MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2873 MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
2875 #if defined(JS_NUNBOX32)
2876 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2877 static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2878 static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2879 static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2880 #elif defined(JS_PUNBOX64)
2881 static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2882 static_assert(RegExpMatcherStringReg != JSReturnReg);
2883 #endif
2885 masm.reserveStack(RegExpReservedStack);
2887 auto* ool = new (alloc()) OutOfLineRegExpExecMatch(lir);
2888 addOutOfLineCode(ool, lir->mir());
2890 const JitZone* jitZone = gen->realm->zone()->jitZone();
2891 JitCode* regExpExecMatchStub =
2892 jitZone->regExpExecMatchStubNoBarrier(&zoneStubsToReadBarrier_);
2893 masm.call(regExpExecMatchStub);
2894 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2896 masm.bind(ool->rejoin());
2897 masm.freeStack(RegExpReservedStack);
2900 JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
2901 JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
2903 Register regexp = RegExpSearcherRegExpReg;
2904 Register input = RegExpSearcherStringReg;
2905 Register lastIndex = RegExpSearcherLastIndexReg;
2906 Register result = ReturnReg;
2908 // We are free to clobber all registers, as LRegExpSearcher is a call
2909 // instruction.
2910 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2911 regs.take(input);
2912 regs.take(regexp);
2913 regs.take(lastIndex);
2915 Register temp1 = regs.takeAny();
2916 Register temp2 = regs.takeAny();
2917 Register temp3 = regs.takeAny();
2919 TempAllocator temp(&cx->tempLifoAlloc());
2920 JitContext jcx(cx);
2921 StackMacroAssembler masm(cx, temp);
2922 AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");
2924 #ifdef JS_USE_LINK_REGISTER
2925 masm.pushReturnAddress();
2926 #endif
2927 masm.push(FramePointer);
2928 masm.moveStackPtrTo(FramePointer);
2930 #ifdef DEBUG
2931 // Store sentinel value to cx->regExpSearcherLastLimit.
2932 // See comment in RegExpSearcherImpl.
2933 masm.loadJSContext(temp1);
2934 masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
2935 Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
2936 #endif
2938 // The InputOutputData is placed above the frame pointer and return address on
2939 // the stack.
2940 int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
2942 Label notFound, oolEntry;
2943 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
2944 temp3, inputOutputDataStartOffset,
2945 initialStringHeap, &notFound, &oolEntry)) {
2946 return nullptr;
2949 // clang-format off
2951 * [SMDOC] Stack layout for the RegExpSearcher stub
2953 * +---------------+
2954 * FramePointer +-----> |Caller-FramePtr|
2955 * +---------------+
2956 * |Return-Address |
2957 * +---------------+
2958 * inputOutputDataStartOffset +-----> +---------------+
2959 * |InputOutputData|
2960 * +---------------+
2961 * +---------------+
2962 * | MatchPairs |
2963 * | count |
2964 * | pairs |
2965 * | |
2966 * +---------------+
2967 * pairsVectorStartOffset +-----> +---------------+
2968 * | MatchPair |
2969 * matchPairStart +------------> start | <-------+
2970 * matchPairLimit +------------> limit | | Reserved space for
2971 * +---------------+ | `RegExpObject::MaxPairCount`
2972 * . | MatchPair objects.
2973 * . |
2974 * . | Only a single object will
2975 * +---------------+ | be initialized and can be
2976 * | MatchPair | | accessed below.
2977 * | start | <-------+
2978 * | limit |
2979 * +---------------+
2981 // clang-format on
2983 int32_t pairsVectorStartOffset =
2984 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2985 Address matchPairStart(FramePointer,
2986 pairsVectorStartOffset + MatchPair::offsetOfStart());
2987 Address matchPairLimit(FramePointer,
2988 pairsVectorStartOffset + MatchPair::offsetOfLimit());
2990 // Store match limit to cx->regExpSearcherLastLimit and return the index.
2991 masm.load32(matchPairLimit, result);
2992 masm.loadJSContext(input);
2993 masm.store32(result,
2994 Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
2995 masm.load32(matchPairStart, result);
2996 masm.pop(FramePointer);
2997 masm.ret();
2999 masm.bind(&notFound);
3000 masm.move32(Imm32(RegExpSearcherResultNotFound), result);
3001 masm.pop(FramePointer);
3002 masm.ret();
3004 masm.bind(&oolEntry);
3005 masm.move32(Imm32(RegExpSearcherResultFailed), result);
3006 masm.pop(FramePointer);
3007 masm.ret();
3009 Linker linker(masm);
3010 JitCode* code = linker.newCode(cx, CodeKind::Other);
3011 if (!code) {
3012 return nullptr;
3015 CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
3016 #ifdef MOZ_VTUNE
3017 vtune::MarkStub(code, "RegExpSearcherStub");
3018 #endif
3020 return code;
3023 class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
3024 LRegExpSearcher* lir_;
3026 public:
3027 explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}
3029 void accept(CodeGenerator* codegen) override {
3030 codegen->visitOutOfLineRegExpSearcher(this);
3033 LRegExpSearcher* lir() const { return lir_; }
3036 void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
3037 LRegExpSearcher* lir = ool->lir();
3038 Register lastIndex = ToRegister(lir->lastIndex());
3039 Register input = ToRegister(lir->string());
3040 Register regexp = ToRegister(lir->regexp());
3042 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3043 regs.take(lastIndex);
3044 regs.take(input);
3045 regs.take(regexp);
3046 Register temp = regs.takeAny();
3048 masm.computeEffectiveAddress(
3049 Address(masm.getStackPointer(), InputOutputDataSize), temp);
3051 pushArg(temp);
3052 pushArg(lastIndex);
3053 pushArg(input);
3054 pushArg(regexp);
3056 // We are not using oolCallVM because we are in a Call, and that live
3057 // registers are already saved by the the register allocator.
3058 using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
3059 int32_t lastIndex, MatchPairs* pairs, int32_t* result);
3060 callVM<Fn, RegExpSearcherRaw>(lir);
3062 masm.jump(ool->rejoin());
3065 void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
3066 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
3067 MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
3068 MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
3069 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3071 static_assert(RegExpSearcherRegExpReg != ReturnReg);
3072 static_assert(RegExpSearcherStringReg != ReturnReg);
3073 static_assert(RegExpSearcherLastIndexReg != ReturnReg);
3075 masm.reserveStack(RegExpReservedStack);
3077 OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
3078 addOutOfLineCode(ool, lir->mir());
3080 const JitZone* jitZone = gen->realm->zone()->jitZone();
3081 JitCode* regExpSearcherStub =
3082 jitZone->regExpSearcherStubNoBarrier(&zoneStubsToReadBarrier_);
3083 masm.call(regExpSearcherStub);
3084 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
3085 ool->entry());
3086 masm.bind(ool->rejoin());
3088 masm.freeStack(RegExpReservedStack);
3091 void CodeGenerator::visitRegExpSearcherLastLimit(
3092 LRegExpSearcherLastLimit* lir) {
3093 Register result = ToRegister(lir->output());
3094 Register scratch = ToRegister(lir->temp0());
3096 masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
3099 JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
3100 JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");
3102 Register regexp = RegExpExecTestRegExpReg;
3103 Register input = RegExpExecTestStringReg;
3104 Register result = ReturnReg;
3106 TempAllocator temp(&cx->tempLifoAlloc());
3107 JitContext jcx(cx);
3108 StackMacroAssembler masm(cx, temp);
3109 AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");
3111 #ifdef JS_USE_LINK_REGISTER
3112 masm.pushReturnAddress();
3113 #endif
3114 masm.push(FramePointer);
3115 masm.moveStackPtrTo(FramePointer);
3117 // We are free to clobber all registers, as LRegExpExecTest is a call
3118 // instruction.
3119 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3120 regs.take(input);
3121 regs.take(regexp);
3123 // Ensure lastIndex != result.
3124 regs.take(result);
3125 Register lastIndex = regs.takeAny();
3126 regs.add(result);
3127 Register temp1 = regs.takeAny();
3128 Register temp2 = regs.takeAny();
3129 Register temp3 = regs.takeAny();
3131 Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
3132 Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
3134 masm.reserveStack(RegExpReservedStack);
3136 // Load lastIndex and skip RegExp execution if needed.
3137 Label notFoundZeroLastIndex;
3138 masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
3140 // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
3141 // before calling the stub. For RegExpExecTest we call the stub before
3142 // reserving stack space, so the offset of the InputOutputData relative to the
3143 // frame pointer is negative.
3144 constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);
3146 // On ARM64, load/store instructions can encode an immediate offset in the
3147 // range [-256, 4095]. If we ever fail this assertion, it would be more
3148 // efficient to store the data above the frame pointer similar to
3149 // RegExpMatcher and RegExpSearcher.
3150 static_assert(inputOutputDataStartOffset >= -256);
3152 Label notFound, oolEntry;
3153 if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
3154 temp3, inputOutputDataStartOffset,
3155 initialStringHeap, &notFound, &oolEntry)) {
3156 return nullptr;
3159 // Set `result` to true/false to indicate found/not-found, or to
3160 // RegExpExecTestResultFailed if we have to retry in C++. If the regular
3161 // expression is global or sticky, we also have to update its .lastIndex slot.
3163 Label done;
3164 int32_t pairsVectorStartOffset =
3165 RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
3166 Address matchPairLimit(FramePointer,
3167 pairsVectorStartOffset + MatchPair::offsetOfLimit());
3169 masm.move32(Imm32(1), result);
3170 masm.branchTest32(Assembler::Zero, flagsSlot,
3171 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3172 &done);
3173 masm.load32(matchPairLimit, lastIndex);
3174 masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
3175 masm.jump(&done);
3177 masm.bind(&notFound);
3178 masm.move32(Imm32(0), result);
3179 masm.branchTest32(Assembler::Zero, flagsSlot,
3180 Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
3181 &done);
3182 masm.storeValue(Int32Value(0), lastIndexSlot);
3183 masm.jump(&done);
3185 masm.bind(&notFoundZeroLastIndex);
3186 masm.move32(Imm32(0), result);
3187 masm.storeValue(Int32Value(0), lastIndexSlot);
3188 masm.jump(&done);
3190 masm.bind(&oolEntry);
3191 masm.move32(Imm32(RegExpExecTestResultFailed), result);
3193 masm.bind(&done);
3194 masm.freeStack(RegExpReservedStack);
3195 masm.pop(FramePointer);
3196 masm.ret();
3198 Linker linker(masm);
3199 JitCode* code = linker.newCode(cx, CodeKind::Other);
3200 if (!code) {
3201 return nullptr;
3204 CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
3205 #ifdef MOZ_VTUNE
3206 vtune::MarkStub(code, "RegExpExecTestStub");
3207 #endif
3209 return code;
3212 class OutOfLineRegExpExecTest : public OutOfLineCodeBase<CodeGenerator> {
3213 LRegExpExecTest* lir_;
3215 public:
3216 explicit OutOfLineRegExpExecTest(LRegExpExecTest* lir) : lir_(lir) {}
3218 void accept(CodeGenerator* codegen) override {
3219 codegen->visitOutOfLineRegExpExecTest(this);
3222 LRegExpExecTest* lir() const { return lir_; }
3225 void CodeGenerator::visitOutOfLineRegExpExecTest(OutOfLineRegExpExecTest* ool) {
3226 LRegExpExecTest* lir = ool->lir();
3227 Register input = ToRegister(lir->string());
3228 Register regexp = ToRegister(lir->regexp());
3230 pushArg(input);
3231 pushArg(regexp);
3233 // We are not using oolCallVM because we are in a Call and live registers have
3234 // already been saved by the register allocator.
3235 using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
3236 HandleString input, bool* result);
3237 callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);
3239 masm.jump(ool->rejoin());
3242 void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
3243 MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
3244 MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
3245 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3247 static_assert(RegExpExecTestRegExpReg != ReturnReg);
3248 static_assert(RegExpExecTestStringReg != ReturnReg);
3250 auto* ool = new (alloc()) OutOfLineRegExpExecTest(lir);
3251 addOutOfLineCode(ool, lir->mir());
3253 const JitZone* jitZone = gen->realm->zone()->jitZone();
3254 JitCode* regExpExecTestStub =
3255 jitZone->regExpExecTestStubNoBarrier(&zoneStubsToReadBarrier_);
3256 masm.call(regExpExecTestStub);
3258 masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
3259 ool->entry());
3261 masm.bind(ool->rejoin());
3264 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
3265 Register regexp = ToRegister(ins->regexp());
3266 Register input = ToRegister(ins->input());
3267 Register output = ToRegister(ins->output());
3269 using Fn =
3270 bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
3271 auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
3272 ins, ArgList(regexp, input), StoreRegisterTo(output));
3274 // Load RegExpShared in |output|.
3275 Label vmCall;
3276 masm.loadParsedRegExpShared(regexp, output, ool->entry());
3278 // Return true iff pairCount > 1.
3279 Label returnTrue;
3280 masm.branch32(Assembler::Above,
3281 Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
3282 &returnTrue);
3283 masm.move32(Imm32(0), output);
3284 masm.jump(ool->rejoin());
3286 masm.bind(&returnTrue);
3287 masm.move32(Imm32(1), output);
3289 masm.bind(ool->rejoin());
3292 class OutOfLineRegExpPrototypeOptimizable
3293 : public OutOfLineCodeBase<CodeGenerator> {
3294 LRegExpPrototypeOptimizable* ins_;
3296 public:
3297 explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
3298 : ins_(ins) {}
3300 void accept(CodeGenerator* codegen) override {
3301 codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
3303 LRegExpPrototypeOptimizable* ins() const { return ins_; }
3306 void CodeGenerator::visitRegExpPrototypeOptimizable(
3307 LRegExpPrototypeOptimizable* ins) {
3308 Register object = ToRegister(ins->object());
3309 Register output = ToRegister(ins->output());
3310 Register temp = ToRegister(ins->temp0());
3312 OutOfLineRegExpPrototypeOptimizable* ool =
3313 new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
3314 addOutOfLineCode(ool, ins->mir());
3316 const GlobalObject* global = gen->realm->maybeGlobal();
3317 MOZ_ASSERT(global);
3318 masm.branchIfNotRegExpPrototypeOptimizable(object, temp, global,
3319 ool->entry());
3320 masm.move32(Imm32(0x1), output);
3322 masm.bind(ool->rejoin());
3325 void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
3326 OutOfLineRegExpPrototypeOptimizable* ool) {
3327 LRegExpPrototypeOptimizable* ins = ool->ins();
3328 Register object = ToRegister(ins->object());
3329 Register output = ToRegister(ins->output());
3331 saveVolatile(output);
3333 using Fn = bool (*)(JSContext* cx, JSObject* proto);
3334 masm.setupAlignedABICall();
3335 masm.loadJSContext(output);
3336 masm.passABIArg(output);
3337 masm.passABIArg(object);
3338 masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
3339 masm.storeCallBoolResult(output);
3341 restoreVolatile(output);
3343 masm.jump(ool->rejoin());
3346 class OutOfLineRegExpInstanceOptimizable
3347 : public OutOfLineCodeBase<CodeGenerator> {
3348 LRegExpInstanceOptimizable* ins_;
3350 public:
3351 explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
3352 : ins_(ins) {}
3354 void accept(CodeGenerator* codegen) override {
3355 codegen->visitOutOfLineRegExpInstanceOptimizable(this);
3357 LRegExpInstanceOptimizable* ins() const { return ins_; }
3360 void CodeGenerator::visitRegExpInstanceOptimizable(
3361 LRegExpInstanceOptimizable* ins) {
3362 Register object = ToRegister(ins->object());
3363 Register output = ToRegister(ins->output());
3364 Register temp = ToRegister(ins->temp0());
3366 OutOfLineRegExpInstanceOptimizable* ool =
3367 new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
3368 addOutOfLineCode(ool, ins->mir());
3370 const GlobalObject* global = gen->realm->maybeGlobal();
3371 MOZ_ASSERT(global);
3372 masm.branchIfNotRegExpInstanceOptimizable(object, temp, global, ool->entry());
3373 masm.move32(Imm32(0x1), output);
3375 masm.bind(ool->rejoin());
3378 void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
3379 OutOfLineRegExpInstanceOptimizable* ool) {
3380 LRegExpInstanceOptimizable* ins = ool->ins();
3381 Register object = ToRegister(ins->object());
3382 Register proto = ToRegister(ins->proto());
3383 Register output = ToRegister(ins->output());
3385 saveVolatile(output);
3387 using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
3388 masm.setupAlignedABICall();
3389 masm.loadJSContext(output);
3390 masm.passABIArg(output);
3391 masm.passABIArg(object);
3392 masm.passABIArg(proto);
3393 masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
3394 masm.storeCallBoolResult(output);
3396 restoreVolatile(output);
3398 masm.jump(ool->rejoin());
3401 static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
3402 Register len, Register temp0, Register temp1,
3403 Register output, CharEncoding encoding) {
3404 #ifdef DEBUG
3405 Label ok;
3406 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
3407 masm.assumeUnreachable("Length should be greater than 0.");
3408 masm.bind(&ok);
3409 #endif
3411 Register chars = temp0;
3412 masm.loadStringChars(str, chars, encoding);
3414 masm.move32(Imm32(0), output);
3416 Label start, done;
3417 masm.bind(&start);
3419 Register currentChar = temp1;
3420 masm.loadChar(chars, output, currentChar, encoding);
3421 masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
3423 masm.add32(Imm32(1), output);
3424 masm.branch32(Assembler::NotEqual, output, len, &start);
3426 masm.move32(Imm32(-1), output);
3428 masm.bind(&done);
3431 void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
3432 Register str = ToRegister(ins->str());
3433 Register output = ToRegister(ins->output());
3434 Register temp0 = ToRegister(ins->temp0());
3435 Register temp1 = ToRegister(ins->temp1());
3436 Register len = ToRegister(ins->temp2());
3438 using Fn = bool (*)(JSContext*, JSString*, int32_t*);
3439 OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
3440 ins, ArgList(str), StoreRegisterTo(output));
3442 masm.branchIfRope(str, ool->entry());
3443 masm.loadStringLength(str, len);
3445 Label isLatin1, done;
3446 masm.branchLatin1String(str, &isLatin1);
3448 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3449 CharEncoding::TwoByte);
3450 masm.jump(&done);
3452 masm.bind(&isLatin1);
3454 FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3455 CharEncoding::Latin1);
3457 masm.bind(&done);
3458 masm.bind(ool->rejoin());
3461 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
3462 if (lir->replacement()->isConstant()) {
3463 pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
3464 } else {
3465 pushArg(ToRegister(lir->replacement()));
3468 if (lir->pattern()->isConstant()) {
3469 pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
3470 } else {
3471 pushArg(ToRegister(lir->pattern()));
3474 if (lir->string()->isConstant()) {
3475 pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
3476 } else {
3477 pushArg(ToRegister(lir->string()));
3480 using Fn =
3481 JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
3482 if (lir->mir()->isFlatReplacement()) {
3483 callVM<Fn, StringFlatReplaceString>(lir);
3484 } else {
3485 callVM<Fn, StringReplace>(lir);
3489 void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
3490 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3491 TypedOrValueRegister lhs =
3492 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsIndex));
3493 TypedOrValueRegister rhs =
3494 TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsIndex));
3495 ValueOperand output = ToOutValue(lir);
3497 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3499 switch (jsop) {
3500 case JSOp::Add:
3501 case JSOp::Sub:
3502 case JSOp::Mul:
3503 case JSOp::Div:
3504 case JSOp::Mod:
3505 case JSOp::Pow:
3506 case JSOp::BitAnd:
3507 case JSOp::BitOr:
3508 case JSOp::BitXor:
3509 case JSOp::Lsh:
3510 case JSOp::Rsh:
3511 case JSOp::Ursh: {
3512 IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
3513 addIC(lir, allocateIC(ic));
3514 return;
3516 default:
3517 MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
3521 void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
3522 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3523 TypedOrValueRegister lhs =
3524 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsIndex));
3525 TypedOrValueRegister rhs =
3526 TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsIndex));
3527 Register output = ToRegister(lir->output());
3529 JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3531 switch (jsop) {
3532 case JSOp::Lt:
3533 case JSOp::Le:
3534 case JSOp::Gt:
3535 case JSOp::Ge:
3536 case JSOp::Eq:
3537 case JSOp::Ne:
3538 case JSOp::StrictEq:
3539 case JSOp::StrictNe: {
3540 IonCompareIC ic(liveRegs, lhs, rhs, output);
3541 addIC(lir, allocateIC(ic));
3542 return;
3544 default:
3545 MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
3549 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
3550 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3551 TypedOrValueRegister input =
3552 TypedOrValueRegister(ToValue(lir, LUnaryCache::InputIndex));
3553 ValueOperand output = ToOutValue(lir);
3555 IonUnaryArithIC ic(liveRegs, input, output);
3556 addIC(lir, allocateIC(ic));
3559 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
3560 pushArg(ImmPtr(lir->mir()->module()));
3562 using Fn = JSObject* (*)(JSContext*, HandleObject);
3563 callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
3566 void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
3567 pushArg(ToValue(lir, LDynamicImport::OptionsIndex));
3568 pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
3569 pushArg(ImmGCPtr(current->mir()->info().script()));
3571 using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
3572 callVM<Fn, js::StartDynamicModuleImport>(lir);
3575 void CodeGenerator::visitLambda(LLambda* lir) {
3576 Register envChain = ToRegister(lir->environmentChain());
3577 Register output = ToRegister(lir->output());
3578 Register tempReg = ToRegister(lir->temp0());
3580 JSFunction* fun = lir->mir()->templateFunction();
3582 using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
3583 OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
3584 lir, ArgList(ImmGCPtr(fun), envChain), StoreRegisterTo(output));
3586 TemplateObject templateObject(fun);
3587 masm.createGCObject(output, tempReg, templateObject, gc::Heap::Default,
3588 ool->entry());
3590 masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
3591 Address(output, JSFunction::offsetOfEnvironment()));
3592 // No post barrier needed because output is guaranteed to be allocated in
3593 // the nursery.
3595 masm.bind(ool->rejoin());
3598 void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
3599 Register envChain = ToRegister(lir->envChain());
3600 Register prototype = ToRegister(lir->prototype());
3602 pushArg(prototype);
3603 pushArg(envChain);
3604 pushArg(ImmGCPtr(lir->mir()->function()));
3606 using Fn =
3607 JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
3608 callVM<Fn, js::FunWithProtoOperation>(lir);
3611 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
3612 pushArg(Imm32(lir->mir()->prefixKind()));
3613 pushArg(ToValue(lir, LSetFunName::NameIndex));
3614 pushArg(ToRegister(lir->fun()));
3616 using Fn =
3617 bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
3618 callVM<Fn, js::SetFunctionName>(lir);
3621 void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
3622 // Note: markOsiPoint ensures enough space exists between the last
3623 // LOsiPoint and this one to patch adjacent call instructions.
3625 MOZ_ASSERT(masm.framePushed() == frameSize());
3627 uint32_t osiCallPointOffset = markOsiPoint(lir);
3629 LSafepoint* safepoint = lir->associatedSafepoint();
3630 MOZ_ASSERT(!safepoint->osiCallPointOffset());
3631 safepoint->setOsiCallPointOffset(osiCallPointOffset);
3633 #ifdef DEBUG
3634 // There should be no movegroups or other instructions between
3635 // an instruction and its OsiPoint. This is necessary because
3636 // we use the OsiPoint's snapshot from within VM calls.
3637 for (LInstructionReverseIterator iter(current->rbegin(lir));
3638 iter != current->rend(); iter++) {
3639 if (*iter == lir) {
3640 continue;
3642 MOZ_ASSERT(!iter->isMoveGroup());
3643 MOZ_ASSERT(iter->safepoint() == safepoint);
3644 break;
3646 #endif
3648 #ifdef CHECK_OSIPOINT_REGISTERS
3649 if (shouldVerifyOsiPointRegs(safepoint)) {
3650 verifyOsiPointRegs(safepoint);
3652 #endif
3655 void CodeGenerator::visitPhi(LPhi* lir) {
3656 MOZ_CRASH("Unexpected LPhi in CodeGenerator");
3659 void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
3661 void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
3662 MTableSwitch* mir = ins->mir();
3663 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3664 const LAllocation* temp;
3666 if (mir->getOperand(0)->type() != MIRType::Int32) {
3667 temp = ins->tempInt()->output();
3669 // The input is a double, so try and convert it to an integer.
3670 // If it does not fit in an integer, take the default case.
3671 masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
3672 defaultcase, false);
3673 } else {
3674 temp = ins->index();
3677 emitTableSwitchDispatch(mir, ToRegister(temp),
3678 ToRegisterOrInvalid(ins->tempPointer()));
3681 void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
3682 MTableSwitch* mir = ins->mir();
3683 Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3685 Register index = ToRegister(ins->tempInt());
3686 ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
3687 Register tag = masm.extractTag(value, index);
3688 masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
3690 Label unboxInt, isInt;
3691 masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
3693 FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
3694 masm.unboxDouble(value, floatIndex);
3695 masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
3696 masm.jump(&isInt);
3699 masm.bind(&unboxInt);
3700 masm.unboxInt32(value, index);
3702 masm.bind(&isInt);
3704 emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
3707 void CodeGenerator::visitParameter(LParameter* lir) {}
3709 void CodeGenerator::visitCallee(LCallee* lir) {
3710 Register callee = ToRegister(lir->output());
3711 Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3713 masm.loadFunctionFromCalleeToken(ptr, callee);
3716 void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
3717 Register output = ToRegister(lir->output());
3718 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
3719 masm.loadPtr(calleeToken, output);
3721 // We must be inside a function.
3722 MOZ_ASSERT(current->mir()->info().script()->function());
3724 // The low bit indicates whether this call is constructing, just clear the
3725 // other bits.
3726 static_assert(CalleeToken_Function == 0x0,
3727 "CalleeTokenTag value should match");
3728 static_assert(CalleeToken_FunctionConstructing == 0x1,
3729 "CalleeTokenTag value should match");
3730 masm.andPtr(Imm32(0x1), output);
3733 void CodeGenerator::visitReturn(LReturn* lir) {
3734 #if defined(JS_NUNBOX32)
3735 DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
3736 DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
3737 MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
3738 MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
3739 #elif defined(JS_PUNBOX64)
3740 DebugOnly<LAllocation*> result = lir->getOperand(0);
3741 MOZ_ASSERT(ToRegister(result) == JSReturnReg);
3742 #endif
3743 // Don't emit a jump to the return label if this is the last block, as
3744 // it'll fall through to the epilogue.
3746 // This is -not- true however for a Generator-return, which may appear in the
3747 // middle of the last block, so we should always emit the jump there.
3748 if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
3749 masm.jump(&returnLabel_);
3753 void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
3754 Register temp = ToRegister(lir->temp());
3756 // Remember the OSR entry offset into the code buffer.
3757 masm.flushBuffer();
3758 setOsrEntryOffset(masm.size());
3760 // Allocate the full frame for this function
3761 // Note we have a new entry here. So we reset MacroAssembler::framePushed()
3762 // to 0, before reserving the stack.
3763 MOZ_ASSERT(masm.framePushed() == frameSize());
3764 masm.setFramePushed(0);
3766 // The Baseline code ensured both the frame pointer and stack pointer point to
3767 // the JitFrameLayout on the stack.
3769 // If profiling, save the current frame pointer to a per-thread global field.
3770 if (isProfilerInstrumentationEnabled()) {
3771 masm.profilerEnterFrame(FramePointer, temp);
3774 masm.reserveStack(frameSize());
3775 MOZ_ASSERT(masm.framePushed() == frameSize());
3777 // Ensure that the Ion frames is properly aligned.
3778 masm.assertStackAlignment(JitStackAlignment, 0);
3781 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3782 const LAllocation* frame = lir->getOperand(0);
3783 const LDefinition* object = lir->getDef(0);
3785 const ptrdiff_t frameOffset =
3786 BaselineFrame::reverseOffsetOfEnvironmentChain();
3788 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3791 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3792 const LAllocation* frame = lir->getOperand(0);
3793 const LDefinition* object = lir->getDef(0);
3795 const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3797 masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3800 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3801 const LAllocation* frame = value->getOperand(0);
3802 const ValueOperand out = ToOutValue(value);
3804 const ptrdiff_t frameOffset = value->mir()->frameOffset();
3806 masm.loadValue(Address(ToRegister(frame), frameOffset), out);
3809 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3810 const LAllocation* frame = lir->getOperand(0);
3811 const ValueOperand out = ToOutValue(lir);
3813 Address flags =
3814 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3815 Address retval =
3816 Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
3818 masm.moveValue(UndefinedValue(), out);
3820 Label done;
3821 masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3822 &done);
3823 masm.loadValue(retval, out);
3824 masm.bind(&done);
3827 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3828 const LAllocation* arg = lir->arg();
3829 MIRType argType = lir->type();
3830 uint32_t argslot = lir->argslot();
3831 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3833 Address dest = AddressOfPassedArg(argslot);
3835 if (arg->isFloatReg()) {
3836 masm.boxDouble(ToFloatRegister(arg), dest);
3837 } else if (arg->isRegister()) {
3838 masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3839 } else {
3840 masm.storeValue(arg->toConstant()->toJSValue(), dest);
3844 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3845 ValueOperand val = ToValue(lir, 0);
3846 uint32_t argslot = lir->argslot();
3847 MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3849 masm.storeValue(val, AddressOfPassedArg(argslot));
3852 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3853 if (!group->numMoves()) {
3854 return;
3857 MoveResolver& resolver = masm.moveResolver();
3859 for (size_t i = 0; i < group->numMoves(); i++) {
3860 const LMove& move = group->getMove(i);
3862 LAllocation from = move.from();
3863 LAllocation to = move.to();
3864 LDefinition::Type type = move.type();
3866 // No bogus moves.
3867 MOZ_ASSERT(from != to);
3868 MOZ_ASSERT(!from.isConstant());
3869 MoveOp::Type moveType;
3870 switch (type) {
3871 case LDefinition::OBJECT:
3872 case LDefinition::SLOTS:
3873 case LDefinition::WASM_ANYREF:
3874 #ifdef JS_NUNBOX32
3875 case LDefinition::TYPE:
3876 case LDefinition::PAYLOAD:
3877 #else
3878 case LDefinition::BOX:
3879 #endif
3880 case LDefinition::GENERAL:
3881 case LDefinition::STACKRESULTS:
3882 moveType = MoveOp::GENERAL;
3883 break;
3884 case LDefinition::INT32:
3885 moveType = MoveOp::INT32;
3886 break;
3887 case LDefinition::FLOAT32:
3888 moveType = MoveOp::FLOAT32;
3889 break;
3890 case LDefinition::DOUBLE:
3891 moveType = MoveOp::DOUBLE;
3892 break;
3893 case LDefinition::SIMD128:
3894 moveType = MoveOp::SIMD128;
3895 break;
3896 default:
3897 MOZ_CRASH("Unexpected move type");
3900 masm.propagateOOM(
3901 resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3904 masm.propagateOOM(resolver.resolve());
3905 if (masm.oom()) {
3906 return;
3909 MoveEmitter emitter(masm);
3911 #ifdef JS_CODEGEN_X86
3912 if (group->maybeScratchRegister().isGeneralReg()) {
3913 emitter.setScratchRegister(
3914 group->maybeScratchRegister().toGeneralReg()->reg());
3915 } else {
3916 resolver.sortMemoryToMemoryMoves();
3918 #endif
3920 emitter.emit(resolver);
3921 emitter.finish();
3924 void CodeGenerator::visitInteger(LInteger* lir) {
3925 masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
3928 void CodeGenerator::visitInteger64(LInteger64* lir) {
3929 masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
3932 void CodeGenerator::visitPointer(LPointer* lir) {
3933 masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
3936 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3937 Register output = ToRegister(lir->output());
3938 uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3940 // Load a pointer to the entry in IonScript's nursery objects list.
3941 CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3942 masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
3944 // Load the JSObject*.
3945 masm.loadPtr(Address(output, 0), output);
3948 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3949 // No-op.
3952 void CodeGenerator::visitDebugEnterGCUnsafeRegion(
3953 LDebugEnterGCUnsafeRegion* lir) {
3954 Register temp = ToRegister(lir->temp0());
3956 masm.loadJSContext(temp);
3958 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3959 masm.add32(Imm32(1), inUnsafeRegion);
3961 Label ok;
3962 masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
3963 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
3964 masm.bind(&ok);
3967 void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
3968 LDebugLeaveGCUnsafeRegion* lir) {
3969 Register temp = ToRegister(lir->temp0());
3971 masm.loadJSContext(temp);
3973 Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
3974 masm.add32(Imm32(-1), inUnsafeRegion);
3976 Label ok;
3977 masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
3978 masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
3979 masm.bind(&ok);
3982 void CodeGenerator::visitSlots(LSlots* lir) {
3983 Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
3984 masm.loadPtr(slots, ToRegister(lir->output()));
3987 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
3988 ValueOperand dest = ToOutValue(lir);
3989 Register base = ToRegister(lir->input());
3990 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
3992 masm.loadValue(Address(base, offset), dest);
3995 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
3996 MIRType valueType) {
3997 if (value->isConstant()) {
3998 return ConstantOrRegister(value->toConstant()->toJSValue());
4000 return TypedOrValueRegister(valueType, ToAnyRegister(value));
4003 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
4004 Register base = ToRegister(lir->slots());
4005 int32_t offset = lir->mir()->slot() * sizeof(js::Value);
4006 Address dest(base, offset);
4008 if (lir->mir()->needsBarrier()) {
4009 emitPreBarrier(dest);
4012 MIRType valueType = lir->mir()->value()->type();
4013 ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
4014 masm.storeUnboxedValue(value, valueType, dest);
4017 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
4018 Register base = ToRegister(lir->slots());
4019 int32_t offset = lir->mir()->slot() * sizeof(Value);
4021 const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::ValueIndex);
4023 if (lir->mir()->needsBarrier()) {
4024 emitPreBarrier(Address(base, offset));
4027 masm.storeValue(value, Address(base, offset));
4030 void CodeGenerator::visitElements(LElements* lir) {
4031 Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
4032 masm.loadPtr(elements, ToRegister(lir->output()));
4035 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
4036 Address environment(ToRegister(lir->function()),
4037 JSFunction::offsetOfEnvironment());
4038 masm.unboxObject(environment, ToRegister(lir->output()));
4041 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
4042 Register func = ToRegister(lir->function());
4043 Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
4045 masm.assertFunctionIsExtended(func);
4046 #ifdef DEBUG
4047 Label isObject;
4048 masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
4049 masm.assumeUnreachable("[[HomeObject]] must be Object");
4050 masm.bind(&isObject);
4051 #endif
4053 masm.unboxObject(homeObject, ToRegister(lir->output()));
4056 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
4057 Register homeObject = ToRegister(lir->homeObject());
4058 ValueOperand output = ToOutValue(lir);
4059 Register temp = output.scratchReg();
4061 masm.loadObjProto(homeObject, temp);
4063 #ifdef DEBUG
4064 // We won't encounter a lazy proto, because the prototype is guaranteed to
4065 // either be a JSFunction or a PlainObject, and only proxy objects can have a
4066 // lazy proto.
4067 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4069 Label proxyCheckDone;
4070 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
4071 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4072 masm.bind(&proxyCheckDone);
4073 #endif
4075 Label nullProto, done;
4076 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
4078 // Box prototype and return
4079 masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
4080 masm.jump(&done);
4082 masm.bind(&nullProto);
4083 masm.moveValue(NullValue(), output);
4085 masm.bind(&done);
4088 template <class T>
4089 static T* ToConstantObject(MDefinition* def) {
4090 MOZ_ASSERT(def->isConstant());
4091 return &def->toConstant()->toObject().as<T>();
4094 void CodeGenerator::visitNewLexicalEnvironmentObject(
4095 LNewLexicalEnvironmentObject* lir) {
4096 Register output = ToRegister(lir->output());
4097 Register temp = ToRegister(lir->temp0());
4099 auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
4100 lir->mir()->templateObj());
4101 auto* scope = &templateObj->scope();
4102 gc::Heap initialHeap = gc::Heap::Default;
4104 using Fn =
4105 BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
4106 auto* ool =
4107 oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
4108 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4110 TemplateObject templateObject(templateObj);
4111 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4113 masm.bind(ool->rejoin());
4116 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4117 LNewClassBodyEnvironmentObject* lir) {
4118 Register output = ToRegister(lir->output());
4119 Register temp = ToRegister(lir->temp0());
4121 auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
4122 lir->mir()->templateObj());
4123 auto* scope = &templateObj->scope();
4124 gc::Heap initialHeap = gc::Heap::Default;
4126 using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4127 Handle<ClassBodyScope*>);
4128 auto* ool =
4129 oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
4130 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4132 TemplateObject templateObject(templateObj);
4133 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4135 masm.bind(ool->rejoin());
4138 void CodeGenerator::visitNewVarEnvironmentObject(
4139 LNewVarEnvironmentObject* lir) {
4140 Register output = ToRegister(lir->output());
4141 Register temp = ToRegister(lir->temp0());
4143 auto* templateObj =
4144 ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
4145 auto* scope = &templateObj->scope().as<VarScope>();
4146 gc::Heap initialHeap = gc::Heap::Default;
4148 using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
4149 auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
4150 lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
4152 TemplateObject templateObject(templateObj);
4153 masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
4155 masm.bind(ool->rejoin());
4158 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4159 Register obj = ToRegister(guard->input());
4160 Register temp = ToTempRegisterOrInvalid(guard->temp0());
4161 Label bail;
4162 masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4163 obj, &bail);
4164 bailoutFrom(&bail, guard->snapshot());
4167 void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
4168 Register temp = ToRegister(guard->temp0());
4169 Label bail;
4171 // Bake specific fuse address for Ion code, because we won't share this code
4172 // across realms.
4173 GuardFuse* fuse =
4174 mirGen().realm->realmFuses().getFuseByIndex(guard->mir()->fuseIndex());
4175 masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
4176 masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
4178 bailoutFrom(&bail, guard->snapshot());
4181 void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
4182 Register obj = ToRegister(guard->object());
4183 Register shapeList = ToRegister(guard->shapeList());
4184 Register temp = ToRegister(guard->temp0());
4185 Register temp2 = ToRegister(guard->temp1());
4186 Register temp3 = ToRegister(guard->temp2());
4187 Register spectre = ToTempRegisterOrInvalid(guard->temp3());
4189 Label bail;
4190 masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
4191 masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
4192 spectre, &bail);
4193 bailoutFrom(&bail, guard->snapshot());
4196 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4197 Register obj = ToRegister(guard->object());
4198 Register expected = ToRegister(guard->expected());
4199 Register temp = ToRegister(guard->temp0());
4201 masm.loadObjProto(obj, temp);
4203 Label bail;
4204 masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4205 bailoutFrom(&bail, guard->snapshot());
4208 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4209 Register obj = ToRegister(guard->input());
4210 Register temp = ToRegister(guard->temp0());
4212 masm.loadObjProto(obj, temp);
4214 Label bail;
4215 masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4216 bailoutFrom(&bail, guard->snapshot());
4219 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4220 Register obj = ToRegister(guard->input());
4221 Register temp = ToRegister(guard->temp0());
4223 Label bail;
4224 masm.branchIfNonNativeObj(obj, temp, &bail);
4225 bailoutFrom(&bail, guard->snapshot());
4228 void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
4229 Register temp = ToRegister(guard->temp0());
4230 Label bail;
4232 masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
4233 masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
4234 &bail);
4235 bailoutFrom(&bail, guard->snapshot());
4238 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4239 Register obj = ToRegister(guard->input());
4240 Register temp = ToRegister(guard->temp0());
4242 Label bail;
4243 masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4244 bailoutFrom(&bail, guard->snapshot());
4247 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4248 Register obj = ToRegister(guard->input());
4249 Register temp = ToRegister(guard->temp0());
4251 Label bail;
4252 masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4253 bailoutFrom(&bail, guard->snapshot());
4256 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4257 Register proxy = ToRegister(guard->proxy());
4258 Register temp = ToRegister(guard->temp0());
4260 Label bail;
4261 masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4262 GetDOMProxyHandlerFamily(), &bail);
4263 bailoutFrom(&bail, guard->snapshot());
4266 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4267 Register proxy = ToRegister(lir->proxy());
4268 Register temp = ToRegister(lir->temp0());
4270 pushArg(lir->mir()->id(), temp);
4271 pushArg(proxy);
4273 using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4274 callVM<Fn, ProxyGetProperty>(lir);
4277 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4278 Register proxy = ToRegister(lir->proxy());
4279 ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4281 pushArg(idVal);
4282 pushArg(proxy);
4284 using Fn =
4285 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4286 callVM<Fn, ProxyGetPropertyByValue>(lir);
4289 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4290 Register proxy = ToRegister(lir->proxy());
4291 ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4293 pushArg(idVal);
4294 pushArg(proxy);
4296 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4297 if (lir->mir()->hasOwn()) {
4298 callVM<Fn, ProxyHasOwn>(lir);
4299 } else {
4300 callVM<Fn, ProxyHas>(lir);
4304 void CodeGenerator::visitProxySet(LProxySet* lir) {
4305 Register proxy = ToRegister(lir->proxy());
4306 ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4307 Register temp = ToRegister(lir->temp0());
4309 pushArg(Imm32(lir->mir()->strict()));
4310 pushArg(rhs);
4311 pushArg(lir->mir()->id(), temp);
4312 pushArg(proxy);
4314 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4315 callVM<Fn, ProxySetProperty>(lir);
4318 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4319 Register proxy = ToRegister(lir->proxy());
4320 ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4321 ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4323 pushArg(Imm32(lir->mir()->strict()));
4324 pushArg(rhs);
4325 pushArg(idVal);
4326 pushArg(proxy);
4328 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4329 callVM<Fn, ProxySetPropertyByValue>(lir);
4332 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4333 Register obj = ToRegister(lir->obj());
4334 ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4336 pushArg(Imm32(lir->mir()->strict()));
4337 pushArg(rhs);
4338 pushArg(obj);
4340 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4341 callVM<Fn, jit::SetArrayLength>(lir);
4344 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4345 Register obj = ToRegister(lir->object());
4346 Register temp0 = ToRegister(lir->temp0());
4347 Register temp1 = ToRegister(lir->temp1());
4348 Register temp2 = ToRegister(lir->temp2());
4349 Register temp3 = ToRegister(lir->temp3());
4350 ValueOperand output = ToOutValue(lir);
4352 Label bail, cacheHit;
4353 masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
4354 output, &cacheHit);
4356 masm.branchIfNonNativeObj(obj, temp0, &bail);
4358 masm.Push(UndefinedValue());
4359 masm.moveStackPtrTo(temp3);
4361 using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
4362 MegamorphicCache::Entry* cacheEntry, Value* vp);
4363 masm.setupAlignedABICall();
4364 masm.loadJSContext(temp0);
4365 masm.passABIArg(temp0);
4366 masm.passABIArg(obj);
4367 masm.movePropertyKey(lir->mir()->name(), temp1);
4368 masm.passABIArg(temp1);
4369 masm.passABIArg(temp2);
4370 masm.passABIArg(temp3);
4372 masm.callWithABI<Fn, GetNativeDataPropertyPure>();
4374 MOZ_ASSERT(!output.aliases(ReturnReg));
4375 masm.Pop(output);
4377 masm.branchIfFalseBool(ReturnReg, &bail);
4379 masm.bind(&cacheHit);
4380 bailoutFrom(&bail, lir->snapshot());
4383 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4384 LMegamorphicLoadSlotByValue* lir) {
4385 Register obj = ToRegister(lir->object());
4386 ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4387 Register temp0 = ToRegister(lir->temp0());
4388 Register temp1 = ToRegister(lir->temp1());
4389 Register temp2 = ToRegister(lir->temp2());
4390 ValueOperand output = ToOutValue(lir);
4392 Label bail, cacheHit;
4393 masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
4394 output, &cacheHit);
4396 masm.branchIfNonNativeObj(obj, temp0, &bail);
4398 // idVal will be in vp[0], result will be stored in vp[1].
4399 masm.reserveStack(sizeof(Value));
4400 masm.Push(idVal);
4401 masm.moveStackPtrTo(temp0);
4403 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4404 MegamorphicCache::Entry* cacheEntry, Value* vp);
4405 masm.setupAlignedABICall();
4406 masm.loadJSContext(temp1);
4407 masm.passABIArg(temp1);
4408 masm.passABIArg(obj);
4409 masm.passABIArg(temp2);
4410 masm.passABIArg(temp0);
4411 masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
4413 MOZ_ASSERT(!idVal.aliases(temp0));
4414 masm.storeCallPointerResult(temp0);
4415 masm.Pop(idVal);
4417 uint32_t framePushed = masm.framePushed();
4418 Label ok;
4419 masm.branchIfTrueBool(temp0, &ok);
4420 masm.freeStack(sizeof(Value)); // Discard result Value.
4421 masm.jump(&bail);
4423 masm.bind(&ok);
4424 masm.setFramePushed(framePushed);
4425 masm.Pop(output);
4427 masm.bind(&cacheHit);
4428 bailoutFrom(&bail, lir->snapshot());
4431 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4432 Register obj = ToRegister(lir->object());
4433 ValueOperand value = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4435 Register temp0 = ToRegister(lir->temp0());
4436 #ifndef JS_CODEGEN_X86
4437 Register temp1 = ToRegister(lir->temp1());
4438 Register temp2 = ToRegister(lir->temp2());
4439 #endif
4441 Label cacheHit, done;
4442 #ifdef JS_CODEGEN_X86
4443 masm.emitMegamorphicCachedSetSlot(
4444 lir->mir()->name(), obj, temp0, value, &cacheHit,
4445 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4446 EmitPreBarrier(masm, addr, mirType);
4448 #else
4449 masm.emitMegamorphicCachedSetSlot(
4450 lir->mir()->name(), obj, temp0, temp1, temp2, value, &cacheHit,
4451 [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
4452 EmitPreBarrier(masm, addr, mirType);
4454 #endif
4456 pushArg(Imm32(lir->mir()->strict()));
4457 pushArg(value);
4458 pushArg(lir->mir()->name(), temp0);
4459 pushArg(obj);
4461 using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4462 callVM<Fn, SetPropertyMegamorphic<true>>(lir);
4464 masm.jump(&done);
4465 masm.bind(&cacheHit);
4467 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
4468 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
4470 saveVolatile(temp0);
4471 emitPostWriteBarrier(obj);
4472 restoreVolatile(temp0);
4474 masm.bind(&done);
4477 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4478 Register obj = ToRegister(lir->object());
4479 ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4480 Register temp0 = ToRegister(lir->temp0());
4481 Register temp1 = ToRegister(lir->temp1());
4482 Register temp2 = ToRegister(lir->temp2());
4483 Register output = ToRegister(lir->output());
4485 Label bail, cacheHit;
4486 masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
4487 &cacheHit, lir->mir()->hasOwn());
4489 masm.branchIfNonNativeObj(obj, temp0, &bail);
4491 // idVal will be in vp[0], result will be stored in vp[1].
4492 masm.reserveStack(sizeof(Value));
4493 masm.Push(idVal);
4494 masm.moveStackPtrTo(temp0);
4496 using Fn = bool (*)(JSContext* cx, JSObject* obj,
4497 MegamorphicCache::Entry* cacheEntry, Value* vp);
4498 masm.setupAlignedABICall();
4499 masm.loadJSContext(temp1);
4500 masm.passABIArg(temp1);
4501 masm.passABIArg(obj);
4502 masm.passABIArg(temp2);
4503 masm.passABIArg(temp0);
4504 if (lir->mir()->hasOwn()) {
4505 masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4506 } else {
4507 masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
4510 MOZ_ASSERT(!idVal.aliases(temp0));
4511 masm.storeCallPointerResult(temp0);
4512 masm.Pop(idVal);
4514 uint32_t framePushed = masm.framePushed();
4515 Label ok;
4516 masm.branchIfTrueBool(temp0, &ok);
4517 masm.freeStack(sizeof(Value)); // Discard result Value.
4518 masm.jump(&bail);
4520 masm.bind(&ok);
4521 masm.setFramePushed(framePushed);
4522 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4523 masm.freeStack(sizeof(Value));
4524 masm.bind(&cacheHit);
4526 bailoutFrom(&bail, lir->snapshot());
4529 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4530 LGuardIsNotArrayBufferMaybeShared* guard) {
4531 Register obj = ToRegister(guard->input());
4532 Register temp = ToRegister(guard->temp0());
4534 Label bail;
4535 masm.loadObjClassUnsafe(obj, temp);
4536 masm.branchPtr(Assembler::Equal, temp, ImmPtr(&ArrayBufferObject::class_),
4537 &bail);
4538 masm.branchPtr(Assembler::Equal, temp,
4539 ImmPtr(&SharedArrayBufferObject::class_), &bail);
4540 bailoutFrom(&bail, guard->snapshot());
4543 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4544 Register obj = ToRegister(guard->input());
4545 Register temp = ToRegister(guard->temp0());
4547 Label bail;
4548 masm.loadObjClassUnsafe(obj, temp);
4549 masm.branchIfClassIsNotTypedArray(temp, &bail);
4550 bailoutFrom(&bail, guard->snapshot());
4553 void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
4554 Register obj = ToRegister(guard->input());
4556 Label bail;
4558 Address handlerAddr(obj, ProxyObject::offsetOfHandler());
4559 masm.branchPtr(Assembler::NotEqual, handlerAddr,
4560 ImmPtr(guard->mir()->handler()), &bail);
4562 bailoutFrom(&bail, guard->snapshot());
4565 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4566 Register input = ToRegister(guard->input());
4567 Register expected = ToRegister(guard->expected());
4569 Assembler::Condition cond =
4570 guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4571 bailoutCmpPtr(cond, input, expected, guard->snapshot());
4574 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4575 Register input = ToRegister(guard->input());
4576 Register expected = ToRegister(guard->expected());
4578 bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
4581 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4582 Register str = ToRegister(guard->str());
4583 Register scratch = ToRegister(guard->temp0());
4585 LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4586 volatileRegs.takeUnchecked(scratch);
4588 Label bail;
4589 masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4590 &bail);
4591 bailoutFrom(&bail, guard->snapshot());
4594 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4595 Register symbol = ToRegister(guard->symbol());
4597 bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4598 guard->snapshot());
4601 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
4602 Register num = ToRegister(guard->num());
4604 bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
4605 guard->snapshot());
4608 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4609 Register str = ToRegister(lir->string());
4610 Register output = ToRegister(lir->output());
4612 Label vmCall, done;
4613 masm.loadStringIndexValue(str, output, &vmCall);
4614 masm.jump(&done);
4617 masm.bind(&vmCall);
4619 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4620 volatileRegs.takeUnchecked(output);
4621 masm.PushRegsInMask(volatileRegs);
4623 using Fn = int32_t (*)(JSString* str);
4624 masm.setupAlignedABICall();
4625 masm.passABIArg(str);
4626 masm.callWithABI<Fn, GetIndexFromString>();
4627 masm.storeCallInt32Result(output);
4629 masm.PopRegsInMask(volatileRegs);
4631 // GetIndexFromString returns a negative value on failure.
4632 bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
4635 masm.bind(&done);
4638 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4639 Register str = ToRegister(lir->string());
4640 Register output = ToRegister(lir->output());
4641 Register temp = ToRegister(lir->temp0());
4643 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4645 Label bail;
4646 masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4647 bailoutFrom(&bail, lir->snapshot());
4650 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4651 Register str = ToRegister(lir->string());
4652 FloatRegister output = ToFloatRegister(lir->output());
4653 Register temp0 = ToRegister(lir->temp0());
4654 Register temp1 = ToRegister(lir->temp1());
4656 Label vmCall, done;
4657 // Use indexed value as fast path if possible.
4658 masm.loadStringIndexValue(str, temp0, &vmCall);
4659 masm.convertInt32ToDouble(temp0, output);
4660 masm.jump(&done);
4662 masm.bind(&vmCall);
4664 // Reserve stack for holding the result value of the call.
4665 masm.reserveStack(sizeof(double));
4666 masm.moveStackPtrTo(temp0);
4668 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4669 volatileRegs.takeUnchecked(temp0);
4670 volatileRegs.takeUnchecked(temp1);
4671 masm.PushRegsInMask(volatileRegs);
4673 using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
4674 masm.setupAlignedABICall();
4675 masm.loadJSContext(temp1);
4676 masm.passABIArg(temp1);
4677 masm.passABIArg(str);
4678 masm.passABIArg(temp0);
4679 masm.callWithABI<Fn, StringToNumberPure>();
4680 masm.storeCallPointerResult(temp0);
4682 masm.PopRegsInMask(volatileRegs);
4684 Label ok;
4685 masm.branchIfTrueBool(temp0, &ok);
4687 // OOM path, recovered by StringToNumberPure.
4689 // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4690 // flow-insensitively, and using it here would confuse the stack height
4691 // tracking.
4692 masm.addToStackPtr(Imm32(sizeof(double)));
4693 bailout(lir->snapshot());
4695 masm.bind(&ok);
4696 masm.Pop(output);
4698 masm.bind(&done);
4701 void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
4702 Register obj = ToRegister(guard->input());
4703 Register temp = ToRegister(guard->temp0());
4705 // Load obj->elements.
4706 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
4708 // Make sure there are no dense elements.
4709 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
4710 bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
4713 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4714 Register input = ToRegister(lir->input());
4715 Register64 output = ToOutRegister64(lir);
4717 masm.move32To64ZeroExtend(input, output);
4720 void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
4721 Register64 output) {
4722 Register temp = output.scratchReg();
4724 saveLive(lir);
4726 masm.reserveStack(sizeof(uint64_t));
4727 masm.moveStackPtrTo(temp);
4728 pushArg(temp);
4729 pushArg(input);
4731 using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
4732 callVM<Fn, DoStringToInt64>(lir);
4734 masm.load64(Address(masm.getStackPointer(), 0), output);
4735 masm.freeStack(sizeof(uint64_t));
4737 restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
4740 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4741 Register input = ToRegister(lir->input());
4742 Register64 output = ToOutRegister64(lir);
4744 emitStringToInt64(lir, input, output);
4747 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4748 ValueOperand input = ToValue(lir, LValueToInt64::InputIndex);
4749 Register temp = ToRegister(lir->temp0());
4750 Register64 output = ToOutRegister64(lir);
4752 int checks = 3;
4754 Label fail, done;
4755 // Jump to fail if this is the last check and we fail it,
4756 // otherwise to the next test.
4757 auto emitTestAndUnbox = [&](auto testAndUnbox) {
4758 MOZ_ASSERT(checks > 0);
4760 checks--;
4761 Label notType;
4762 Label* target = checks ? &notType : &fail;
4764 testAndUnbox(target);
4766 if (checks) {
4767 masm.jump(&done);
4768 masm.bind(&notType);
4772 Register tag = masm.extractTag(input, temp);
4774 // BigInt.
4775 emitTestAndUnbox([&](Label* target) {
4776 masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4777 masm.unboxBigInt(input, temp);
4778 masm.loadBigInt64(temp, output);
4781 // Boolean
4782 emitTestAndUnbox([&](Label* target) {
4783 masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4784 masm.unboxBoolean(input, temp);
4785 masm.move32To64ZeroExtend(temp, output);
4788 // String
4789 emitTestAndUnbox([&](Label* target) {
4790 masm.branchTestString(Assembler::NotEqual, tag, target);
4791 masm.unboxString(input, temp);
4792 emitStringToInt64(lir, temp, output);
4795 MOZ_ASSERT(checks == 0);
4797 bailoutFrom(&fail, lir->snapshot());
4798 masm.bind(&done);
4801 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4802 Register operand = ToRegister(lir->input());
4803 Register64 output = ToOutRegister64(lir);
4805 masm.loadBigInt64(operand, output);
4808 OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
4809 Scalar::Type type,
4810 Register64 input,
4811 Register output) {
4812 #if JS_BITS_PER_WORD == 32
4813 using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
4814 auto args = ArgList(input.low, input.high);
4815 #else
4816 using Fn = BigInt* (*)(JSContext*, uint64_t);
4817 auto args = ArgList(input);
4818 #endif
4820 if (type == Scalar::BigInt64) {
4821 return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
4822 StoreRegisterTo(output));
4824 MOZ_ASSERT(type == Scalar::BigUint64);
4825 return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
4826 StoreRegisterTo(output));
// Allocate a BigInt for a 64-bit value: try inline nursery allocation first,
// falling back to the out-of-line VM call from createBigIntOutOfLine.
// When no temp register was supplied, a scratch register is borrowed by
// pushing/popping it around the allocation attempt; note the pop happens on
// both the success and failure paths before continuing.
4829 void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
4830 Register64 input, Register output,
4831 Register maybeTemp) {
4832 OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
4834 if (maybeTemp != InvalidReg) {
4835 masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
4836 } else {
4837 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
4838 regs.take(input);
4839 regs.take(output);
4841 Register temp = regs.takeAny();
4843 masm.push(temp);
4845 Label fail, ok;
4846 masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
4847 masm.pop(temp);
4848 masm.jump(&ok);
4849 masm.bind(&fail);
4850 masm.pop(temp);
4851 masm.jump(ool->entry());
4852 masm.bind(&ok);
// The inline-allocated BigInt still needs its digits initialized from input.
4854 masm.initializeBigInt64(type, output, input);
4855 masm.bind(ool->rejoin());
// Box a signed int64 value as a BigInt object.
4858 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
4859 Register64 input = ToRegister64(lir->input());
4860 Register temp = ToRegister(lir->temp0());
4861 Register output = ToRegister(lir->output());
4863 emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
// Bail out of Ion code unless the boxed input equals the expected constant
// Value exactly.
4866 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
4867 ValueOperand input = ToValue(lir, LGuardValue::InputIndex);
4868 Value expected = lir->mir()->expected();
4869 Label bail;
4870 masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
4871 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the input is null or undefined. The tag is split out once
// and tested against both type tags.
4874 void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
4875 ValueOperand input = ToValue(lir, LGuardNullOrUndefined::InputIndex);
4877 ScratchTagScope tag(masm, input);
4878 masm.splitTagForTest(input, tag);
4880 Label done;
4881 masm.branchTestNull(Assembler::Equal, tag, &done);
4883 Label bail;
4884 masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
4885 bailoutFrom(&bail, lir->snapshot());
4887 masm.bind(&done);
// Bail out if the input value is an object.
4890 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
4891 ValueOperand input = ToValue(lir, LGuardIsNotObject::InputIndex);
4893 Label bail;
4894 masm.branchTestObject(Assembler::Equal, input, &bail);
4895 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function's flag word has all expectedFlags() set and
// none of unexpectedFlags() set. Either mask may be zero, in which case the
// corresponding test is skipped entirely.
4898 void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
4899 Register function = ToRegister(lir->function());
4901 Label bail;
4902 if (uint16_t flags = lir->mir()->expectedFlags()) {
4903 masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
4905 if (uint16_t flags = lir->mir()->unexpectedFlags()) {
4906 masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
4908 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function is a constructor that is not a built-in.
4911 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
4912 LGuardFunctionIsNonBuiltinCtor* lir) {
4913 Register function = ToRegister(lir->function());
4914 Register temp = ToRegister(lir->temp0());
4916 Label bail;
4917 masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
4918 bailoutFrom(&bail, lir->snapshot());
// Bail out based on the function's kind. bailOnEquality() selects whether
// matching the expected kind is the failure case or the success case.
4921 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
4922 Register function = ToRegister(lir->function());
4923 Register temp = ToRegister(lir->temp0());
4925 Assembler::Condition cond =
4926 lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4928 Label bail;
4929 masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
4930 bailoutFrom(&bail, lir->snapshot());
// Bail out unless the function's jit-info/script slot holds the expected
// BaseScript pointer.
4933 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
4934 Register function = ToRegister(lir->function());
4936 Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
4937 bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
4938 ImmGCPtr(lir->mir()->expected()), lir->snapshot());
4941 // Out-of-line path to update the store buffer.
// Captures the LIR instruction (for live-register save/restore) and the
// object allocation whose write needs a post-write barrier.
4942 class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
4943 LInstruction* lir_;
4944 const LAllocation* object_;
4946 public:
4947 OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
4948 : lir_(lir), object_(object) {}
4950 void accept(CodeGenerator* codegen) override {
4951 codegen->visitOutOfLineCallPostWriteBarrier(this);
4954 LInstruction* lir() const { return lir_; }
4955 const LAllocation* object() const { return object_; }
// For a known tenured cell, consult its arena's buffered-cells bitmap
// directly: jump to |exit| if the cell is already recorded, to |callVM| if
// the arena still points at the sentinel set (so a real set must be
// allocated), otherwise set the bit inline and jump to |exit|.
// The borrowed temp register is returned to |regs| before returning.
4958 static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
4959 const gc::TenuredCell* cell,
4960 AllocatableGeneralRegisterSet& regs,
4961 Label* exit, Label* callVM) {
4962 Register temp = regs.takeAny();
4964 gc::Arena* arena = cell->arena();
4966 Register cells = temp;
4967 masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
4969 size_t index = gc::ArenaCellSet::getCellIndex(cell);
4970 size_t word;
4971 uint32_t mask;
4972 gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
4973 size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
// Already in the set: nothing to do.
4975 masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
4976 exit);
4978 // Check whether this is the sentinel set and if so call the VM to allocate
4979 // one for this arena.
4980 masm.branchPtr(Assembler::Equal,
4981 Address(cells, gc::ArenaCellSet::offsetOfArena()),
4982 ImmPtr(nullptr), callVM);
4984 // Add the cell to the set.
4985 masm.or32(Imm32(mask), Address(cells, offset));
4986 masm.jump(exit);
4988 regs.add(temp);
// Emit the generic post-write-barrier sequence for |objreg|. Fast paths
// (constant-cell bitmap check, or the one-element whole-cell cache) may skip
// the VM call entirely; otherwise call PostGlobalWriteBarrier or
// PostWriteBarrier through the ABI depending on |isGlobal|.
4991 static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
4992 Register objreg, JSObject* maybeConstant,
4993 bool isGlobal,
4994 AllocatableGeneralRegisterSet& regs) {
4995 MOZ_ASSERT_IF(isGlobal, maybeConstant);
4997 Label callVM;
4998 Label exit;
5000 Register temp = regs.takeAny();
5002 // We already have a fast path to check whether a global is in the store
5003 // buffer.
5004 if (!isGlobal) {
5005 if (maybeConstant) {
5006 // Check store buffer bitmap directly for known object.
5007 EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
5008 &exit, &callVM);
5009 } else {
5010 // Check one element cache to avoid VM call.
5011 masm.branchPtr(Assembler::Equal,
5012 AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
5013 objreg, &exit);
5017 // Call into the VM to barrier the write.
5018 masm.bind(&callVM);
5020 Register runtimereg = temp;
5021 masm.mov(ImmPtr(runtime), runtimereg);
5023 masm.setupAlignedABICall();
5024 masm.passABIArg(runtimereg);
5025 masm.passABIArg(objreg);
5026 if (isGlobal) {
5027 using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
5028 masm.callWithABI<Fn, PostGlobalWriteBarrier>();
5029 } else {
5030 using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
5031 masm.callWithABI<Fn, PostWriteBarrier>();
5034 masm.bind(&exit);
// Post-write barrier for an LAllocation: materializes a constant object into
// a register (also detecting whether it is the script's global) before
// delegating to the shared EmitPostWriteBarrier helper.
5037 void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
5038 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5040 Register objreg;
5041 JSObject* object = nullptr;
5042 bool isGlobal = false;
5043 if (obj->isConstant()) {
5044 object = &obj->toConstant()->toObject();
5045 isGlobal = isGlobalObject(object);
5046 objreg = regs.takeAny();
5047 masm.movePtr(ImmGCPtr(object), objreg);
5048 } else {
5049 objreg = ToRegister(obj);
5050 regs.takeUnchecked(objreg);
5053 EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
5056 // Returns true if `def` might be allocated in the nursery.
// Unwraps a box to inspect the underlying definition; an untyped Value is
// conservatively assumed to need a barrier.
5057 static bool ValueNeedsPostBarrier(MDefinition* def) {
5058 if (def->isBox()) {
5059 def = def->toBox()->input();
5061 if (def->type() == MIRType::Value) {
5062 return true;
5064 return NeedsPostBarrier(def->type());
// Out-of-line path for an element store's post-write barrier. Carries the
// volatile registers to preserve, the object, the element index allocation
// plus a constant adjustment (indexDiff), and a scratch register.
5067 class OutOfLineElementPostWriteBarrier
5068 : public OutOfLineCodeBase<CodeGenerator> {
5069 LiveRegisterSet liveVolatileRegs_;
5070 const LAllocation* index_;
5071 int32_t indexDiff_;
5072 Register obj_;
5073 Register scratch_;
5075 public:
5076 OutOfLineElementPostWriteBarrier(const LiveRegisterSet& liveVolatileRegs,
5077 Register obj, const LAllocation* index,
5078 Register scratch, int32_t indexDiff)
5079 : liveVolatileRegs_(liveVolatileRegs),
5080 index_(index),
5081 indexDiff_(indexDiff),
5082 obj_(obj),
5083 scratch_(scratch) {}
5085 void accept(CodeGenerator* codegen) override {
5086 codegen->visitOutOfLineElementPostWriteBarrier(this);
5089 const LiveRegisterSet& liveVolatileRegs() const { return liveVolatileRegs_; }
5090 const LAllocation* index() const { return index_; }
5091 int32_t indexDiff() const { return indexDiff_; }
5093 Register object() const { return obj_; }
5094 Register scratch() const { return scratch_; }
// Emit the inline portion of an element-store post-write barrier: skip it
// entirely for constants and typed values that can't be nursery cells, skip
// at runtime if the object itself is in the nursery, and otherwise branch to
// the out-of-line VM path when the stored value is a nursery cell.
5097 void CodeGenerator::emitElementPostWriteBarrier(
5098 MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
5099 const LAllocation* index, Register scratch, const ConstantOrRegister& val,
5100 int32_t indexDiff) {
5101 if (val.constant()) {
// Constant nursery GC things can't appear here.
5102 MOZ_ASSERT_IF(val.value().isGCThing(),
5103 !IsInsideNursery(val.value().toGCThing()));
5104 return;
5107 TypedOrValueRegister reg = val.reg();
5108 if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
5109 return;
5112 auto* ool = new (alloc()) OutOfLineElementPostWriteBarrier(
5113 liveVolatileRegs, obj, index, scratch, indexDiff);
5114 addOutOfLineCode(ool, mir);
// Nursery objects never need a store-buffer entry for their own elements.
5116 masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());
5118 if (reg.hasValue()) {
5119 masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
5120 ool->entry());
5121 } else {
5122 masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
5123 scratch, ool->entry());
5126 masm.bind(ool->rejoin());
// Out-of-line continuation of emitElementPostWriteBarrier: preserves the
// live volatile registers, materializes the (possibly adjusted) index, and
// calls PostWriteElementBarrier through the ABI.
5129 void CodeGenerator::visitOutOfLineElementPostWriteBarrier(
5130 OutOfLineElementPostWriteBarrier* ool) {
5131 Register obj = ool->object();
5132 Register scratch = ool->scratch();
5133 const LAllocation* index = ool->index();
5134 int32_t indexDiff = ool->indexDiff();
5136 masm.PushRegsInMask(ool->liveVolatileRegs());
5138 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5139 regs.takeUnchecked(obj);
5140 regs.takeUnchecked(scratch);
5142 Register indexReg;
5143 if (index->isConstant()) {
5144 indexReg = regs.takeAny();
5145 masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
5146 } else {
5147 indexReg = ToRegister(index);
5148 regs.takeUnchecked(indexReg);
5149 if (indexDiff != 0) {
5150 masm.add32(Imm32(indexDiff), indexReg);
5154 masm.setupUnalignedABICall(scratch);
5155 masm.movePtr(ImmPtr(gen->runtime), scratch);
5156 masm.passABIArg(scratch);
5157 masm.passABIArg(obj);
5158 masm.passABIArg(indexReg);
5159 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5160 masm.callWithABI<Fn, PostWriteElementBarrier>();
5162 // We don't need a sub32 here because indexReg must be in liveVolatileRegs
5163 // if indexDiff is not zero, so it will be restored below.
5164 MOZ_ASSERT_IF(indexDiff != 0, ool->liveVolatileRegs().has(indexReg));
5166 masm.PopRegsInMask(ool->liveVolatileRegs());
5168 masm.jump(ool->rejoin());
// Register-only overload: post-write barrier with no constant-object fast
// path (non-global, unknown object).
5171 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
5172 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5173 regs.takeUnchecked(objreg);
5174 EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
// Out-of-line trampoline: save volatile registers live at the LIR
// instruction, run the barrier, restore, and rejoin inline code.
5177 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
5178 OutOfLineCallPostWriteBarrier* ool) {
5179 saveLiveVolatile(ool->lir());
5180 const LAllocation* obj = ool->object();
5181 emitPostWriteBarrier(obj);
5182 restoreLiveVolatile(ool->lir());
5184 masm.jump(ool->rejoin());
// If the object constant is this script's own global, skip the barrier when
// realm->globalWriteBarriered is already non-zero (the global was barriered
// before). Only the script's global is handled because its flag address can
// be safely baked into the code; see the comment below.
5187 void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
5188 OutOfLineCode* ool) {
5189 // Check whether an object is a global that we have already barriered before
5190 // calling into the VM.
5192 // We only check for the script's global, not other globals within the same
5193 // compartment, because we bake in a pointer to realm->globalWriteBarriered
5194 // and doing that would be invalid for other realms because they could be
5195 // collected before the Ion code is discarded.
5197 if (!maybeGlobal->isConstant()) {
5198 return;
5201 JSObject* obj = &maybeGlobal->toConstant()->toObject();
5202 if (gen->realm->maybeGlobal() != obj) {
5203 return;
5206 const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
5207 masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
5208 ool->rejoin());
// Shared inline barrier emission for typed (non-Value) post-write barriers:
// skip when the object itself is in the nursery, apply the global fast path,
// then take the out-of-line path if the stored cell is in the nursery.
// nurseryType statically selects the asserted value type.
5211 template <class LPostBarrierType, MIRType nurseryType>
5212 void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
5213 OutOfLineCode* ool) {
5214 static_assert(NeedsPostBarrier(nurseryType));
5216 addOutOfLineCode(ool, lir->mir());
5218 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5220 if (lir->object()->isConstant()) {
5221 // Constant nursery objects cannot appear here, see
5222 // LIRGenerator::visitPostWriteElementBarrier.
5223 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5224 } else {
5225 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5226 temp, ool->rejoin());
5229 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5231 Register value = ToRegister(lir->value());
5232 if constexpr (nurseryType == MIRType::Object) {
5233 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
5234 } else if constexpr (nurseryType == MIRType::String) {
5235 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
5236 } else {
5237 static_assert(nurseryType == MIRType::BigInt);
5238 MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
5240 masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
5242 masm.bind(ool->rejoin());
// Value-typed counterpart of visitPostWriteBarrierCommon: same object-side
// fast paths, but the stored operand is a boxed Value tested with
// branchValueIsNurseryCell.
5245 template <class LPostBarrierType>
5246 void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
5247 OutOfLineCode* ool) {
5248 addOutOfLineCode(ool, lir->mir());
5250 Register temp = ToTempRegisterOrInvalid(lir->temp0());
5252 if (lir->object()->isConstant()) {
5253 // Constant nursery objects cannot appear here, see
5254 // LIRGenerator::visitPostWriteElementBarrier.
5255 MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
5256 } else {
5257 masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
5258 temp, ool->rejoin());
5261 maybeEmitGlobalBarrierCheck(lir->object(), ool);
5263 ValueOperand value = ToValue(lir, LPostBarrierType::ValueIndex);
5264 masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
5266 masm.bind(ool->rejoin());
// Post-write barrier for an object-typed store.
5269 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
5270 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5271 visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
// Post-write barrier for a string-typed store.
5274 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
5275 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5276 visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
// Post-write barrier for a BigInt-typed store.
5279 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
5280 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5281 visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
// Post-write barrier for a boxed-Value store.
5284 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
5285 auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
5286 visitPostWriteBarrierCommonV(lir, ool);
5289 // Out-of-line path to update the store buffer.
// Variant of OutOfLineCallPostWriteBarrier that also carries the element
// index, used by the element-barrier LIR ops below.
5290 class OutOfLineCallPostWriteElementBarrier
5291 : public OutOfLineCodeBase<CodeGenerator> {
5292 LInstruction* lir_;
5293 const LAllocation* object_;
5294 const LAllocation* index_;
5296 public:
5297 OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
5298 const LAllocation* object,
5299 const LAllocation* index)
5300 : lir_(lir), object_(object), index_(index) {}
5302 void accept(CodeGenerator* codegen) override {
5303 codegen->visitOutOfLineCallPostWriteElementBarrier(this);
5306 LInstruction* lir() const { return lir_; }
5308 const LAllocation* object() const { return object_; }
5310 const LAllocation* index() const { return index_; }
// Out-of-line element barrier: save live volatiles, materialize a constant
// object if needed, and call PostWriteElementBarrier(rt, obj, index).
5313 void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
5314 OutOfLineCallPostWriteElementBarrier* ool) {
5315 saveLiveVolatile(ool->lir());
5317 const LAllocation* obj = ool->object();
5318 const LAllocation* index = ool->index();
5320 Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
5321 Register indexreg = ToRegister(index);
5323 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
5324 regs.takeUnchecked(indexreg);
5326 if (obj->isConstant()) {
5327 objreg = regs.takeAny();
5328 masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
5329 } else {
5330 regs.takeUnchecked(objreg);
5333 Register runtimereg = regs.takeAny();
5334 using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
5335 masm.setupAlignedABICall();
5336 masm.mov(ImmPtr(gen->runtime), runtimereg);
5337 masm.passABIArg(runtimereg);
5338 masm.passABIArg(objreg);
5339 masm.passABIArg(indexreg);
5340 masm.callWithABI<Fn, PostWriteElementBarrier>();
5342 restoreLiveVolatile(ool->lir());
5344 masm.jump(ool->rejoin());
// Element post-write barrier for an object-typed store.
5347 void CodeGenerator::visitPostWriteElementBarrierO(
5348 LPostWriteElementBarrierO* lir) {
5349 auto ool = new (alloc())
5350 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5351 visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5352 ool);
// Element post-write barrier for a string-typed store.
5355 void CodeGenerator::visitPostWriteElementBarrierS(
5356 LPostWriteElementBarrierS* lir) {
5357 auto ool = new (alloc())
5358 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5359 visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5360 ool);
// Element post-write barrier for a BigInt-typed store.
5363 void CodeGenerator::visitPostWriteElementBarrierBI(
5364 LPostWriteElementBarrierBI* lir) {
5365 auto ool = new (alloc())
5366 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5367 visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5368 ool);
// Element post-write barrier for a boxed-Value store.
5371 void CodeGenerator::visitPostWriteElementBarrierV(
5372 LPostWriteElementBarrierV* lir) {
5373 auto ool = new (alloc())
5374 OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5375 visitPostWriteBarrierCommonV(lir, ool);
// Debug assertion: verify that a barrier we decided to elide really was
// unnecessary (object in nursery, or value not a nursery cell); otherwise
// crash with a diagnostic.
5378 void CodeGenerator::visitAssertCanElidePostWriteBarrier(
5379 LAssertCanElidePostWriteBarrier* lir) {
5380 Register object = ToRegister(lir->object());
5381 ValueOperand value =
5382 ToValue(lir, LAssertCanElidePostWriteBarrier::ValueIndex);
5383 Register temp = ToRegister(lir->temp0());
5385 Label ok;
5386 masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
5387 masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
5389 masm.assumeUnreachable("Unexpected missing post write barrier");
5391 masm.bind(&ok);
// Emit a call to a native (C++) JS function: builds the vp array on the
// stack, constructs a fake native exit frame, calls the JSNative through the
// ABI, checks for failure, and loads vp[0] into the JS return operand.
// Templated so LCallNative and LCallClassHook share the sequence; the class-
// hook case loads the callee from a register and (under JS_SIMULATOR) calls
// through a register to avoid double call-redirection.
5394 template <typename LCallIns>
5395 void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
5396 MCallBase* mir = call->mir();
5398 uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());
5400 // Registers used for callWithABI() argument-passing.
5401 const Register argContextReg = ToRegister(call->getArgContextReg());
5402 const Register argUintNReg = ToRegister(call->getArgUintNReg());
5403 const Register argVpReg = ToRegister(call->getArgVpReg());
5405 // Misc. temporary registers.
5406 const Register tempReg = ToRegister(call->getTempReg());
5408 DebugOnly<uint32_t> initialStack = masm.framePushed();
5410 masm.checkStackAlignment();
5412 // Native functions have the signature:
5413 // bool (*)(JSContext*, unsigned, Value* vp)
5414 // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
5415 // are the function arguments.
5417 // Allocate space for the outparam, moving the StackPointer to what will be
5418 // &vp[1].
5419 masm.adjustStack(unusedStack);
5421 // Push a Value containing the callee object: natives are allowed to access
5422 // their callee before setting the return value. The StackPointer is moved
5423 // to &vp[0].
5424 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5425 Register calleeReg = ToRegister(call->getCallee());
5426 masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
5428 if (call->mir()->maybeCrossRealm()) {
5429 masm.switchToObjectRealm(calleeReg, tempReg);
5431 } else {
5432 WrappedFunction* target = call->getSingleTarget();
5433 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5435 if (call->mir()->maybeCrossRealm()) {
5436 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
5437 masm.switchToObjectRealm(tempReg, tempReg);
5441 // Preload arguments into registers.
5442 masm.loadJSContext(argContextReg);
5443 masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
5444 masm.moveStackPtrTo(argVpReg);
5446 masm.Push(argUintNReg);
5448 // Construct native exit frame.
5449 uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
5450 masm.enterFakeExitFrameForNative(argContextReg, tempReg,
5451 call->mir()->isConstructing());
5453 markSafepointAt(safepointOffset, call);
5455 // Construct and execute call.
5456 masm.setupAlignedABICall();
5457 masm.passABIArg(argContextReg);
5458 masm.passABIArg(argUintNReg);
5459 masm.passABIArg(argVpReg);
5461 ensureOsiSpace();
5462 // If we're using a simulator build, `native` will already point to the
5463 // simulator's call-redirection code for LCallClassHook. Load the address in
5464 // a register first so that we don't try to redirect it a second time.
5465 bool emittedCall = false;
5466 #ifdef JS_SIMULATOR
5467 if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
5468 masm.movePtr(ImmPtr(native), tempReg);
5469 masm.callWithABI(tempReg);
5470 emittedCall = true;
5472 #endif
5473 if (!emittedCall) {
5474 masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
5475 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5478 // Test for failure.
5479 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
5481 if (call->mir()->maybeCrossRealm()) {
5482 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5485 // Load the outparam vp[0] into output register(s).
5486 masm.loadValue(
5487 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
5488 JSReturnOperand);
5490 // Until C++ code is instrumented against Spectre, prevent speculative
5491 // execution from returning any private data.
5492 if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
5493 mir->hasLiveDefUses()) {
5494 masm.speculationBarrier();
5497 // The next instruction is removing the footer of the exit frame, so there
5498 // is no need for leaveFakeExitFrame.
5500 // Move the StackPointer back to its original location, unwinding the native
5501 // exit frame.
5502 masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
5503 MOZ_ASSERT(masm.framePushed() == initialStack);
// Call a known native function. If the result is ignored and the native has
// an IgnoresReturnValueNative variant in its JITInfo, call that instead.
5506 void CodeGenerator::visitCallNative(LCallNative* call) {
5507 WrappedFunction* target = call->getSingleTarget();
5508 MOZ_ASSERT(target);
5509 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5511 JSNative native = target->native();
5512 if (call->ignoresReturnValue() && target->hasJitInfo()) {
5513 const JSJitInfo* jitInfo = target->jitInfo();
5514 if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
5515 native = jitInfo->ignoresReturnValueMethod;
5518 emitCallNative(call, native);
// Call an object's class call/construct hook via the shared native-call path.
5521 void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
5522 emitCallNative(call, call->mir()->target());
// Load the DOM private pointer for |obj| into |priv|: fixed slot 0 for
// native DOM objects, reserved proxy slot 0 for DOM proxies (with a debug
// check that the proxy really belongs to the DOM handler family).
5525 static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
5526 DOMObjectKind kind) {
5527 // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
5528 // will be in the first slot but may be fixed or non-fixed.
5529 MOZ_ASSERT(obj != priv);
5531 switch (kind) {
5532 case DOMObjectKind::Native:
5533 // If it's a native object, the value must be in a fixed slot.
5534 // See CanAttachDOMCall in CacheIR.cpp.
5535 masm.debugAssertObjHasFixedSlots(obj, priv);
5536 masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
5537 break;
5538 case DOMObjectKind::Proxy: {
5539 #ifdef DEBUG
5540 // Sanity check: it must be a DOM proxy.
5541 Label isDOMProxy;
5542 masm.branchTestProxyHandlerFamily(
5543 Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
5544 masm.assumeUnreachable("Expected a DOM proxy");
5545 masm.bind(&isDOMProxy);
5546 #endif
5547 masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
5548 masm.loadPrivate(
5549 Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
5550 break;
// Call a DOM method through its JSJitInfo entry point. Lays out an
// IonDOMMethod exit frame (argc, argv, |this| object), extracts the DOM
// private, and invokes jitInfo()->method(cx, obj, private, args). Infallible
// methods skip the ReturnReg failure check.
5555 void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
5556 WrappedFunction* target = call->getSingleTarget();
5557 MOZ_ASSERT(target);
5558 MOZ_ASSERT(target->isNativeWithoutJitEntry());
5559 MOZ_ASSERT(target->hasJitInfo());
5560 MOZ_ASSERT(call->mir()->isCallDOMNative());
5562 int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5564 // Registers used for callWithABI() argument-passing.
5565 const Register argJSContext = ToRegister(call->getArgJSContext());
5566 const Register argObj = ToRegister(call->getArgObj());
5567 const Register argPrivate = ToRegister(call->getArgPrivate());
5568 const Register argArgs = ToRegister(call->getArgArgs());
5570 DebugOnly<uint32_t> initialStack = masm.framePushed();
5572 masm.checkStackAlignment();
5574 // DOM methods have the signature:
5575 // bool (*)(JSContext*, HandleObject, void* private, const
5576 // JSJitMethodCallArgs& args)
5577 // Where args is initialized from an argc and a vp, vp[0] is space for an
5578 // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
5579 // function arguments. Note that args stores the argv, not the vp, and
5580 // argv == vp + 2.
5582 // Nestle the stack up against the pushed arguments, leaving StackPointer at
5583 // &vp[1]
5584 masm.adjustStack(unusedStack);
5585 // argObj is filled with the extracted object, then returned.
5586 Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
5587 MOZ_ASSERT(obj == argObj);
5589 // Push a Value containing the callee object: natives are allowed to access
5590 // their callee before setting the return value. After this the StackPointer
5591 // points to &vp[0].
5592 masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5594 // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
5595 // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
5596 // StackPointer.
5597 static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
5598 static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
5599 IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
5600 masm.computeEffectiveAddress(
5601 Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
5603 LoadDOMPrivate(masm, obj, argPrivate,
5604 static_cast<MCallDOMNative*>(call->mir())->objectKind());
5606 // Push argc from the call instruction into what will become the IonExitFrame
5607 masm.Push(Imm32(call->numActualArgs()));
5609 // Push our argv onto the stack
5610 masm.Push(argArgs);
5611 // And store our JSJitMethodCallArgs* in argArgs.
5612 masm.moveStackPtrTo(argArgs);
5614 // Push |this| object for passing HandleObject. We push after argc to
5615 // maintain the same sp-relative location of the object pointer with other
5616 // DOMExitFrames.
5617 masm.Push(argObj);
5618 masm.moveStackPtrTo(argObj);
5620 if (call->mir()->maybeCrossRealm()) {
5621 // We use argJSContext as scratch register here.
5622 masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
5623 masm.switchToObjectRealm(argJSContext, argJSContext);
5626 // Construct native exit frame.
5627 uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
5628 masm.loadJSContext(argJSContext);
5629 masm.enterFakeExitFrame(argJSContext, argJSContext,
5630 ExitFrameType::IonDOMMethod);
5632 markSafepointAt(safepointOffset, call);
5634 // Construct and execute call.
5635 masm.setupAlignedABICall();
5636 masm.loadJSContext(argJSContext);
5637 masm.passABIArg(argJSContext);
5638 masm.passABIArg(argObj);
5639 masm.passABIArg(argPrivate);
5640 masm.passABIArg(argArgs);
5641 ensureOsiSpace();
5642 masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
5643 ABIType::General,
5644 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5646 if (target->jitInfo()->isInfallible) {
5647 masm.loadValue(Address(masm.getStackPointer(),
5648 IonDOMMethodExitFrameLayout::offsetOfResult()),
5649 JSReturnOperand);
5650 } else {
5651 // Test for failure.
5652 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
5654 // Load the outparam vp[0] into output register(s).
5655 masm.loadValue(Address(masm.getStackPointer(),
5656 IonDOMMethodExitFrameLayout::offsetOfResult()),
5657 JSReturnOperand);
5660 // Switch back to the current realm if needed. Note: if the DOM method threw
5661 // an exception, the exception handler will do this.
5662 if (call->mir()->maybeCrossRealm()) {
5663 static_assert(!JSReturnOperand.aliases(ReturnReg),
5664 "Clobbering ReturnReg should not affect the return value");
5665 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5668 // Until C++ code is instrumented against Spectre, prevent speculative
5669 // execution from returning any private data.
5670 if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
5671 masm.speculationBarrier();
5674 // The next instruction is removing the footer of the exit frame, so there
5675 // is no need for leaveFakeExitFrame.
5677 // Move the StackPointer back to its original location, unwinding the native
5678 // exit frame.
5679 masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
5680 MOZ_ASSERT(masm.framePushed() == initialStack);
// Look up a self-hosting intrinsic by name through a VM call.
5683 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5684 pushArg(ImmGCPtr(lir->mir()->name()));
5686 using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
5687 callVM<Fn, GetIntrinsicValue>(lir);
// Slow-path function invocation through jit::InvokeFunction: free the unused
// stack so argv lines up with the stack pointer, push the VM-call arguments
// (callee, constructing, ignoresReturnValue, argc, argv), then re-reserve
// the stack afterwards.
5690 void CodeGenerator::emitCallInvokeFunction(
5691 LInstruction* call, Register calleereg, bool constructing,
5692 bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5693 // Nestle %esp up to the argument vector.
5694 // Each path must account for framePushed_ separately, for callVM to be valid.
5695 masm.freeStack(unusedStack);
5697 pushArg(masm.getStackPointer()); // argv.
5698 pushArg(Imm32(argc)); // argc.
5699 pushArg(Imm32(ignoresReturnValue));
5700 pushArg(Imm32(constructing)); // constructing.
5701 pushArg(calleereg); // JSFunction*.
5703 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5704 MutableHandleValue);
5705 callVM<Fn, jit::InvokeFunction>(call);
5707 // Un-nestle %esp from the argument vector. No prefix was pushed.
5708 masm.reserveStack(unusedStack);
// Unknown-callee call: tail into the shared Ion generic call/construct
// trampoline, then restore the frame and, for constructors, replace a
// primitive return value with the object from CreateThis.
5711 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5712 // The callee is passed straight through to the trampoline.
5713 MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
5715 Register argcReg = ToRegister(call->getArgc());
5716 uint32_t unusedStack =
5717 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
5719 // Known-target case is handled by LCallKnown.
5720 MOZ_ASSERT(!call->hasSingleTarget());
5722 masm.checkStackAlignment();
5724 masm.move32(Imm32(call->numActualArgs()), argcReg);
5726 // Nestle the StackPointer up to the argument vector.
5727 masm.freeStack(unusedStack);
5728 ensureOsiSpace();
5730 auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
5731 : IonGenericCallKind::Call;
5733 TrampolinePtr genericCallStub =
5734 gen->jitRuntime()->getIonGenericCallStub(kind);
5735 uint32_t callOffset = masm.callJit(genericCallStub);
5736 markSafepointAt(callOffset, call);
5738 if (call->mir()->maybeCrossRealm()) {
5739 static_assert(!JSReturnOperand.aliases(ReturnReg),
5740 "ReturnReg available as scratch after scripted calls");
5741 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5744 // Restore stack pointer.
5745 masm.setFramePushed(frameSize());
5746 emitRestoreStackPointerFromFP();
5748 // If the return value of the constructing function is Primitive,
5749 // replace the return value with the Object from CreateThis.
5750 if (call->mir()->isConstructing()) {
5751 Label notPrimitive;
5752 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5753 &notPrimitive);
5754 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5755 JSReturnOperand);
5756 #ifdef DEBUG
// The replacement |this| must itself be an object.
5757 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5758 &notPrimitive);
5759 masm.assumeUnreachable("CreateThis creates an object");
5760 #endif
5761 masm.bind(&notPrimitive);
// Helper for the generic-call trampoline's fun_call and bound-function paths:
// slide |argc| Values on the stack down by one Value (8 bytes), overwriting
// the Value currently at the top. On completion control jumps to |done|
// (bound by the caller) rather than falling through. Clobbers curr, end, and
// scratch; argc is only read.
5765 void JitRuntime::generateIonGenericCallArgumentsShift(
5766 MacroAssembler& masm, Register argc, Register curr, Register end,
5767 Register scratch, Label* done) {
5768 static_assert(sizeof(Value) == 8);
5769 // There are |argc| Values on the stack. Shift them all down by 8 bytes,
5770 // overwriting the first value.
5772 // Initialize `curr` to the destination of the first copy, and `end` to the
5773 // final value of curr.
5774 masm.moveStackPtrTo(curr);
5775 masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);
// Copy one machine word per iteration (so on 32-bit platforms each Value
// takes two iterations), always reading from 8 bytes above the store.
5777 Label loop;
5778 masm.bind(&loop);
5779 masm.branchPtr(Assembler::Equal, curr, end, done);
5780 masm.loadPtr(Address(curr, 8), scratch);
5781 masm.storePtr(scratch, Address(curr, 0));
5782 masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
5783 masm.jump(&loop);
// Generate the shared trampoline used by visitCallGeneric for calls/constructs
// with an unknown callee. Dispatches to one of four paths: jit entry (possibly
// via the arguments rectifier), native call, fun_call / bound-function stack
// rewrite (which loops back to |entry|), or a fallback VM InvokeFunction call.
5786 void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
5787 IonGenericCallKind kind) {
5788 AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
5789 ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);
5791 // This code is tightly coupled with visitCallGeneric.
5793 // Upon entry:
5794 // IonGenericCallCalleeReg contains a pointer to the callee object.
5795 // IonGenericCallArgcReg contains the number of actual args.
5796 // The arguments have been pushed onto the stack:
5797 // [newTarget] (iff isConstructing)
5798 // [argN]
5799 // ...
5800 // [arg1]
5801 // [arg0]
5802 // [this]
5803 // <return address> (if not JS_USE_LINK_REGISTER)
5805 // This trampoline is responsible for entering the callee's realm,
5806 // massaging the stack into the right shape, and then performing a
5807 // tail call. We will return directly to the Ion code from the
5808 // callee.
5810 // To do a tail call, we keep the return address in a register, even
5811 // on platforms that don't normally use a link register, and push it
5812 // just before jumping to the callee, after we are done setting up
5813 // the stack.
5815 // The caller is responsible for switching back to the caller's
5816 // realm and cleaning up the stack.
5818 Register calleeReg = IonGenericCallCalleeReg;
5819 Register argcReg = IonGenericCallArgcReg;
5820 Register scratch = IonGenericCallScratch;
5821 Register scratch2 = IonGenericCallScratch2;
// Without a link register, the return address is on the stack; hold it in a
// register until just before the tail call (see comment above).
5823 #ifndef JS_USE_LINK_REGISTER
5824 Register returnAddrReg = IonGenericCallReturnAddrReg;
5825 masm.pop(returnAddrReg);
5826 #endif
5828 #ifdef JS_CODEGEN_ARM
5829 // The default second scratch register on arm is lr, which we need
5830 // preserved for tail calls.
5831 AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
5832 #endif
5834 bool isConstructing = kind == IonGenericCallKind::Construct;
// The fun_call and bound-function paths rewrite the stack/callee and then
// jump back to |entry| to re-run the guards for the inner callee.
5836 Label entry, notFunction, noJitEntry, vmCall;
5837 masm.bind(&entry);
5839 // Guard that the callee is actually a function.
5840 masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
5841 calleeReg, &notFunction);
5843 // Guard that the callee supports the [[Call]] or [[Construct]] operation.
5844 // If these tests fail, we will call into the VM to throw an exception.
5845 if (isConstructing) {
5846 masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
5847 Assembler::Zero, &vmCall);
5848 } else {
5849 masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5850 calleeReg, scratch, &vmCall);
5853 if (isConstructing) {
5854 // Use the slow path if CreateThis was unable to create the |this| object.
5855 Address thisAddr(masm.getStackPointer(), 0);
5856 masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
5859 masm.switchToObjectRealm(calleeReg, scratch);
5861 // Load jitCodeRaw for callee if it exists.
5862 masm.branchIfFunctionHasNoJitEntry(calleeReg, isConstructing, &noJitEntry);
5864 // ****************************
5865 // * Functions with jit entry *
5866 // ****************************
5867 masm.loadJitCodeRaw(calleeReg, scratch2);
5869 // Construct the JitFrameLayout.
5870 masm.PushCalleeToken(calleeReg, isConstructing);
5871 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
5872 #ifndef JS_USE_LINK_REGISTER
5873 masm.push(returnAddrReg);
5874 #endif
5876 // Check whether we need a rectifier frame.
5877 Label noRectifier;
5878 masm.loadFunctionArgCount(calleeReg, scratch);
5879 masm.branch32(Assembler::BelowOrEqual, scratch, argcReg, &noRectifier);
5881 // Tail-call the arguments rectifier.
5882 // Because all trampolines are created at the same time,
5883 // we can't create a TrampolinePtr for the arguments rectifier,
5884 // because it hasn't been linked yet. We can, however, directly
5885 // encode its offset.
5886 Label rectifier;
5887 bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
5889 masm.jump(&rectifier);
5892 // Tail call the jit entry.
5893 masm.bind(&noRectifier);
5894 masm.jump(scratch2);
5896 // ********************
5897 // * Native functions *
5898 // ********************
5899 masm.bind(&noJitEntry);
5900 if (!isConstructing) {
5901 generateIonGenericCallFunCall(masm, &entry, &vmCall);
5903 generateIonGenericCallNativeFunction(masm, isConstructing);
5905 // *******************
5906 // * Bound functions *
5907 // *******************
5908 // TODO: support class hooks?
5909 masm.bind(&notFunction);
5910 if (!isConstructing) {
5911 // TODO: support generic bound constructors?
5912 generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
5915 // ********************
5916 // * Fallback VM call *
5917 // ********************
5918 masm.bind(&vmCall);
// Build the InvokeFunction argument list directly with raw pushes (argv,
// argc, ignoresReturnValue, constructing, callee), then tail-jump into the
// pre-generated VM wrapper for jit::InvokeFunction via its recorded offset.
5920 masm.push(masm.getStackPointer()); // argv
5921 masm.push(argcReg); // argc
5922 masm.push(Imm32(false)); // ignores return value
5923 masm.push(Imm32(isConstructing)); // constructing
5924 masm.push(calleeReg); // callee
5926 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5927 MutableHandleValue);
5928 VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
5929 uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
5930 Label invokeFunctionVMEntry;
5931 bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);
5933 masm.pushFrameDescriptor(FrameType::IonJS);
5934 #ifndef JS_USE_LINK_REGISTER
5935 masm.push(returnAddrReg);
5936 #endif
5937 masm.jump(&invokeFunctionVMEntry);
// Native-function path of the generic-call trampoline: build a native exit
// frame and invoke the C++ native via the ABI. Unlike the jit-entry paths,
// this is a real call (not a tail call); it loads the result from the exit
// frame and returns to the Ion caller with masm.ret().
5940 void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
5941 bool isConstructing) {
5942 Register calleeReg = IonGenericCallCalleeReg;
5943 Register argcReg = IonGenericCallArgcReg;
5944 Register scratch = IonGenericCallScratch;
5945 Register scratch2 = IonGenericCallScratch2;
5946 Register contextReg = IonGenericCallScratch3;
5947 #ifndef JS_USE_LINK_REGISTER
5948 Register returnAddrReg = IonGenericCallReturnAddrReg;
5949 #endif
5951 // Push a value containing the callee, which will become argv[0].
5952 masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);
// Under the simulator all natives are called through a redirection thunk
// instead of reading the function's native pointer directly.
5954 // Load the callee address into calleeReg.
5955 #ifdef JS_SIMULATOR
5956 masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
5957 #else
5958 masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
5959 calleeReg);
5960 #endif
5962 // Load argv into scratch2.
5963 masm.moveStackPtrTo(scratch2);
5965 // Push argc.
5966 masm.push(argcReg);
5968 masm.loadJSContext(contextReg);
5970 // Construct native exit frame. Note that unlike other cases in this
5971 // trampoline, this code does not use a tail call.
5972 masm.pushFrameDescriptor(FrameType::IonJS);
5973 #ifdef JS_USE_LINK_REGISTER
5974 masm.pushReturnAddress();
5975 #else
5976 masm.push(returnAddrReg);
5977 #endif
5979 masm.push(FramePointer);
5980 masm.moveStackPtrTo(FramePointer);
5981 masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);
// JSNative signature: bool (*)(JSContext* cx, unsigned argc, Value* vp).
5983 masm.setupUnalignedABICall(scratch);
5984 masm.passABIArg(contextReg); // cx
5985 masm.passABIArg(argcReg); // argc
5986 masm.passABIArg(scratch2); // argv
5988 masm.callWithABI(calleeReg);
5990 // Test for failure.
5991 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// On success the native stored its result in the exit frame's result slot.
5993 masm.loadValue(
5994 Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
5995 JSReturnOperand);
5997 // Leave the exit frame.
5998 masm.moveToStackPtr(FramePointer);
5999 masm.pop(FramePointer);
6001 // Return.
6002 masm.ret();
// Fast path for Function.prototype.call inside the generic-call trampoline:
// if the native is js::fun_call, rewrite the stack (shift args down over the
// callee's |this|, decrement argc) and loop back to |entry| to call the inner
// target. Falls through to the generic native path at |notFunCall| otherwise;
// bails to |vmCall| if |this| is not an object.
6005 void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
6006 Label* entry, Label* vmCall) {
6007 Register calleeReg = IonGenericCallCalleeReg;
6008 Register argcReg = IonGenericCallArgcReg;
6009 Register scratch = IonGenericCallScratch;
6010 Register scratch2 = IonGenericCallScratch2;
6011 Register scratch3 = IonGenericCallScratch3;
6013 Label notFunCall;
6014 masm.branchPtr(Assembler::NotEqual,
6015 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
6016 ImmPtr(js::fun_call), &notFunCall);
6018 // In general, we can implement fun_call by replacing calleeReg with
6019 // |this|, sliding all the other arguments down, and decrementing argc.
6021 // *BEFORE* *AFTER*
6022 // [argN] argc = N+1 <padding>
6023 // ... [argN] argc = N
6024 // [arg1] ...
6025 // [arg0] [arg1] <- now arg0
6026 // [this] <- top of stack (aligned) [arg0] <- now this
6028 // The only exception is when argc is already 0, in which case instead
6029 // of shifting arguments down we replace [this] with UndefinedValue():
6031 // *BEFORE* *AFTER*
6032 // [this] argc = 0 [undef] argc = 0
6034 // After making this transformation, we can jump back to the beginning
6035 // of this trampoline to handle the inner call.
6037 // Guard that |this| is an object. If it is, replace calleeReg.
6038 masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
6039 masm.movePtr(scratch, calleeReg);
6041 Label hasArgs;
6042 masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);
6044 // No arguments. Replace |this| with |undefined| and start from the top.
6045 masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
6046 masm.jump(entry);
6048 masm.bind(&hasArgs);
// Shift the argc Values down one slot; the old [this] slot is overwritten
// and the former arg0 becomes the new |this|.
6050 Label doneSliding;
6051 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6052 scratch3, &doneSliding);
6053 masm.bind(&doneSliding);
6054 masm.sub32(Imm32(1), argcReg);
6056 masm.jump(entry);
6058 masm.bind(&notFunCall);
6061 void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
6062 Label* entry,
6063 Label* vmCall) {
6064 Register calleeReg = IonGenericCallCalleeReg;
6065 Register argcReg = IonGenericCallArgcReg;
6066 Register scratch = IonGenericCallScratch;
6067 Register scratch2 = IonGenericCallScratch2;
6068 Register scratch3 = IonGenericCallScratch3;
6070 masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
6071 &BoundFunctionObject::class_, scratch, calleeReg,
6072 vmCall);
6074 Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
6075 Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
6076 Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
6077 Address firstInlineArgSlot(
6078 calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());
6080 // Check that we won't be pushing too many arguments.
6081 masm.load32(flagsSlot, scratch);
6082 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6083 masm.add32(argcReg, scratch);
6084 masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);
6086 // The stack is currently correctly aligned for a jit call. We will
6087 // be updating the `this` value and potentially adding additional
6088 // arguments. On platforms with 16-byte alignment, if the number of
6089 // bound arguments is odd, we have to move the arguments that are
6090 // currently on the stack. For example, with one bound argument:
6092 // *BEFORE* *AFTER*
6093 // [argN] <padding>
6094 // ... [argN] |
6095 // [arg1] ... | These arguments have been
6096 // [arg0] [arg1] | shifted down 8 bytes.
6097 // [this] <- top of stack (aligned) [arg0] v
6098 // [bound0] <- one bound argument (odd)
6099 // [boundThis] <- top of stack (aligned)
6101 Label poppedThis;
6102 if (JitStackValueAlignment > 1) {
6103 Label alreadyAligned;
6104 masm.branchTest32(Assembler::Zero, flagsSlot,
6105 Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
6106 &alreadyAligned);
6108 // We have an odd number of bound arguments. Shift the existing arguments
6109 // down by 8 bytes.
6110 generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
6111 scratch3, &poppedThis);
6112 masm.bind(&alreadyAligned);
6115 // Pop the current `this`. It will be replaced with the bound `this`.
6116 masm.freeStack(sizeof(Value));
6117 masm.bind(&poppedThis);
6119 // Load the number of bound arguments in scratch
6120 masm.load32(flagsSlot, scratch);
6121 masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
6123 Label donePushingBoundArguments;
6124 masm.branch32(Assembler::Equal, scratch, Imm32(0),
6125 &donePushingBoundArguments);
6127 // Update argc to include bound arguments.
6128 masm.add32(scratch, argcReg);
6130 // Load &boundArgs[0] in scratch2.
6131 Label outOfLineBoundArguments, haveBoundArguments;
6132 masm.branch32(Assembler::Above, scratch,
6133 Imm32(BoundFunctionObject::MaxInlineBoundArgs),
6134 &outOfLineBoundArguments);
6135 masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
6136 masm.jump(&haveBoundArguments);
6138 masm.bind(&outOfLineBoundArguments);
6139 masm.unboxObject(firstInlineArgSlot, scratch2);
6140 masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);
6142 masm.bind(&haveBoundArguments);
6144 // Load &boundArgs[numBoundArgs] in scratch.
6145 BaseObjectElementIndex lastBoundArg(scratch2, scratch);
6146 masm.computeEffectiveAddress(lastBoundArg, scratch);
6148 // Push the bound arguments, starting with the last one.
6149 // Copying pre-decrements scratch until scratch2 is reached.
6150 Label boundArgumentsLoop;
6151 masm.bind(&boundArgumentsLoop);
6152 masm.subPtr(Imm32(sizeof(Value)), scratch);
6153 masm.pushValue(Address(scratch, 0));
6154 masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
6155 masm.bind(&donePushingBoundArguments);
6157 // Push the bound `this`.
6158 masm.pushValue(thisSlot);
6160 // Load the target in calleeReg.
6161 masm.unboxObject(targetSlot, calleeReg);
6163 // At this point, all preconditions for entering the trampoline are met:
6164 // - calleeReg contains a pointer to the callee object
6165 // - argcReg contains the number of actual args (now including bound args)
6166 // - the arguments are on the stack with the correct alignment.
6167 // Instead of generating more code, we can jump back to the entry point
6168 // of the trampoline to call the bound target.
6169 masm.jump(entry);
// Emit a call whose single target is known at compile time and has a jit
// entry. No callee guards or rectifier are needed (WarpBuilder padded missing
// args), so this builds the JitFrameLayout inline and calls jitCodeRaw
// directly. Class constructors called without |new| divert to InvokeFunction.
6172 void CodeGenerator::visitCallKnown(LCallKnown* call) {
6173 Register calleereg = ToRegister(call->getFunction());
6174 Register objreg = ToRegister(call->getTempObject());
6175 uint32_t unusedStack =
6176 UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
6177 WrappedFunction* target = call->getSingleTarget();
6179 // Native single targets (except wasm) are handled by LCallNative.
6180 MOZ_ASSERT(target->hasJitEntry());
6182 // Missing arguments must have been explicitly appended by WarpBuilder.
6183 DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
6184 MOZ_ASSERT(target->nargs() <=
6185 call->mir()->numStackArgs() - numNonArgsOnStack);
6187 MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
6189 masm.checkStackAlignment();
// Calling a class constructor without |new| must throw; route through the
// VM instead of jumping to jit code.
6191 if (target->isClassConstructor() && !call->isConstructing()) {
6192 emitCallInvokeFunction(call, calleereg, call->isConstructing(),
6193 call->ignoresReturnValue(), call->numActualArgs(),
6194 unusedStack);
6195 return;
6198 MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
6200 MOZ_ASSERT(!call->mir()->needsThisCheck());
6202 if (call->mir()->maybeCrossRealm()) {
6203 masm.switchToObjectRealm(calleereg, objreg);
6206 masm.loadJitCodeRaw(calleereg, objreg);
6208 // Nestle the StackPointer up to the argument vector.
6209 masm.freeStack(unusedStack);
6211 // Construct the JitFrameLayout.
6212 masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
6213 masm.PushFrameDescriptorForJitCall(FrameType::IonJS, call->numActualArgs());
6215 // Finally call the function in objreg.
6216 ensureOsiSpace();
6217 uint32_t callOffset = masm.callJit(objreg);
6218 markSafepointAt(callOffset, call);
6220 if (call->mir()->maybeCrossRealm()) {
6221 static_assert(!JSReturnOperand.aliases(ReturnReg),
6222 "ReturnReg available as scratch after scripted calls");
6223 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6226 // Restore stack pointer: pop JitFrameLayout fields still left on the stack
6227 // and undo the earlier |freeStack(unusedStack)|.
6228 int prefixGarbage =
6229 sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
6230 masm.adjustStack(prefixGarbage - unusedStack);
6232 // If the return value of the constructing function is Primitive,
6233 // replace the return value with the Object from CreateThis.
6234 if (call->mir()->isConstructing()) {
6235 Label notPrimitive;
6236 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6237 &notPrimitive);
6238 masm.loadValue(Address(masm.getStackPointer(), unusedStack),
6239 JSReturnOperand);
6240 #ifdef DEBUG
6241 masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6242 &notPrimitive);
6243 masm.assumeUnreachable("CreateThis creates an object");
6244 #endif
6245 masm.bind(&notPrimitive);
// Slow-path helper for the apply/construct lowerings: call jit::InvokeFunction
// in the VM with the arguments already laid out on the stack. |T| is one of
// the LApply*/LConstruct* instruction types; argv is the current stack
// pointer, since emitPushArguments() has pushed this + args on top.
6249 template <typename T>
6250 void CodeGenerator::emitCallInvokeFunction(T* apply) {
6251 Register objreg = ToRegister(apply->getTempObject());
6253 // Push the space used by the arguments.
6254 masm.moveStackPtrTo(objreg);
6256 pushArg(objreg); // argv.
6257 pushArg(ToRegister(apply->getArgc())); // argc.
6258 pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
6259 pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
6260 pushArg(ToRegister(apply->getFunction())); // JSFunction*.
6262 using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
6263 MutableHandleValue);
6264 callVM<Fn, jit::InvokeFunction>(apply);
6267 // Do not bailout after the execution of this function since the stack no longer
6268 // correspond to what is expected by the snapshots.
// Reserve |argc| Value slots (plus one padding slot when argc is even and the
// jit stack requires 2-Value alignment) on the stack for an apply-style call.
// Clobbers |scratch|; |argcreg| is only read.
6269 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
6270 Register scratch) {
6271 // Use scratch register to calculate stack space (including padding).
6272 masm.movePtr(argcreg, scratch);
6274 // Align the JitFrameLayout on the JitStackAlignment.
6275 if (JitStackValueAlignment > 1) {
6276 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6277 "Stack padding assumes that the frameSize is correct");
6278 MOZ_ASSERT(JitStackValueAlignment == 2);
6279 Label noPaddingNeeded;
6280 // if the number of arguments is odd, then we do not need any padding.
// (odd argc + the |this| Value pushed later = even number of Values)
6281 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6282 masm.addPtr(Imm32(1), scratch);
6283 masm.bind(&noPaddingNeeded);
6286 // Reserve space for copying the arguments.
6287 NativeObject::elementsSizeMustNotOverflow();
6288 masm.lshiftPtr(Imm32(ValueShift), scratch);
6289 masm.subFromStackPtr(scratch);
6291 #ifdef DEBUG
6292 // Put a magic value in the space reserved for padding. Note, this code
6293 // cannot be merged with the previous test, as not all architectures can
6294 // write below their stack pointers.
6295 if (JitStackValueAlignment > 1) {
6296 MOZ_ASSERT(JitStackValueAlignment == 2);
6297 Label noPaddingNeeded;
6298 // if the number of arguments is odd, then we do not need any padding.
6299 masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
6300 BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
6301 masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
6302 masm.bind(&noPaddingNeeded);
6304 #endif
6307 // Do not bailout after the execution of this function since the stack no longer
6308 // correspond to what is expected by the snapshots.
// Construct-call variant of emitAllocateSpaceForApply(): push optional
// alignment padding, then |new.target|, then reserve |argc| Value slots.
// After this call |newTargetAndScratch| no longer holds new.target.
6309 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
6310 Register argcreg, Register newTargetAndScratch) {
6311 // Align the JitFrameLayout on the JitStackAlignment. Contrary to
6312 // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
6313 // we can't write to |newTargetAndScratch| before |new.target| has
6314 // been pushed onto the stack.
6315 if (JitStackValueAlignment > 1) {
6316 MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
6317 "Stack padding assumes that the frameSize is correct");
6318 MOZ_ASSERT(JitStackValueAlignment == 2);
6320 Label noPaddingNeeded;
6321 // If the number of arguments is even, then we do not need any padding.
// (even argc + this + new.target = even total number of Values)
6322 masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
6323 masm.pushValue(MagicValue(JS_ARG_POISON));
6324 masm.bind(&noPaddingNeeded);
6327 // Push |new.target| after the padding value, but before any arguments.
6328 masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
6330 // Use newTargetAndScratch to calculate stack space (including padding).
6331 masm.movePtr(argcreg, newTargetAndScratch);
6333 // Reserve space for copying the arguments.
6334 NativeObject::elementsSizeMustNotOverflow();
6335 masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
6336 masm.subFromStackPtr(newTargetAndScratch);
6339 // Destroys argvIndex and copyreg.
// Copy |argvIndex| Values from argvSrcBase+argvSrcOffset to the reserved
// space at stackPointer+argvDstOffset, iterating from the highest index down
// to 1 (argvIndex counts down to zero). Caller must have reserved the space
// and must skip this entirely when the count is zero.
6340 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
6341 Register argvIndex, Register copyreg,
6342 size_t argvSrcOffset,
6343 size_t argvDstOffset) {
6344 Label loop;
6345 masm.bind(&loop);
6347 // As argvIndex is off by 1, and we use the decBranchPtr instruction
6348 // to loop back, we have to substract the size of the word which are
6349 // copied.
6350 BaseValueIndex srcPtr(argvSrcBase, argvIndex,
6351 int32_t(argvSrcOffset) - sizeof(void*));
6352 BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
6353 int32_t(argvDstOffset) - sizeof(void*));
6354 masm.loadPtr(srcPtr, copyreg);
6355 masm.storePtr(copyreg, dstPtr);
6357 // Handle 32 bits architectures.
// On 32-bit, each Value is two machine words, so copy the low word too.
6358 if (sizeof(Value) == 2 * sizeof(void*)) {
6359 BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
6360 int32_t(argvSrcOffset) - 2 * sizeof(void*));
6361 BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
6362 int32_t(argvDstOffset) - 2 * sizeof(void*));
6363 masm.loadPtr(srcPtrLow, copyreg);
6364 masm.storePtr(copyreg, dstPtrLow);
6367 masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
6370 void CodeGenerator::emitRestoreStackPointerFromFP() {
6371 // This is used to restore the stack pointer after a call with a dynamic
6372 // number of arguments.
6374 MOZ_ASSERT(masm.framePushed() == frameSize());
// Recompute sp as FramePointer - frameSize(), discarding whatever dynamic
// argument space was pushed below the fixed frame.
6376 int32_t offset = -int32_t(frameSize());
6377 masm.computeEffectiveAddress(Address(FramePointer, offset),
6378 masm.getStackPointer());
// Copy |argcreg| actual arguments from the caller's frame (above this frame's
// JitFrameLayout, skipping |extraFormals| leading formals) into the space
// already reserved at the top of the stack. Clobbers scratch and copyreg.
6381 void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
6382 Register copyreg, uint32_t extraFormals) {
6383 Label end;
6385 // Skip the copy of arguments if there are none.
6386 masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
6388 // clang-format off
6390 // We are making a copy of the arguments which are above the JitFrameLayout
6391 // of the current Ion frame.
6393 // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
6395 // clang-format on
6397 // Compute the source and destination offsets into the stack.
6398 Register argvSrcBase = FramePointer;
6399 size_t argvSrcOffset =
6400 JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
6401 size_t argvDstOffset = 0;
6403 Register argvIndex = scratch;
6404 masm.move32(argcreg, argvIndex);
6406 // Copy arguments.
6407 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6408 argvDstOffset);
6410 // Join with all arguments copied and the extra stack usage computed.
6411 masm.bind(&end);
// fun.apply(..., arguments)-style call: allocate space, copy the caller's
// actual args, then push |this|.
6414 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
6415 Register scratch) {
6416 // Holds the function nargs. Initially the number of args to the caller.
6417 Register argcreg = ToRegister(apply->getArgc());
6418 Register copyreg = ToRegister(apply->getTempObject());
6419 uint32_t extraFormals = apply->numExtraFormals();
6421 emitAllocateSpaceForApply(argcreg, scratch);
6423 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6425 // Push |this|.
6426 masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
// fun.apply(self, argsObject) call: read argc from the ArgumentsObject's
// length slot, copy the values out of its ArgumentsData, then push |this|.
// Note argc and argsObj share a register; see comments below.
6429 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
6430 // argc and argsObj are mapped to the same calltemp register.
6431 MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
6433 Register tmpArgc = ToRegister(apply->getTempObject());
6434 Register argsObj = ToRegister(apply->getArgsObj());
6436 // Load argc into tmpArgc.
// The length slot packs flags in the low PACKED_BITS_COUNT bits; shift
// them off to get the element count.
6437 Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
6438 masm.unboxInt32(lengthAddr, tmpArgc);
6439 masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
6441 // Allocate space on the stack for arguments. This modifies scratch.
6442 emitAllocateSpaceForApply(tmpArgc, scratch);
6444 // Load arguments data
6445 masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
6446 argsObj);
6447 size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
6449 // This is the end of the lifetime of argsObj.
6450 // After this call, the argsObj register holds the argument count instead.
6451 emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
6453 masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
// Copy |tmpArgc| Values from |srcBaseAndArgc|+|argvSrcOffset| into the space
// already reserved on the stack, and leave the argument count in
// |srcBaseAndArgc| (its base-pointer role ends here). See pre/postconditions
// in the comments below.
6456 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
6457 Register srcBaseAndArgc,
6458 Register scratch,
6459 size_t argvSrcOffset) {
6460 // Preconditions:
6461 // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
6462 // the stack to hold arguments.
6463 // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
6465 // Postconditions:
6466 // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
6467 // the allocated space.
6468 // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
6470 // |scratch| is used as a temp register within this function and clobbered.
6472 Label noCopy, epilogue;
6474 // Skip the copy of arguments if there are none.
6475 masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
6477 // Copy the values. This code is skipped entirely if there are
6478 // no values.
6479 size_t argvDstOffset = 0;
6481 Register argvSrcBase = srcBaseAndArgc;
6482 Register copyreg = scratch;
// Save argc on the stack so tmpArgc can serve as the copy loop's counter;
// the destination offset is bumped past this extra word.
6484 masm.push(tmpArgc);
6485 Register argvIndex = tmpArgc;
6486 argvDstOffset += sizeof(void*);
6488 // Copy
6489 emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
6490 argvDstOffset);
6492 // Restore.
6493 masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
6494 masm.jump(&epilogue);
6496 // Clear argc if we skipped the copy step.
6497 masm.bind(&noCopy);
6498 masm.movePtr(ImmWord(0), srcBaseAndArgc);
6500 // Join with all arguments copied and the extra stack usage computed.
6501 // Note, "srcBase" has become "argc".
6502 masm.bind(&epilogue);
// fun.apply(self, array) call: argc is the dense array's length; copy its
// elements onto the stack and push |this|.
6505 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
6506 Register scratch) {
6507 Register tmpArgc = ToRegister(apply->getTempObject());
6508 Register elementsAndArgc = ToRegister(apply->getElements());
6510 // Invariants guarded in the caller:
6511 // - the array is not too long
6512 // - the array length equals its initialized length
6514 // The array length is our argc for the purposes of allocating space.
6515 Address length(ToRegister(apply->getElements()),
6516 ObjectElements::offsetOfLength());
6517 masm.load32(length, tmpArgc);
6519 // Allocate space for the values.
6520 emitAllocateSpaceForApply(tmpArgc, scratch);
6522 // After this call "elements" has become "argc".
6523 size_t elementsOffset = 0;
6524 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6526 // Push |this|.
6527 masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
// Reflect.construct(fun, args)-style construct call: push new.target and
// padding, copy the caller's actual args, then push |this|. The newTarget
// register doubles as |scratch| once its value is on the stack.
6530 void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
6531 Register scratch) {
6532 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6534 // Holds the function nargs. Initially the number of args to the caller.
6535 Register argcreg = ToRegister(construct->getArgc());
6536 Register copyreg = ToRegister(construct->getTempObject());
6537 uint32_t extraFormals = construct->numExtraFormals();
6539 // Allocate space for the values.
6540 // After this call "newTarget" has become "scratch".
6541 emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
6543 emitPushArguments(argcreg, scratch, copyreg, extraFormals);
6545 // Push |this|.
6546 masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
// Construct call whose arguments come from a dense array: push new.target,
// copy the array elements onto the stack, then push |this|. As above, the
// newTarget register is repurposed as scratch and the elements register ends
// up holding argc.
6549 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
6550 Register scratch) {
6551 MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
6553 Register tmpArgc = ToRegister(construct->getTempObject());
6554 Register elementsAndArgc = ToRegister(construct->getElements());
6556 // Invariants guarded in the caller:
6557 // - the array is not too long
6558 // - the array length equals its initialized length
6560 // The array length is our argc for the purposes of allocating space.
6561 Address length(ToRegister(construct->getElements()),
6562 ObjectElements::offsetOfLength());
6563 masm.load32(length, tmpArgc);
6565 // Allocate space for the values.
6566 emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
6568 // After this call "elements" has become "argc" and "newTarget" has become
6569 // "scratch".
6570 size_t elementsOffset = 0;
6571 emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
6573 // Push |this|.
6574 masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
// Shared code generation for the LApply*/LConstruct* instructions: pushes the
// arguments, guards that the callee is a callable/constructible JS function
// with jitcode, and performs the call (via an Ion frame, a rectifier frame,
// or the generic InvokeFunction VM fallback).
template <typename T>
void CodeGenerator::emitApplyGeneric(T* apply) {
  // Holds the function object.
  Register calleereg = ToRegister(apply->getFunction());

  // Temporary register for modifying the function object.
  Register objreg = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // Holds the function nargs, computed in the invoker or (for ApplyArray,
  // ConstructArray, or ApplyArgsObj) in the argument pusher.
  Register argcreg = ToRegister(apply->getArgc());

  // Copy the arguments of the current function.
  //
  // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
  // compute argc. The argc register and the elements/argsObj register
  // are the same; argc must not be referenced before the call to
  // emitPushArguments() and elements/argsObj must not be referenced
  // after it returns.
  //
  // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
  // with scratch; newTarget must not be referenced after this point.
  //
  // objreg is dead across this call.
  emitPushArguments(apply, scratch);

  masm.checkStackAlignment();

  bool constructing = apply->mir()->isConstructing();

  // If the function is native, only emit the call to InvokeFunction.
  if (apply->hasSingleTarget() &&
      apply->getSingleTarget()->isNativeWithoutJitEntry()) {
    emitCallInvokeFunction(apply);

#ifdef DEBUG
    // Native constructors are guaranteed to return an Object value, so we never
    // have to replace a primitive result with the previously allocated Object
    // from CreateThis.
    if (constructing) {
      Label notPrimitive;
      masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                               &notPrimitive);
      masm.assumeUnreachable("native constructors don't return primitives");
      masm.bind(&notPrimitive);
    }
#endif

    emitRestoreStackPointerFromFP();
    return;
  }

  Label end, invoke;

  // Unless already known, guard that calleereg is actually a function object.
  if (!apply->hasSingleTarget()) {
    masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
                                 calleereg, &invoke);
  }

  // Guard that calleereg is an interpreted function with a JSScript.
  masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);

  // Guard that callee allows the [[Call]] or [[Construct]] operation required.
  if (constructing) {
    masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
                                 Assembler::Zero, &invoke);
  } else {
    masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                            calleereg, objreg, &invoke);
  }

  // Use the slow path if CreateThis was unable to create the |this| object.
  if (constructing) {
    // |this| was pushed last by emitPushArguments, so it sits at offset 0.
    Address thisAddr(masm.getStackPointer(), 0);
    masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
  }

  // Call with an Ion frame or a rectifier frame.
  {
    if (apply->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleereg, objreg);
    }

    // Knowing that calleereg is a non-native function, load jitcode.
    masm.loadJitCodeRaw(calleereg, objreg);

    masm.PushCalleeToken(calleereg, constructing);
    masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);

    Label underflow, rejoin;

    // Check whether the provided arguments satisfy target argc.
    if (!apply->hasSingleTarget()) {
      Register nformals = scratch;
      masm.loadFunctionArgCount(calleereg, nformals);
      masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
    } else {
      masm.branch32(Assembler::Below, argcreg,
                    Imm32(apply->getSingleTarget()->nargs()), &underflow);
    }

    // Skip the construction of the rectifier frame because we have no
    // underflow.
    masm.jump(&rejoin);

    // Argument fixup needed. Get ready to call the argumentsRectifier.
    {
      masm.bind(&underflow);

      // Hardcode the address of the argumentsRectifier code.
      TrampolinePtr argumentsRectifier =
          gen->jitRuntime()->getArgumentsRectifier();
      masm.movePtr(argumentsRectifier, objreg);
    }

    masm.bind(&rejoin);

    // Finally call the function in objreg, as assigned by one of the paths
    // above.
    ensureOsiSpace();
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, apply);

    if (apply->mir()->maybeCrossRealm()) {
      static_assert(!JSReturnOperand.aliases(ReturnReg),
                    "ReturnReg available as scratch after scripted calls");
      masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Discard JitFrameLayout fields still left on the stack.
    masm.freeStack(sizeof(JitFrameLayout) -
                   JitFrameLayout::bytesPoppedAfterCall());
    masm.jump(&end);
  }

  // Handle uncompiled or native functions.
  {
    masm.bind(&invoke);
    emitCallInvokeFunction(apply);
  }

  masm.bind(&end);

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (constructing) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);

#ifdef DEBUG
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif

    masm.bind(&notPrimitive);
  }

  // Pop arguments and continue.
  emitRestoreStackPointerFromFP();
}
6743 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6744 LSnapshot* snapshot = apply->snapshot();
6745 Register argcreg = ToRegister(apply->getArgc());
6747 // Ensure that we have a reasonable number of arguments.
6748 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6750 emitApplyGeneric(apply);
6753 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6754 Register argsObj = ToRegister(apply->getArgsObj());
6755 Register temp = ToRegister(apply->getTempObject());
6757 Label bail;
6758 masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6759 masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6760 bailoutFrom(&bail, apply->snapshot());
6762 emitApplyGeneric(apply);
6765 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6766 LSnapshot* snapshot = apply->snapshot();
6767 Register tmp = ToRegister(apply->getTempObject());
6769 Address length(ToRegister(apply->getElements()),
6770 ObjectElements::offsetOfLength());
6771 masm.load32(length, tmp);
6773 // Ensure that we have a reasonable number of arguments.
6774 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6776 // Ensure that the array does not contain an uninitialized tail.
6778 Address initializedLength(ToRegister(apply->getElements()),
6779 ObjectElements::offsetOfInitializedLength());
6780 masm.sub32(initializedLength, tmp);
6781 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6783 emitApplyGeneric(apply);
6786 void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
6787 LSnapshot* snapshot = lir->snapshot();
6788 Register argcreg = ToRegister(lir->getArgc());
6790 // Ensure that we have a reasonable number of arguments.
6791 bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6793 emitApplyGeneric(lir);
6796 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6797 LSnapshot* snapshot = lir->snapshot();
6798 Register tmp = ToRegister(lir->getTempObject());
6800 Address length(ToRegister(lir->getElements()),
6801 ObjectElements::offsetOfLength());
6802 masm.load32(length, tmp);
6804 // Ensure that we have a reasonable number of arguments.
6805 bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6807 // Ensure that the array does not contain an uninitialized tail.
6809 Address initializedLength(ToRegister(lir->getElements()),
6810 ObjectElements::offsetOfInitializedLength());
6811 masm.sub32(initializedLength, tmp);
6812 bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6814 emitApplyGeneric(lir);
6817 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
// Emits a debug-mode crash marker for control flow that MIR analysis proved
// can never be reached at the end of a block.
void CodeGenerator::visitUnreachable(LUnreachable* lir) {
  masm.assumeUnreachable("end-of-block assumed unreachable");
}
// Serializes the instruction's snapshot into the compact recovery buffer.
void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
  encode(lir->snapshot());
}
// Boxed-value flavor of the unreachable-result marker (debug crash if hit).
void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
  masm.assumeUnreachable("must be unreachable");
}
// Typed-register flavor of the unreachable-result marker (debug crash if hit).
void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
  masm.assumeUnreachable("must be unreachable");
}
// Out-of-line path to report over-recursed error and fail.
class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
  // The instruction whose live registers must be saved around the VM call.
  LInstruction* lir_;

 public:
  explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitCheckOverRecursedFailure(this);
  }

  LInstruction* lir() const { return lir_; }
};
void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
  // If we don't push anything on the stack, skip the check.
  if (omitOverRecursedCheck()) {
    return;
  }

  // Ensure that this frame will not cross the stack limit.
  // This is a weak check, justified by Ion using the C stack: we must always
  // be some distance away from the actual limit, since if the limit is
  // crossed, an error must be thrown, which requires more frames.
  //
  // It must always be possible to trespass past the stack limit.
  // Ion may legally place frames very close to the limit. Calling additional
  // C functions may then violate the limit without any checking.
  //
  // Since Ion frames exist on the C stack, the stack limit may be
  // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().

  CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
  addOutOfLineCode(ool, lir->mir());

  // Conditional forward (unlikely) branch to failure.
  const void* limitAddr = gen->runtime->addressOfJitStackLimit();
  masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
                         ool->entry());
  masm.bind(ool->rejoin());
}
void CodeGenerator::visitCheckOverRecursedFailure(
    CheckOverRecursedFailure* ool) {
  // The OOL path is hit if the recursion depth has been exceeded.
  // Throw an InternalError for over-recursion.

  // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
  // to save all live registers to avoid crashes if CheckOverRecursed triggers
  // a GC.
  saveLive(ool->lir());

  using Fn = bool (*)(JSContext*);
  callVM<Fn, CheckOverRecursed>(ool->lir());

  restoreLive(ool->lir());
  masm.jump(ool->rejoin());
}
IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
  // If scripts are being profiled, create a new IonScriptCounts for the
  // profiling data, which will be attached to the associated JSScript or
  // wasm module after code generation finishes.
  if (!gen->hasProfilingScripts()) {
    return nullptr;
  }

  // This test inhibits IonScriptCount creation for wasm code which is
  // currently incompatible with wasm codegen for two reasons: (1) wasm code
  // must be serializable and script count codegen bakes in absolute
  // addresses, (2) wasm code does not have a JSScript with which to associate
  // code coverage data.
  JSScript* script = gen->outerInfo().script();
  if (!script) {
    return nullptr;
  }

  auto counts = MakeUnique<IonScriptCounts>();
  if (!counts || !counts->init(graph.numBlocks())) {
    return nullptr;
  }

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    MBasicBlock* block = graph.getBlock(i)->mir();

    uint32_t offset = 0;
    char* description = nullptr;
    if (MResumePoint* resume = block->entryResumePoint()) {
      // Find a PC offset in the outermost script to use. If this
      // block is from an inlined script, find a location in the
      // outer script to associate information about the inlining
      // with.
      while (resume->caller()) {
        resume = resume->caller();
      }
      offset = script->pcToOffset(resume->pc());

      if (block->entryResumePoint()->caller()) {
        // Get the filename and line number of the inner script.
        JSScript* innerScript = block->info().script();
        // Allocation failure is tolerated: description stays null and the
        // block simply has no annotation.
        description = js_pod_calloc<char>(200);
        if (description) {
          snprintf(description, 200, "%s:%u", innerScript->filename(),
                   innerScript->lineno());
        }
      }
    }

    // NOTE(review): init() appears to take ownership of |description|;
    // on failure here the allocation may be leaked — confirm against
    // IonBlockCounts::init.
    if (!counts->block(i).init(block->id(), offset, description,
                               block->numSuccessors())) {
      return nullptr;
    }

    for (size_t j = 0; j < block->numSuccessors(); j++) {
      counts->block(i).setSuccessor(
          j, skipTrivialBlocks(block->getSuccessor(j))->id());
    }
  }

  // Keep a raw pointer; ownership is transferred to scriptCounts_.
  scriptCounts_ = counts.release();
  return scriptCounts_;
}
// Structure for managing the state tracked for a block by script counters.
// RAII: construction/init() attach a printer to the assembler and bump the
// block's hit count; destruction detaches the printer and stores the
// collected assembly text on the block.
struct ScriptCountBlockState {
  IonBlockCounts& block;
  MacroAssembler& masm;

  Sprinter printer;

 public:
  ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
      : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}

  bool init() {
    if (!printer.init()) {
      return false;
    }

    // Bump the hit count for the block at the start. This code is not
    // included in either the text for the block or the instruction byte
    // counts.
    masm.inc64(AbsoluteAddress(block.addressOfHitCount()));

    // Collect human readable assembly for the code generated in the block.
    masm.setPrinter(&printer);

    return true;
  }

  void visitInstruction(LInstruction* ins) {
#ifdef JS_JITSPEW
    // Prefix stream of assembly instructions with their LIR instruction
    // name and any associated high level info.
    if (const char* extra = ins->getExtraName()) {
      printer.printf("[%s:%s]\n", ins->opName(), extra);
    } else {
      printer.printf("[%s]\n", ins->opName());
    }
#endif
  }

  ~ScriptCountBlockState() {
    masm.setPrinter(nullptr);

    if (JS::UniqueChars str = printer.release()) {
      block.setCode(str.get());
    }
  }
};
// Branches to |invalidated| when this IonScript has been invalidated.
// The IonScript address is patched in later via ionScriptLabels_.
void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
  masm.propagateOOM(ionScriptLabels_.append(label));

  // If IonScript::invalidationCount_ != 0, the script has been invalidated.
  masm.branch32(Assembler::NotEqual,
                Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
                invalidated);
}
7016 #ifdef DEBUG
// Debug-only: verify that |input| holds a valid GC pointer of the MIR type,
// by calling the matching AssertValid*Ptr C++ helper.
void CodeGenerator::emitAssertGCThingResult(Register input,
                                            const MDefinition* mir) {
  MIRType type = mir->type();
  MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
             type == MIRType::Symbol || type == MIRType::BigInt);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp = regs.takeAny();
  masm.push(temp);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp, &done);

# ifndef JS_SIMULATOR
  // Check that we have a valid GC pointer.
  // Disable for wasm because we don't have a context on wasm compilation
  // threads and this needs a context.
  // Also disable for simulator builds because the C++ call is a lot slower
  // there than on actual hardware.
  if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
    saveVolatile();
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(input);

    switch (type) {
      case MIRType::Object: {
        using Fn = void (*)(JSContext* cx, JSObject* obj);
        masm.callWithABI<Fn, AssertValidObjectPtr>();
        break;
      }
      case MIRType::String: {
        using Fn = void (*)(JSContext* cx, JSString* str);
        masm.callWithABI<Fn, AssertValidStringPtr>();
        break;
      }
      case MIRType::Symbol: {
        using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
        masm.callWithABI<Fn, AssertValidSymbolPtr>();
        break;
      }
      case MIRType::BigInt: {
        using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
        masm.callWithABI<Fn, AssertValidBigIntPtr>();
        break;
      }
      default:
        MOZ_CRASH();
    }

    restoreVolatile();
  }
# endif

  masm.bind(&done);
  masm.pop(temp);
}
// Debug-only: verify that the boxed Value in |input| is valid by passing its
// stack address to the AssertValidValue C++ helper.
void CodeGenerator::emitAssertResultV(const ValueOperand input,
                                      const MDefinition* mir) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  masm.push(temp1);
  masm.push(temp2);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp1, &done);

  // Check that we have a valid GC pointer.
  if (JitOptions.fullDebugChecks) {
    saveVolatile();

    // Spill the value so we can pass a Value* to the helper.
    masm.pushValue(input);
    masm.moveStackPtrTo(temp1);

    using Fn = void (*)(JSContext* cx, Value* v);
    masm.setupUnalignedABICall(temp2);
    masm.loadJSContext(temp2);
    masm.passABIArg(temp2);
    masm.passABIArg(temp1);
    masm.callWithABI<Fn, AssertValidValue>();
    masm.popValue(input);
    restoreVolatile();
  }

  masm.bind(&done);
  masm.pop(temp2);
  masm.pop(temp1);
}
7117 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
7118 MDefinition* mir) {
7119 if (lir->numDefs() == 0) {
7120 return;
7123 MOZ_ASSERT(lir->numDefs() == 1);
7124 if (lir->getDef(0)->isBogusTemp()) {
7125 return;
7128 Register output = ToRegister(lir->getDef(0));
7129 emitAssertGCThingResult(output, mir);
7132 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
7133 if (lir->numDefs() == 0) {
7134 return;
7137 MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
7138 if (!lir->getDef(0)->output()->isRegister()) {
7139 return;
7142 ValueOperand output = ToOutValue(lir);
7144 emitAssertResultV(output, mir);
void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
  // In debug builds, check that LIR instructions return valid values.

  MDefinition* mir = ins->mirRaw();
  if (!mir) {
    return;
  }

  // Dispatch on the MIR result type: GC things and boxed Values get
  // validity checks; other types (ints, doubles, ...) need none.
  switch (mir->type()) {
    case MIRType::Object:
    case MIRType::String:
    case MIRType::Symbol:
    case MIRType::BigInt:
      emitGCThingResultChecks(ins, mir);
      break;
    case MIRType::Value:
      emitValueResultChecks(ins, mir);
      break;
    default:
      break;
  }
}
// Debug-only: when the ion.bail-after testing option is enabled, decrement a
// runtime counter before each snapshot-carrying instruction and force a
// bailout when it reaches zero.
void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
  if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
    return;
  }
  if (!lir->snapshot()) {
    return;
  }
  if (lir->isOsiPoint()) {
    return;
  }

  masm.comment("emitDebugForceBailing");
  const void* bailAfterCounterAddr =
      gen->runtime->addressOfIonBailAfterCounter();

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());

  Label done, notBail;
  // Counter at zero means the feature is idle for this runtime.
  masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
                Imm32(0), &done);
  {
    Register temp = regs.takeAny();

    masm.push(temp);
    masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
    masm.sub32(Imm32(1), temp);
    masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));

    masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
    {
      // Counter hit zero: restore the scratch register and bail.
      masm.pop(temp);
      bailout(lir->snapshot());
    }
    masm.bind(&notBail);
    masm.pop(temp);
  }
  masm.bind(&done);
}
7208 #endif
// Main code-generation loop: walks every (non-trivial) LIR block and every
// instruction within it, dispatching to the per-opcode visit* method.
// Returns false on OOM or other codegen failure.
bool CodeGenerator::generateBody() {
  JitSpewCont(JitSpew_Codegen, "\n");
  AutoCreatedBy acb(masm, "CodeGenerator::generateBody");

  JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
  IonScriptCounts* counts = maybeCreateScriptCounts();

  const bool compilingWasm = gen->compilingWasm();

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    current = graph.getBlock(i);

    // Don't emit any code for trivial blocks, containing just a goto. Such
    // blocks are created to split critical edges, and if we didn't end up
    // putting any instructions in them, we can skip them.
    if (current->isTrivial()) {
      continue;
    }

#ifdef JS_JITSPEW
    const char* filename = nullptr;
    size_t lineNumber = 0;
    JS::LimitedColumnNumberOneOrigin columnNumber;
    if (current->mir()->info().script()) {
      filename = current->mir()->info().script()->filename();
      if (current->mir()->pc()) {
        lineNumber = PCToLineNumber(current->mir()->info().script(),
                                    current->mir()->pc(), &columnNumber);
      }
    }
    JitSpew(JitSpew_Codegen, "--------------------------------");
    JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
            filename ? filename : "?", lineNumber,
            columnNumber.oneOriginValue(),
            current->mir()->isLoopHeader() ? " (loop header)" : "");
#endif

    if (current->mir()->isLoopHeader() && compilingWasm) {
      masm.nopAlign(CodeAlignment);
    }

    masm.bind(current->label());

    mozilla::Maybe<ScriptCountBlockState> blockCounts;
    if (counts) {
      blockCounts.emplace(&counts->block(i), &masm);
      if (!blockCounts->init()) {
        return false;
      }
    }

    for (LInstructionIterator iter = current->begin(); iter != current->end();
         iter++) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      perfSpewer_.recordInstruction(masm, *iter);
#ifdef JS_JITSPEW
      JitSpewStart(JitSpew_Codegen, "                                # LIR=%s",
                   iter->opName());
      if (const char* extra = iter->getExtraName()) {
        JitSpewCont(JitSpew_Codegen, ":%s", extra);
      }
      JitSpewFin(JitSpew_Codegen);
#endif

      if (counts) {
        blockCounts->visitInstruction(*iter);
      }

#ifdef CHECK_OSIPOINT_REGISTERS
      if (iter->safepoint() && !compilingWasm) {
        resetOsiPointRegs(iter->safepoint());
      }
#endif

      if (!compilingWasm) {
        if (MDefinition* mir = iter->mirRaw()) {
          if (!addNativeToBytecodeEntry(mir->trackedSite())) {
            return false;
          }
        }
      }

      setElement(*iter);  // needed to encode correct snapshot location.

#ifdef DEBUG
      emitDebugForceBailing(*iter);
#endif

      // Dispatch to the opcode-specific visitor via the LIR opcode list.
      switch (iter->op()) {
#ifndef JS_CODEGEN_NONE
#  define LIROP(op)              \
    case LNode::Opcode::op:      \
      visit##op(iter->to##op()); \
      break;
        LIR_OPCODE_LIST(LIROP)
#  undef LIROP
#endif
        case LNode::Opcode::Invalid:
        default:
          MOZ_CRASH("Invalid LIR op");
      }

#ifdef DEBUG
      if (!counts) {
        emitDebugResultChecks(*iter);
      }
#endif

      if (masm.oom()) {
        return false;
      }
    }
  }

  JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
  return true;
}
// Out-of-line object allocation for LNewArray.
class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
  // The LNewArray whose inline allocation fell through to this slow path.
  LNewArray* lir_;

 public:
  explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewArray(this);
  }

  LNewArray* lir() const { return lir_; }
};
// VM-call fallback for array allocation, used both directly (isVMCall) and
// from the out-of-line path when inline allocation fails.
void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // With a template object we can pass its shape; otherwise fall back to the
  // generic bytecode-driven array constructor.
  if (templateObject) {
    pushArg(ImmGCPtr(templateObject->shape()));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
    callVM<Fn, NewArrayWithShape>(lir);
  } else {
    pushArg(Imm32(GenericObject));
    pushArg(Imm32(lir->mir()->length()));

    using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
    callVM<Fn, NewArrayOperation>(lir);
  }

  masm.storeCallPointerResult(objReg);

  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
7372 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
7373 FloatRegister y = ToFloatRegister(lir->y());
7374 FloatRegister x = ToFloatRegister(lir->x());
7376 using Fn = double (*)(double x, double y);
7377 masm.setupAlignedABICall();
7378 masm.passABIArg(y, ABIType::Float64);
7379 masm.passABIArg(x, ABIType::Float64);
7380 masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
7382 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
// Math.hypot with 2-4 arguments: forward all doubles to the matching C++
// helper (ecmaHypot / hypot3 / hypot4).
void CodeGenerator::visitHypot(LHypot* lir) {
  uint32_t numArgs = lir->numArgs();
  masm.setupAlignedABICall();

  for (uint32_t i = 0; i < numArgs; ++i) {
    masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
  }

  switch (numArgs) {
    case 2: {
      using Fn = double (*)(double x, double y);
      masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
      break;
    }
    case 3: {
      using Fn = double (*)(double x, double y, double z);
      masm.callWithABI<Fn, hypot3>(ABIType::Float64);
      break;
    }
    case 4: {
      using Fn = double (*)(double x, double y, double z, double w);
      masm.callWithABI<Fn, hypot4>(ABIType::Float64);
      break;
    }
    default:
      MOZ_CRASH("Unexpected number of arguments to hypot function.");
  }
  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
}
// Allocate a fixed-length array: inline nursery/tenured allocation from the
// template object, with an out-of-line VM-call fallback.
void CodeGenerator::visitNewArray(LNewArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  DebugOnly<uint32_t> length = lir->mir()->length();

  MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);

  if (lir->mir()->isVMCall()) {
    visitNewArrayCallVM(lir);
    return;
  }

  OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());
#ifdef DEBUG
  size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
                             ObjectElements::VALUES_PER_HEADER;
  MOZ_ASSERT(length <= numInlineElements,
             "Inline allocation only supports inline elements");
#endif
  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry());

  masm.bind(ool->rejoin());
}
// Slow path for LNewArray: perform the VM call, then rejoin the inline code.
void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
  visitNewArrayCallVM(ool->lir());
  masm.jump(ool->rejoin());
}
// Allocate an array whose length is only known at runtime (in lengthReg).
// Inline-allocates from the template object when the requested length fits
// its fixed elements; otherwise calls into the VM.
void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  bool canInline = true;
  size_t inlineLength = 0;
  if (templateObject->as<ArrayObject>().hasFixedElements()) {
    size_t numSlots =
        gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
    inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
  } else {
    canInline = false;
  }

  if (canInline) {
    // Try to do the allocation inline if the template object is big enough
    // for the length in lengthReg. If the length is bigger we could still
    // use the template object and not allocate the elements, but it's more
    // efficient to do a single big allocation than (repeatedly) reallocating
    // the array later on when filling it.
    masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
                  ool->entry());

    TemplateObject templateObj(templateObject);
    masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
                        ool->entry());

    // Patch the template's length with the runtime length.
    size_t lengthOffset = NativeObject::offsetOfFixedElements() +
                          ObjectElements::offsetOfLength();
    masm.store32(lengthReg, Address(objReg, lengthOffset));
  } else {
    masm.jump(ool->entry());
  }

  masm.bind(ool->rejoin());
}
// Allocate an Array/String/RegExpString iterator object from its template,
// with a per-kind VM-call fallback.
void CodeGenerator::visitNewIterator(LNewIterator* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  OutOfLineCode* ool;
  switch (lir->mir()->type()) {
    case MNewIterator::ArrayIterator: {
      using Fn = ArrayIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
                                            StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::StringIterator: {
      using Fn = StringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
                                             StoreRegisterTo(objReg));
      break;
    }
    case MNewIterator::RegExpStringIterator: {
      using Fn = RegExpStringIteratorObject* (*)(JSContext*);
      ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
                                                   StoreRegisterTo(objReg));
      break;
    }
    default:
      MOZ_CRASH("unexpected iterator type");
  }

  TemplateObject templateObject(lir->mir()->templateObject());
  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
// Allocate a fixed-length typed array from its template object and initialize
// its slots/elements inline, with a VM-call fallback.
void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  Register lengthReg = ToRegister(lir->temp1());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();

  size_t n = ttemplate->length();
  MOZ_ASSERT(n <= INT32_MAX,
             "Template objects are only created for int32 lengths");

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
      StoreRegisterTo(objReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate, MacroAssembler::TypedArrayLength::Fixed);

  masm.bind(ool->rejoin());
}
// Like visitNewTypedArray, but the length is only known at runtime
// (in lengthReg), so slot initialization uses the dynamic-length variant.
void CodeGenerator::visitNewTypedArrayDynamicLength(
    LNewTypedArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);

  JSObject* templateObject = lir->mir()->templateObject();
  gc::Heap initialHeap = lir->mir()->initialHeap();

  TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  // Volatile |lengthReg| is saved across the ABI call in |initTypedArraySlots|.
  MOZ_ASSERT_IF(lengthReg.volatile_(), liveRegs.has(lengthReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate,
                           MacroAssembler::TypedArrayLength::Dynamic);

  masm.bind(ool->rejoin());
}
// Construct a typed array from an existing array via a VM call.
// Note: pushArg order is the reverse of the Fn parameter order.
void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
  pushArg(ToRegister(lir->array()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
}
// Construct a typed array over an ArrayBuffer (with byteOffset/length Values)
// via a VM call. Note: pushArg order is the reverse of the Fn parameter order.
void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}
// Function.prototype.bind: pre-allocate a BoundFunctionObject inline when
// possible (nullptr otherwise) and let the VM finish binding the arguments
// that are already laid out on the stack.
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // pushArg order is the reverse of the Fn parameter order.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
// Allocate a BoundFunctionObject from its template object inline, with a
// VM-call fallback.
void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  JSObject* templateObj = lir->mir()->templateObj();

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.bind(ool->rejoin());
}
7664 // Out-of-line object allocation for JSOp::NewObject.
// Simple OOL trampoline: stores the LIR node and dispatches back to
// CodeGenerator::visitOutOfLineNewObject for the slow path.
7665 class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
7666   LNewObject* lir_;
7668  public:
7669   explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}
7671   void accept(CodeGenerator* codegen) override {
7672     codegen->visitOutOfLineNewObject(this);
7675   LNewObject* lir() const { return lir_; }
// Slow-path object creation: saves live registers and calls the VM
// function appropriate for the MNewObject mode (object literal vs.
// Object.create with a template).
7678 void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
7679   Register objReg = ToRegister(lir->output());
7681   MOZ_ASSERT(!lir->isCall());
7682   saveLive(lir);
7684   JSObject* templateObject = lir->mir()->templateObject();
7686   // If we're making a new object with a class prototype (that is, an object
7687   // that derives its class from its prototype instead of being
7688   // PlainObject::class_'d) from self-hosted code, we need a different init
7689   // function.
7690   switch (lir->mir()->mode()) {
7691     case MNewObject::ObjectLiteral: {
7692       MOZ_ASSERT(!templateObject);
7693       pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7694       pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
7696       using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
7697       callVM<Fn, NewObjectOperation>(lir);
7698       break;
7700     case MNewObject::ObjectCreate: {
7701       pushArg(ImmGCPtr(templateObject));
7703       using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
7704       callVM<Fn, ObjectCreateWithTemplate>(lir);
7705       break;
7709   masm.storeCallPointerResult(objReg);
  // The result register must not be clobbered by restoreLive below.
7711   MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
7712   restoreLive(lir);
// Returns true iff the allocation code must pre-fill the new object's fixed
// slots with |undefined|. Returns false when a scan of the instructions
// following the allocation proves every fixed slot is written (via
// MStoreFixedSlot) before anything can GC or read the object's slots.
// As a side effect, disables the pre-barrier on those stores, since they
// write to freshly allocated (possibly uninitialized) memory.
7715 static bool ShouldInitFixedSlots(LNewPlainObject* lir, const Shape* shape,
7716                                  uint32_t nfixed) {
7717   // Look for StoreFixedSlot instructions following an object allocation
7718   // that write to this object before a GC is triggered or this object is
7719   // passed to a VM call. If all fixed slots will be initialized, the
7720   // allocation code doesn't need to set the slots to |undefined|.
7722   if (nfixed == 0) {
7723     return false;
7726   // Keep track of the fixed slots that are initialized. initializedSlots is
7727   // a bit mask with a bit for each slot.
7728   MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
7729   static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
7730                 "Slot bits must fit in 32 bits");
7731   uint32_t initializedSlots = 0;
7732   uint32_t numInitialized = 0;
7734   MInstruction* allocMir = lir->mir();
7735   MBasicBlock* block = allocMir->block();
7737   // Skip the allocation instruction.
7738   MInstructionIterator iter = block->begin(allocMir);
7739   MOZ_ASSERT(*iter == allocMir);
7740   iter++;
7742   // Handle the leading shape guard, if present.
7743   for (; iter != block->end(); iter++) {
7744     if (iter->isConstant()) {
7745       // This instruction won't trigger a GC or read object slots.
7746       continue;
7748     if (iter->isGuardShape()) {
7749       auto* guard = iter->toGuardShape();
7750       if (guard->object() != allocMir || guard->shape() != shape) {
7751         return true;
      // Subsequent stores go through the guard, so track it as the
      // allocation from here on.
7753       allocMir = guard;
7754       iter++;
7756     break;
7759   for (; iter != block->end(); iter++) {
7760     if (iter->isConstant() || iter->isPostWriteBarrier()) {
7761       // These instructions won't trigger a GC or read object slots.
7762       continue;
7765     if (iter->isStoreFixedSlot()) {
7766       MStoreFixedSlot* store = iter->toStoreFixedSlot();
7767       if (store->object() != allocMir) {
7768         return true;
7771       // We may not initialize this object slot on allocation, so the
7772       // pre-barrier could read uninitialized memory. Simply disable
7773       // the barrier for this store: the object was just initialized
7774       // so the barrier is not necessary.
7775       store->setNeedsBarrier(false);
7777       uint32_t slot = store->slot();
7778       MOZ_ASSERT(slot < nfixed);
7779       if ((initializedSlots & (1 << slot)) == 0) {
7780         numInitialized++;
7781         initializedSlots |= (1 << slot);
7783       if (numInitialized == nfixed) {
7784         // All fixed slots will be initialized.
7785         MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
7786         return false;
7789       continue;
7792     // Unhandled instruction, assume it bails or reads object slots.
7793     return true;
  // A block always ends in a control instruction, which is "unhandled"
  // above, so the loop cannot fall through.
7796   MOZ_CRASH("Shouldn't get here");
// Object allocation for JSOp::NewObject: either a pure VM call, or an
// inline template-object allocation with an out-of-line VM fallback.
7799 void CodeGenerator::visitNewObject(LNewObject* lir) {
7800   Register objReg = ToRegister(lir->output());
7801   Register tempReg = ToRegister(lir->temp());
7803   if (lir->mir()->isVMCall()) {
7804     visitNewObjectVMCall(lir);
7805     return;
7808   OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
7809   addOutOfLineCode(ool, lir->mir());
7811   TemplateObject templateObject(lir->mir()->templateObject());
7813   masm.createGCObject(objReg, tempReg, templateObject,
7814                       lir->mir()->initialHeap(), ool->entry());
7816   masm.bind(ool->rejoin());
// OOL continuation for visitNewObject: performs the VM call and jumps
// back to the inline fast path's rejoin point.
7819 void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
7820   visitNewObjectVMCall(ool->lir());
7821   masm.jump(ool->rejoin());
// Inline allocation of a plain object with a known shape, alloc kind, and
// heap; falls back to NewPlainObjectOptimizedFallback in the VM. Slot
// initialization is elided when ShouldInitFixedSlots proves it redundant.
7824 void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
7825   Register objReg = ToRegister(lir->output());
7826   Register temp0Reg = ToRegister(lir->temp0());
7827   Register temp1Reg = ToRegister(lir->temp1());
7828   Register shapeReg = ToRegister(lir->temp2());
7830   auto* mir = lir->mir();
7831   const Shape* shape = mir->shape();
7832   gc::Heap initialHeap = mir->initialHeap();
7833   gc::AllocKind allocKind = mir->allocKind();
7835   using Fn =
7836       JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
7837   OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
7838       lir,
7839       ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
7840               Imm32(int32_t(initialHeap))),
7841       StoreRegisterTo(objReg));
7843   bool initContents = ShouldInitFixedSlots(lir, shape, mir->numFixedSlots());
7845   masm.movePtr(ImmGCPtr(shape), shapeReg);
7846   masm.createPlainGCObject(
7847       objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
7848       mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
7849       AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);
7851 #ifdef DEBUG
7852   // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
7853   // so ensure the newly created object has the correct shape. Should the guard
7854   // ever fail, we may end up with uninitialized fixed slots, which can confuse
7855   // the GC.
7856   Label ok;
7857   masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
7858                           &ok);
  // NOTE(review): this message reads inverted — it is emitted on the path
  // where the shape does NOT match; consider rewording upstream.
7859   masm.assumeUnreachable("Newly created object has the correct shape");
7860   masm.bind(&ok);
7861 #endif
7863   masm.bind(ool->rejoin());
// Inline allocation of an array with fixed-length elements, falling back
// to NewArrayObjectOptimizedFallback in the VM. The alloc kind is derived
// from the array length and converted to a background-finalized kind.
7866 void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
7867   Register objReg = ToRegister(lir->output());
7868   Register temp0Reg = ToRegister(lir->temp0());
7869   Register shapeReg = ToRegister(lir->temp1());
7871   auto* mir = lir->mir();
7872   uint32_t arrayLength = mir->length();
7874   gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
7875   MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
7876   allocKind = ForegroundToBackgroundAllocKind(allocKind);
  // Fixed slots hold the elements header plus the element storage.
7878   uint32_t slotCount = GetGCKindSlots(allocKind);
7879   MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
7880   uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;
7882   const Shape* shape = mir->shape();
7884   NewObjectKind objectKind =
7885       mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
7887   using Fn =
7888       ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
7889   OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
7890       lir,
7891       ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
7892       StoreRegisterTo(objReg));
7894   masm.movePtr(ImmPtr(shape), shapeReg);
7895   masm.createArrayWithFixedElements(
7896       objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
7897       allocKind, mir->initialHeap(), ool->entry(),
7898       AllocSiteInput(gc::CatchAllAllocSite::Optimized));
7899   masm.bind(ool->rejoin());
// Allocates the NamedLambdaObject environment from a template object, with
// a VM fallback that recreates it from the (possibly lazy) function.
7902 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
7903   Register objReg = ToRegister(lir->output());
7904   Register tempReg = ToRegister(lir->temp0());
7905   const CompileInfo& info = lir->mir()->block()->info();
7907   using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction);
7908   OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
7909       lir, ArgList(info.funMaybeLazy()), StoreRegisterTo(objReg));
7911   TemplateObject templateObject(lir->mir()->templateObj());
7913   masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7914                       ool->entry());
7916   masm.bind(ool->rejoin());
// Allocates a CallObject from its template, with a VM fallback keyed on
// the template's shared shape.
7919 void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
7920   Register objReg = ToRegister(lir->output());
7921   Register tempReg = ToRegister(lir->temp0());
7923   CallObject* templateObj = lir->mir()->templateObject();
7925   using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>);
7926   OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
7927       lir, ArgList(ImmGCPtr(templateObj->sharedShape())),
7928       StoreRegisterTo(objReg));
7930   // Inline call object creation, using the OOL path only for tricky cases.
7931   TemplateObject templateObject(templateObj);
7932   masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
7933                       ool->entry());
7935   masm.bind(ool->rejoin());
// Allocates a StringObject wrapper for |input|, initializing its primitive
// value and length slots inline; falls back to NewStringObject in the VM.
7938 void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
7939   Register input = ToRegister(lir->input());
7940   Register output = ToRegister(lir->output());
7941   Register temp = ToRegister(lir->temp0());
7943   StringObject* templateObj = lir->mir()->templateObj();
7945   using Fn = JSObject* (*)(JSContext*, HandleString);
7946   OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
7947                                                       StoreRegisterTo(output));
7949   TemplateObject templateObject(templateObj);
7950   masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
7951                       ool->entry());
7953   masm.loadStringLength(input, temp);
7955   masm.storeValue(JSVAL_TYPE_STRING, input,
7956                   Address(output, StringObject::offsetOfPrimitiveValue()));
7957   masm.storeValue(JSVAL_TYPE_INT32, temp,
7958                   Address(output, StringObject::offsetOfLength()));
7960   masm.bind(ool->rejoin());
// VM call wrapper for defining an element accessor (getter/setter) during
// object-literal initialization. Args are pushed in reverse Fn order.
7963 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
7964   Register obj = ToRegister(lir->object());
7965   Register value = ToRegister(lir->value());
7967   pushArg(value);
7968   pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
7969   pushArg(obj);
7970   pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7972   using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
7973                       HandleObject);
7974   callVM<Fn, InitElemGetterSetterOperation>(lir);
// VM call wrapper for __proto__ mutation in an object literal.
7977 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
7978   Register objReg = ToRegister(lir->object());
7980   pushArg(ToValue(lir, LMutateProto::ValueIndex));
7981   pushArg(objReg);
7983   using Fn =
7984       bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
7985   callVM<Fn, MutatePrototype>(lir);
// VM call wrapper for defining a named-property accessor (getter/setter)
// during object-literal initialization.
7988 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
7989   Register obj = ToRegister(lir->object());
7990   Register value = ToRegister(lir->value());
7992   pushArg(value);
7993   pushArg(ImmGCPtr(lir->mir()->name()));
7994   pushArg(obj);
7995   pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
7997   using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
7998                       Handle<PropertyName*>, HandleObject);
7999   callVM<Fn, InitPropGetterSetterOperation>(lir);
// VM call wrapper creating the |this| object for a constructor call.
// Callee/newTarget may each be a compile-time constant or a register.
8002 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
8003   const LAllocation* callee = lir->callee();
8004   const LAllocation* newTarget = lir->newTarget();
8006   if (newTarget->isConstant()) {
8007     pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
8008   } else {
8009     pushArg(ToRegister(newTarget));
8012   if (callee->isConstant()) {
8013     pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
8014   } else {
8015     pushArg(ToRegister(callee));
8018   using Fn = bool (*)(JSContext* cx, HandleObject callee,
8019                       HandleObject newTarget, MutableHandleValue rval);
8020   callVM<Fn, jit::CreateThisFromIon>(lir);
// Creates the function's arguments object. Fast path: allocate from a
// template object and finish it with an ABI call to
// ArgumentsObject::finishForIonPure (no GC allowed while slots are
// uninitialized). On failure, falls back to a full VM call.
8023 void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
8024   // This should be getting constructed in the first block only, and not any OSR
8025   // entry blocks.
8026   MOZ_ASSERT(lir->mir()->block()->id() == 0);
8028   Register callObj = ToRegister(lir->callObject());
8029   Register temp0 = ToRegister(lir->temp0());
8030   Label done;
8032   if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8033     Register objTemp = ToRegister(lir->temp1());
8034     Register cxTemp = ToRegister(lir->temp2());
    // Preserve callObj across the ABI call; restored on the failure path.
8036     masm.Push(callObj);
8038     // Try to allocate an arguments object. This will leave the reserved
8039     // slots uninitialized, so it's important we don't GC until we
8040     // initialize these slots in ArgumentsObject::finishForIonPure.
8041     Label failure;
8042     TemplateObject templateObject(templateObj);
8043     masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
8044                         &failure,
8045                         /* initContents = */ false);
    // temp0 <- address of the JitFrameLayout (frame pointer equivalent).
8047     masm.moveStackPtrTo(temp0);
8048     masm.addPtr(Imm32(masm.framePushed()), temp0);
8050     using Fn = ArgumentsObject* (*)(JSContext* cx, jit::JitFrameLayout* frame,
8051                                     JSObject* scopeChain, ArgumentsObject* obj);
8052     masm.setupAlignedABICall();
8053     masm.loadJSContext(cxTemp);
8054     masm.passABIArg(cxTemp);
8055     masm.passABIArg(temp0);
8056     masm.passABIArg(callObj);
8057     masm.passABIArg(objTemp);
8059     masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
8060     masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8062     // Discard saved callObj on the stack.
8063     masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
8064     masm.jump(&done);
8066     masm.bind(&failure);
8067     masm.Pop(callObj);
  // Slow path: full VM call taking the frame address and call object.
8070   masm.moveStackPtrTo(temp0);
8071   masm.addPtr(Imm32(frameSize()), temp0);
8073   pushArg(callObj);
8074   pushArg(temp0);
8076   using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
8077   callVM<Fn, ArgumentsObject::createForIon>(lir);
8079   masm.bind(&done);
// Creates an arguments object for an inlined frame. The actual argument
// Values are first materialized into a contiguous stack array; then either
// a pure ABI finish call (fast path, with template object) or a VM call
// consumes that array.
8082 void CodeGenerator::visitCreateInlinedArgumentsObject(
8083     LCreateInlinedArgumentsObject* lir) {
8084   Register callObj = ToRegister(lir->getCallObject());
8085   Register callee = ToRegister(lir->getCallee());
8086   Register argsAddress = ToRegister(lir->temp1());
8087   Register argsObj = ToRegister(lir->temp2());
8089   // TODO: Do we have to worry about alignment here?
8091   // Create a contiguous array of values for ArgumentsObject::create
8092   // by pushing the arguments onto the stack in reverse order.
8093   uint32_t argc = lir->mir()->numActuals();
8094   for (uint32_t i = 0; i < argc; i++) {
8095     uint32_t argNum = argc - i - 1;
8096     uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
8097     ConstantOrRegister arg =
8098         toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
8099     masm.Push(arg);
  // argsAddress now points at argument 0 (lowest stack address).
8101   masm.moveStackPtrTo(argsAddress);
8103   Label done;
8104   if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
8105     LiveRegisterSet liveRegs;
8106     liveRegs.add(callObj);
8107     liveRegs.add(callee);
8109     masm.PushRegsInMask(liveRegs);
8111     // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
8112     // a call instruction.
8113     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
8114     allRegs.take(callObj);
8115     allRegs.take(callee);
8116     allRegs.take(argsObj);
8117     allRegs.take(argsAddress);
8119     Register temp3 = allRegs.takeAny();
8120     Register temp4 = allRegs.takeAny();
8122     // Try to allocate an arguments object. This will leave the reserved slots
8123     // uninitialized, so it's important we don't GC until we initialize these
8124     // slots in ArgumentsObject::finishForIonPure.
8125     Label failure;
8126     TemplateObject templateObject(templateObj);
8127     masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
8128                         &failure,
8129                         /* initContents = */ false);
8131     Register numActuals = temp3;
8132     masm.move32(Imm32(argc), numActuals);
8134     using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
8135                                     uint32_t, ArgumentsObject*);
8136     masm.setupAlignedABICall();
8137     masm.loadJSContext(temp4);
8138     masm.passABIArg(temp4);
8139     masm.passABIArg(callObj);
8140     masm.passABIArg(callee);
8141     masm.passABIArg(argsAddress);
8142     masm.passABIArg(numActuals);
8143     masm.passABIArg(argsObj);
8145     masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
8146     masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);
8148     // Discard saved callObj, callee, and values array on the stack.
8149     masm.addToStackPtr(
8150         Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
8151               argc * sizeof(Value)));
8152     masm.jump(&done);
8154     masm.bind(&failure);
8155     masm.PopRegsInMask(liveRegs);
8157     // Reload argsAddress because it may have been overridden.
8158     masm.moveStackPtrTo(argsAddress);
  // Slow path: VM call consuming the on-stack argument array.
8161   pushArg(Imm32(argc));
8162   pushArg(callObj);
8163   pushArg(callee);
8164   pushArg(argsAddress);
8166   using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
8167                                   HandleObject, uint32_t);
8168   callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);
8170   // Discard the array of values.
8171   masm.freeStack(argc * sizeof(Value));
8173   masm.bind(&done);
// Emits a compare-and-select chain that loads the inlined argument at the
// (already bounds-checked) dynamic |index| into |output|. Shared by
// LGetInlinedArgument and LGetInlinedArgumentHole.
8176 template <class GetInlinedArgument>
8177 void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
8178                                            Register index,
8179                                            ValueOperand output) {
8180   uint32_t numActuals = lir->mir()->numActuals();
8181   MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
8183   // The index has already been bounds-checked, so the code we
8184   // generate here should be unreachable. We can end up in this
8185   // situation in self-hosted code using GetArgument(), or in a
8186   // monomorphically inlined function if we've inlined some CacheIR
8187   // that was created for a different caller.
8188   if (numActuals == 0) {
8189     masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8190     return;
8193   // Check the first n-1 possible indices.
8194   Label done;
8195   for (uint32_t i = 0; i < numActuals - 1; i++) {
8196     Label skip;
8197     ConstantOrRegister arg = toConstantOrRegister(
8198         lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
8199     masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
8200     masm.moveValue(arg, output);
8202     masm.jump(&done);
8203     masm.bind(&skip);
8206 #ifdef DEBUG
  // In debug builds, verify the only remaining index really is the last one.
8207   Label skip;
8208   masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
8209   masm.assumeUnreachable("LGetInlinedArgument: invalid index");
8210   masm.bind(&skip);
8211 #endif
8213   // The index has already been bounds-checked, so load the last argument.
8214   uint32_t lastIdx = numActuals - 1;
8215   ConstantOrRegister arg =
8216       toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
8217                            lir->mir()->getArg(lastIdx)->type());
8218   masm.moveValue(arg, output);
8219   masm.bind(&done);
// Loads an inlined-frame argument by dynamic index (index pre-checked).
8222 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
8223   Register index = ToRegister(lir->getIndex());
8224   ValueOperand output = ToOutValue(lir);
8226   emitGetInlinedArgument(lir, index, output);
// Like visitGetInlinedArgument, but out-of-bounds non-negative indices
// yield |undefined|; negative indices bail out.
8229 void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
8230   Register index = ToRegister(lir->getIndex());
8231   ValueOperand output = ToOutValue(lir);
8233   uint32_t numActuals = lir->mir()->numActuals();
8235   if (numActuals == 0) {
8236     bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8237     masm.moveValue(UndefinedValue(), output);
8238     return;
8241   Label outOfBounds, done;
8242   masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
8243                 &outOfBounds);
8245   emitGetInlinedArgument(lir, index, output);
8246   masm.jump(&done);
8248   masm.bind(&outOfBounds);
8249   bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
8250   masm.moveValue(UndefinedValue(), output);
8252   masm.bind(&done);
// Loads argument |argno| from an arguments object's ArgumentsData at a
// compile-time-constant offset. Debug builds assert the slot was not a
// "forwarded/deleted" magic value.
8255 void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
8256   Register temp = ToRegister(lir->temp0());
8257   Register argsObj = ToRegister(lir->argsObject());
8258   ValueOperand out = ToOutValue(lir);
8260   masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8261                    temp);
8262   Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8263                             lir->mir()->argno() * sizeof(Value));
8264   masm.loadValue(argAddr, out);
8265 #ifdef DEBUG
8266   Label success;
8267   masm.branchTestMagic(Assembler::NotEqual, out, &success);
8268   masm.assumeUnreachable(
8269       "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8270   masm.bind(&success);
8271 #endif
// Stores a value into argument slot |argno| of an arguments object,
// emitting the required pre-barrier on the overwritten Value.
8274 void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
8275   Register temp = ToRegister(lir->getTemp(0));
8276   Register argsObj = ToRegister(lir->argsObject());
8277   ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);
8279   masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
8280                    temp);
8281   Address argAddr(temp, ArgumentsData::offsetOfArgs() +
8282                             lir->mir()->argno() * sizeof(Value));
8283   emitPreBarrier(argAddr);
8284 #ifdef DEBUG
8285   Label success;
8286   masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
8287   masm.assumeUnreachable(
8288       "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
8289   masm.bind(&success);
8290 #endif
8291   masm.storeValue(value, argAddr);
// Dynamic-index load from an arguments object; bails out on any case the
// masm helper cannot handle inline (e.g. out-of-bounds).
8294 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
8295   Register temp = ToRegister(lir->temp0());
8296   Register argsObj = ToRegister(lir->argsObject());
8297   Register index = ToRegister(lir->index());
8298   ValueOperand out = ToOutValue(lir);
8300   Label bail;
8301   masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
8302   bailoutFrom(&bail, lir->snapshot());
// Like visitLoadArgumentsObjectArg but uses the hole-tolerant masm helper;
// bails out on cases it cannot handle inline.
8305 void CodeGenerator::visitLoadArgumentsObjectArgHole(
8306     LLoadArgumentsObjectArgHole* lir) {
8307   Register temp = ToRegister(lir->temp0());
8308   Register argsObj = ToRegister(lir->argsObject());
8309   Register index = ToRegister(lir->index());
8310   ValueOperand out = ToOutValue(lir);
8312   Label bail;
8313   masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
8314   bailoutFrom(&bail, lir->snapshot());
// Emits |index in argumentsObject| as a boolean, bailing out when the
// inline helper cannot decide.
8317 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
8318   Register temp = ToRegister(lir->temp0());
8319   Register argsObj = ToRegister(lir->argsObject());
8320   Register index = ToRegister(lir->index());
8321   Register out = ToRegister(lir->output());
8323   Label bail;
8324   masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
8325   bailoutFrom(&bail, lir->snapshot());
// Loads arguments.length inline; bails out when the helper cannot
// (e.g. when the length has been overridden).
8328 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
8329   Register argsObj = ToRegister(lir->argsObject());
8330   Register out = ToRegister(lir->output());
8332   Label bail;
8333   masm.loadArgumentsObjectLength(argsObj, out, &bail);
8334   bailoutFrom(&bail, lir->snapshot());
// VM call wrapper: materializes an ArrayObject from an arguments object.
8337 void CodeGenerator::visitArrayFromArgumentsObject(
8338     LArrayFromArgumentsObject* lir) {
8339   pushArg(ToRegister(lir->argsObject()));
8341   using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
8342   callVM<Fn, js::ArrayFromArgumentsObject>(lir);
// Guard: bails out if any of the given arguments-object flags are set.
8345 void CodeGenerator::visitGuardArgumentsObjectFlags(
8346     LGuardArgumentsObjectFlags* lir) {
8347   Register argsObj = ToRegister(lir->argsObject());
8348   Register temp = ToRegister(lir->temp0());
8350   Label bail;
8351   masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
8352                                       Assembler::NonZero, &bail);
8353   bailoutFrom(&bail, lir->snapshot());
// Extracts the bound-argument count from the packed flags slot of a
// BoundFunctionObject (count stored in the high bits).
8356 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
8357   Register obj = ToRegister(lir->object());
8358   Register output = ToRegister(lir->output());
8360   masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
8361                   output);
8362   masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
// Guard: bails out unless the bound function's IsConstructor flag is set.
8365 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
8366     LGuardBoundFunctionIsConstructor* lir) {
8367   Register obj = ToRegister(lir->object());
8369   Label bail;
8370   Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
8371   masm.branchTest32(Assembler::Zero, flagsSlot,
8372                     Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
8373   bailoutFrom(&bail, lir->snapshot());
// Implements constructor-return semantics: if the returned value is an
// object, use it; otherwise return the freshly created |this| object.
8376 void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
8377   ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
8378   Register obj = ToRegister(lir->object());
8379   Register output = ToRegister(lir->output());
8381   Label valueIsObject, end;
8383   masm.branchTestObject(Assembler::Equal, value, &valueIsObject);
8385   // Value is not an object. Return that other object.
8386   masm.movePtr(obj, output);
8387   masm.jump(&end);
8389   // Value is an object. Return unbox(Value).
8390   masm.bind(&valueIsObject);
8391   Register payload = masm.extractObject(value, output);
8392   if (payload != output) {
8393     masm.movePtr(payload, output);
8396   masm.bind(&end);
// OOL holder for the LBoxNonStrictThis slow path; dispatches back to
// CodeGenerator::visitOutOfLineBoxNonStrictThis.
8399 class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
8400   LBoxNonStrictThis* ins_;
8402  public:
8403   explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
8404   void accept(CodeGenerator* codegen) override {
8405     codegen->visitOutOfLineBoxNonStrictThis(this);
8407   LBoxNonStrictThis* ins() const { return ins_; }
// Non-strict |this| boxing: if already an object, unbox inline; otherwise
// take the OOL path (global-this substitution or VM boxing).
8410 void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
8411   ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8412   Register output = ToRegister(lir->output());
8414   auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
8415   addOutOfLineCode(ool, lir->mir());
8417   masm.fallibleUnboxObject(value, output, ool->entry());
8418   masm.bind(ool->rejoin());
// OOL path for non-strict |this|: null/undefined map to the globalThis
// object; any other primitive is boxed via a manually-managed VM call
// (live registers saved/restored around it).
8421 void CodeGenerator::visitOutOfLineBoxNonStrictThis(
8422     OutOfLineBoxNonStrictThis* ool) {
8423   LBoxNonStrictThis* lir = ool->ins();
8425   ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
8426   Register output = ToRegister(lir->output());
8428   Label notNullOrUndefined;
8430   Label isNullOrUndefined;
8431   ScratchTagScope tag(masm, value);
8432   masm.splitTagForTest(value, tag);
8433   masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
8434   masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
8435   masm.bind(&isNullOrUndefined);
8436   masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
8437   masm.jump(ool->rejoin());
8440   masm.bind(&notNullOrUndefined);
8442   saveLive(lir);
8444   pushArg(value);
8445   using Fn = JSObject* (*)(JSContext*, HandleValue);
8446   callVM<Fn, BoxNonStrictThis>(lir);
8448   StoreRegisterTo(output).generate(this);
8449   restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());
8451   masm.jump(ool->rejoin());
// VM call wrapper computing the implicit |this| for a named call on an
// environment chain.
8454 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
8455   pushArg(ImmGCPtr(lir->mir()->name()));
8456   pushArg(ToRegister(lir->env()));
8458   using Fn = bool (*)(JSContext*, HandleObject, Handle<PropertyName*>,
8459                       MutableHandleValue);
8460   callVM<Fn, ImplicitThisOperation>(lir);
// Loads the array length from the elements header as int32, bailing out
// when the uint32 length has the sign bit set (doesn't fit in int32).
8463 void CodeGenerator::visitArrayLength(LArrayLength* lir) {
8464   Register elements = ToRegister(lir->elements());
8465   Register output = ToRegister(lir->output());
8467   Address length(elements, ObjectElements::offsetOfLength());
8468   masm.load32(length, output);
8470   // Bail out if the length doesn't fit in int32.
8471   bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
// Stores |index + 1| to |length|. For a register index, temporarily bumps
// the register and restores it afterwards to avoid needing a scratch.
8474 static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
8475                                const Address& length) {
8476   if (index->isConstant()) {
8477     masm.store32(Imm32(ToInt32(index) + 1), length);
8478   } else {
8479     Register newLength = ToRegister(index);
8480     masm.add32(Imm32(1), newLength);
8481     masm.store32(newLength, length);
8482     masm.sub32(Imm32(1), newLength);
// Sets the array's elements-header length to index + 1.
8486 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
8487   Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
8488   SetLengthFromIndex(masm, lir->index(), length);
// Loads fn.length inline, bailing out for self-hosted-lazy functions and
// functions whose length property may have been resolved (and shadowed).
8491 void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
8492   Register function = ToRegister(lir->function());
8493   Register output = ToRegister(lir->output());
8495   Label bail;
8497   // Get the JSFunction flags.
8498   masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
8499               output);
8501   // Functions with a SelfHostedLazyScript must be compiled with the slow-path
8502   // before the function length is known. If the length was previously resolved,
8503   // the length property may be shadowed.
8504   masm.branchTest32(
8505       Assembler::NonZero, output,
8506       Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
8507       &bail);
8509   masm.loadFunctionLength(function, output, output, &bail);
8511   bailoutFrom(&bail, lir->snapshot());
// Loads fn.name inline (empty atom when absent), bailing out when the masm
// helper cannot produce it.
8514 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
8515   Register function = ToRegister(lir->function());
8516   Register output = ToRegister(lir->output());
8518   Label bail;
8520   const JSAtomState& names = gen->runtime->names();
8521   masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
8523   bailoutFrom(&bail, lir->snapshot());
// RangeFront<T>: computes |front| = address of the hash-table entry at
// logical index |i| for a Map/Set iterator range. Clobbers |i|.
8526 template <class OrderedHashTable>
8527 static void RangeFront(MacroAssembler&, Register, Register, Register);
8529 template <>
8530 void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
8531                           Register front) {
8532   masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
8533   masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);
8535   MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
8536              "offsetof(Data, element) is 0");
8537   static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  // front += i * 24, computed as (i * 3) << 3.
8538   masm.mulBy3(i, i);
8539   masm.lshiftPtr(Imm32(3), i);
8540   masm.addPtr(i, front);
// ValueSet specialization: entries are 16 bytes, so front += i << 4.
8543 template <>
8544 void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
8545                           Register front) {
8546   masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
8547   masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);
8549   MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
8550              "offsetof(Data, element) is 0");
8551   static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
8552   masm.lshiftPtr(Imm32(4), i);
8553   masm.addPtr(i, front);
// Advances an OrderedHashTable::Range past the current entry: bumps the
// visit count, then scans |i|/|front| forward over empty (hash-key-empty
// magic) entries until a live entry or |dataLength| is reached.
8556 template <class OrderedHashTable>
8557 static void RangePopFront(MacroAssembler& masm, Register range, Register front,
8558                           Register dataLength, Register temp) {
8559   Register i = temp;
8561   masm.add32(Imm32(1),
8562              Address(range, OrderedHashTable::Range::offsetOfCount()));
8564   masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);
8566   Label done, seek;
8567   masm.bind(&seek);
8568   masm.add32(Imm32(1), i);
8569   masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);
8571   // We can add sizeof(Data) to |front| to select the next element, because
8572   // |front| and |range.ht.data[i]| point to the same location.
8573   MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
8574              "offsetof(Data, element) is 0");
8575   masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);
8577   masm.branchTestMagic(Assembler::Equal,
8578                        Address(front, OrderedHashTable::offsetOfEntryKey()),
8579                        JS_HASH_KEY_EMPTY, &seek);
8581   masm.bind(&done);
8582   masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
// Unlinks a Range from its doubly-linked range list and frees its malloc'd
// storage — unless the iterator object is nursery-allocated, in which case
// the nursery sweep owns the free.
8585 template <class OrderedHashTable>
8586 static inline void RangeDestruct(MacroAssembler& masm, Register iter,
8587                                  Register range, Register temp0,
8588                                  Register temp1) {
8589   Register next = temp0;
8590   Register prevp = temp1;
8592   masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
8593   masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
8594   masm.storePtr(next, Address(prevp, 0));
8596   Label hasNoNext;
8597   masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
8599   masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));
8601   masm.bind(&hasNoNext);
8603   Label nurseryAllocated;
8604   masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
8605                                &nurseryAllocated);
8607   masm.callFreeStub(range);
8609   masm.bind(&nurseryAllocated);
// ValueMap specialization: copies the current entry's key and value into the
// first two fixed elements of the |result| object, emitting pre-barriers on
// the overwritten slots and a post-write barrier when either stored value is
// a nursery cell.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueMap::Entry::offsetOfKey());
  Address valueAddress(front, ValueMap::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  // Emit the post-write barrier if the key OR the value is a nursery cell.
  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);
  {
    masm.bind(&emitBarrier);
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}
// ValueSet specialization: copies the current entry's key into the first
// fixed element of the |result| object, with a pre-barrier on the overwritten
// slot and a post-write barrier when the stored value is a nursery cell.
template <>
void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueSet::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);
  {
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}
// Shared emitter for Map/Set iterator stepping. Loads the Range from the
// iterator's fixed slot; if the range is live and not exhausted, writes the
// current entry into |result| and pops it (output = 0, "not done").
// Otherwise tears down the range, clears the slot, and sets output = 1
// ("done").
template <class IteratorObject, class OrderedHashTable>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register range = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
                               IteratorObject::RangeSlot)),
                   range);

  // A null range means iteration already completed earlier.
  Label iterAlreadyDone, iterDone, done;
  masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

  // Exhausted when the range's index reaches the data vector's length.
  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
               dataLength);
  masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
              dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
  {
    // |iter| is repurposed as the |front| register below, so preserve it.
    masm.Push(iter);

    Register front = iter;
    RangeFront<OrderedHashTable>(masm, range, temp, front);

    emitLoadIteratorValues<OrderedHashTable>(result, temp, front);

    RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);

    masm.Pop(iter);
    masm.move32(Imm32(0), output);
  }
  masm.jump(&done);
  {
    masm.bind(&iterDone);

    RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);

    // Null out the range slot so subsequent calls take the fast "already
    // done" path.
    masm.storeValue(PrivateValue(nullptr),
                    Address(iter, NativeObject::getFixedSlotOffset(
                                      IteratorObject::RangeSlot)));

    masm.bind(&iterAlreadyDone);

    masm.move32(Imm32(1), output);
  }
  masm.bind(&done);
}
8727 void CodeGenerator::visitGetNextEntryForIterator(
8728 LGetNextEntryForIterator* lir) {
8729 if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
8730 emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
8731 } else {
8732 MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
8733 emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
// The point of these is to inform Ion of where these values already are; they
// don't normally generate (much) code. The results were placed by the callee,
// so these visitors are intentionally empty.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
// Prepares a wasm stack-result area: reference-typed result slots are zeroed
// so the GC never sees uninitialized pointers; other slots are left as-is.
void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  LAllocation* output = lir->getDef(0)->output();
  MOZ_ASSERT(output->isStackArea());
  bool tempInit = false;
  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
    // Zero out ref stack results.
    if (iter.isWasmAnyRef()) {
      // NOTE(review): ToRegister(temp0) is deliberately only evaluated once a
      // ref result is actually seen — temp0 may not be a usable temp when the
      // area contains no refs; confirm before hoisting this out of the loop.
      Register temp = ToRegister(lir->temp0());
      if (!tempInit) {
        // Zero the temp once and reuse it for every ref slot.
        masm.xorPtr(temp, temp);
        tempInit = true;
      }
      masm.storePtr(temp, ToAddress(iter.alloc()));
    }
  }
}
// On 64-bit targets, widen an Int32 register result (see
// MacroAssembler::widenInt32); other result types and 32-bit targets need no
// code — the value is already where Ion expects it.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
#ifdef JS_64BIT
  if (MWasmRegisterResult* mir = lir->mir()) {
    if (mir->type() == MIRType::Int32) {
      masm.widenInt32(ToRegister(lir->output()));
    }
  }
#endif
}
// Emits the call sequence for every kind of wasm callee (direct function,
// import, asm.js/wasm table, builtin, builtin instance method, funcref),
// including the tail-call ("return call") variants, plus the associated
// safepoint bookkeeping, try-note bookkeeping for catchable calls, and
// instance/pinned-register/realm restoration after the call.
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  const MWasmCallBase* callBase = lir->callBase();
  bool isReturnCall = lir->isReturnCall();

  // If this call is in Wasm try code block, initialise a wasm::TryNote for
  // this call.
  bool inTry = callBase->inTry();
  if (inTry) {
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
    tryNote.setTryBodyBegin(masm.currentOffset());
  }

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  // Runtime check that the stack pointer really is WasmStackAlignment-aligned
  // at the call site.
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // instance and pinned regs. The only case where we don't have to
  // reload the instance and pinned regs is when the callee preserves them.
  bool reloadRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = callBase->desc();
  const wasm::CalleeDesc& callee = callBase->callee();
  CodeOffset retOffset;
  CodeOffset secondRetOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Direct intra-module call: the callee shares our instance and realm.
      retOffset = masm.call(desc, callee.funcIndex());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallImport(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
      retOffset = masm.asmCallIndirect(desc, callee);
      break;
    case wasm::CalleeDesc::WasmTable: {
      // Set up out-of-line traps for table-bounds failures and (when there is
      // no heap register) null table entries, attached to the MIR node that
      // matches this call's catchability.
      Label* boundsCheckFailed = nullptr;
      if (lir->needsBoundsCheck()) {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::OutOfBounds);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#else
          MOZ_CRASH("Return calls are disabled.");
#endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        boundsCheckFailed = ool->entry();
      }
      Label* nullCheckFailed = nullptr;
#ifndef WASM_HAS_HEAPREG
      {
        OutOfLineAbortingWasmTrap* ool =
            new (alloc()) OutOfLineAbortingWasmTrap(
                wasm::BytecodeOffset(desc.lineOrBytecode()),
                wasm::Trap::IndirectCallToNull);
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
#  ifdef ENABLE_WASM_TAIL_CALLS
          addOutOfLineCode(ool, lir->mirReturnCall());
#  else
          MOZ_CRASH("Return calls are disabled.");
#  endif
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        nullCheckFailed = ool->entry();
      }
#endif
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallIndirect(desc, callee, boundsCheckFailed,
                                    nullCheckFailed, mozilla::Nothing(),
                                    retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      masm.wasmCallIndirect(desc, callee, boundsCheckFailed, nullCheckFailed,
                            lir->tableSize(), &retOffset, &secondRetOffset);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallIndirect. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      reloadRegs = false;
      switchRealm = false;
      break;
    }
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, callBase->instanceArg(), callee.builtin(),
          callBase->builtinMethodFailureMode());
      switchRealm = false;
      break;
    case wasm::CalleeDesc::FuncRef:
#ifdef ENABLE_WASM_TAIL_CALLS
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallRef(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
#endif
      MOZ_ASSERT(!isReturnCall);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallRef. There are two return offsets, one for each call
      // instruction (fast path and slow path).
      masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
      reloadRegs = false;
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  MOZ_ASSERT(!isReturnCall);
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  uint32_t framePushedAtStackMapBase =
      masm.framePushed() - callBase->stackArgAreaSizeUnaligned();
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
  MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
             WasmSafepointKind::LirCall);

  // Note the assembler offset and framePushed for use by the adjunct
  // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
  if (callee.which() == wasm::CalleeDesc::WasmTable) {
    lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
                                                 framePushedAtStackMapBase);
  }

  if (reloadRegs) {
    // Restore the instance register (saved before the call), the pinned
    // registers, and — when the callee may have run in another realm — the
    // realm, too.
    masm.loadPtr(
        Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
        InstanceReg);
    masm.loadWasmPinnedRegsFromInstance();
    if (switchRealm) {
      masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }

#ifdef ENABLE_WASM_TAIL_CALLS
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
    case wasm::CalleeDesc::Import:
    case wasm::CalleeDesc::WasmTable:
    case wasm::CalleeDesc::FuncRef:
      // Stack allocation could change during Wasm (return) calls,
      // recover pre-call state.
      masm.freeStackTo(masm.framePushed());
      break;
    default:
      break;
  }
#endif  // ENABLE_WASM_TAIL_CALLS

  if (inTry) {
    // Set the end of the try note range
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];

    // Don't set the end of the try note if we've OOM'ed, as the above
    // instructions may not have been emitted, which will trigger an assert
    // about zero-length try-notes. This is okay as this compilation will be
    // thrown away.
    if (!masm.oom()) {
      tryNote.setTryBodyEnd(masm.currentOffset());
    }

    // This instruction or the adjunct safepoint must be the last instruction
    // in the block. No other instructions may be inserted.
    LBlock* block = lir->block();
    MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
                       (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
                        *(++block->rbegin()) == lir));

    // Jump to the fallthrough block
    jumpToBlock(lir->mirCatchable()->getSuccessor(
        MWasmCallCatchable::FallthroughBranchIndex));
  }
}
// Marks the exception landing pad for a catchable wasm call: records this
// block's start offset and frame depth on the call's try note, after
// asserting that nothing can execute between the call and this pad.
void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
  LBlock* block = lir->block();
  MWasmCallLandingPrePad* mir = lir->mir();
  MBasicBlock* mirBlock = mir->block();
  MBasicBlock* callMirBlock = mir->callBlock();

  // This block must be the pre-pad successor of the call block. No blocks may
  // be inserted between us, such as for critical edge splitting.
  MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
                                     MWasmCallCatchable::PrePadBranchIndex));

  // This instruction or a move group must be the first instruction in the
  // block. No other instructions may be inserted.
  MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
                                                *(++block->begin()) == lir));

  wasm::TryNoteVector& tryNotes = masm.tryNotes();
  wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
  // Set the entry point for the call try note to be the beginning of this
  // block. The above assertions (and assertions in visitWasmCall) guarantee
  // that we are not skipping over instructions that should be executed.
  tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
}
9030 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
9031 LWasmCallIndirectAdjunctSafepoint* lir) {
9032 markSafepointAt(lir->safepointLocation().offset(), lir);
9033 lir->safepoint()->setFramePushedAtStackMapBase(
9034 lir->framePushedAtStackMapBase());
// If |ins| carries a trap site (i.e. its memory access may fault on a null
// reference), record a NullPointerDereference trap at faulting code offset
// |fco| for machine-instruction kind |tmi|. No-op when there is no trap site.
template <typename InstructionWithMaybeTrapSite>
void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
                                 InstructionWithMaybeTrapSite* ins,
                                 FaultingCodeOffset fco,
                                 wasm::TrapMachineInsn tmi) {
  if (!ins->maybeTrap()) {
    return;
  }
  wasm::BytecodeOffset trapOffset(ins->maybeTrap()->offset);
  masm.append(wasm::Trap::NullPointerDereference,
              wasm::TrapSite(tmi, fco, trapOffset));
}
// Loads a value of MIR type |type| from a slot at a fixed offset inside a
// wasm container object, applying the requested widening for sub-word Int32
// loads. Every load is registered as a potential null-pointer trap site.
void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
  MIRType type = ins->type();
  MWideningOp wideningOp = ins->wideningOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister dst = ToAnyRegister(ins->output());

  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      // Sub-word fields are widened (zero- or sign-extended) into an Int32.
      switch (wideningOp) {
        case MWideningOp::None:
          fco = masm.load32(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load32);
          break;
        case MWideningOp::FromU16:
          fco = masm.load16ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromS16:
          fco = masm.load16SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load16);
          break;
        case MWideningOp::FromU8:
          fco = masm.load8ZeroExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        case MWideningOp::FromS8:
          fco = masm.load8SignExtend(addr, dst.gpr());
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Load8);
          break;
        default:
          MOZ_CRASH("unexpected widening op in ::visitWasmLoadSlot");
      }
      break;
    case MIRType::Float32:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadFloat32(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load32);
      break;
    case MIRType::Double:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadDouble(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load64);
      break;
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadPtr(addr, dst.gpr());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsnForLoadWord());
      break;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      MOZ_ASSERT(wideningOp == MWideningOp::None);
      fco = masm.loadUnalignedSimd128(addr, dst.fpu());
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Load128);
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in ::visitWasmLoadSlot");
  }
}
// Stores a value of MIR type |type| into a slot at a fixed offset inside a
// wasm container object, applying the requested narrowing for sub-word Int32
// stores. Reference-typed stores must go through LWasmStoreRef (barriers).
// Every store is registered as a potential null-pointer trap site.
void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
  MIRType type = ins->type();
  MNarrowingOp narrowingOp = ins->narrowingOp();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister src = ToAnyRegister(ins->value());
  // Narrowing only applies to Int32 stores.
  if (type != MIRType::Int32) {
    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  }

  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      switch (narrowingOp) {
        case MNarrowingOp::None:
          fco = masm.store32(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store32);
          break;
        case MNarrowingOp::To16:
          fco = masm.store16(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store16);
          break;
        case MNarrowingOp::To8:
          fco = masm.store8(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store8);
          break;
        default:
          MOZ_CRASH();
      }
      break;
    case MIRType::Float32:
      fco = masm.storeFloat32(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store32);
      break;
    case MIRType::Double:
      fco = masm.storeDouble(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store64);
      break;
    case MIRType::Pointer:
      // This could be correct, but it would be a new usage, so check carefully.
      MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
    case MIRType::WasmAnyRef:
      MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      fco = masm.storeUnalignedSimd128(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store128);
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in StorePrimitiveValue");
  }
}
9182 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
9183 Register elements = ToRegister(ins->elements());
9184 Register index = ToRegister(ins->index());
9185 Register output = ToRegister(ins->output());
9186 masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
9189 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
9190 masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
9191 masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
9194 void CodeGenerator::visitWasmDerivedIndexPointer(
9195 LWasmDerivedIndexPointer* ins) {
9196 Register base = ToRegister(ins->base());
9197 Register index = ToRegister(ins->index());
9198 Register output = ToRegister(ins->output());
9199 masm.computeEffectiveAddress(BaseIndex(base, index, ins->scale()), output);
// Stores a reference value into a wasm object slot, with an optional GC
// pre-barrier on the overwritten value. The store is registered as a
// potential null-pointer trap site.
void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
  Register instance = ToRegister(ins->instance());
  Register valueBase = ToRegister(ins->valueBase());
  size_t offset = ins->offset();
  Register value = ToRegister(ins->value());
  Register temp = ToRegister(ins->temp0());

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    // The guard skips the barrier call when it is unnecessary (see
    // EmitWasmPreBarrierGuard); the guard's access may itself fault on null,
    // hence the maybeTrap plumbing.
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(
        masm, instance, temp, valueBase, offset, &skipPreBarrier,
        ins->maybeTrap() ? &ins->maybeTrap()->offset : nullptr);
    wasm::EmitWasmPreBarrierCall(masm, instance, temp, valueBase, offset);
    masm.bind(&skipPreBarrier);
  }

  FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
// Out-of-line path to update the store buffer for wasm references.
// Captures the LIR node (for live-register save/restore), the base register
// and constant offset of the stored-to slot, and a temp register; the actual
// barrier call is emitted by visitOutOfLineWasmCallPostWriteBarrier.
class OutOfLineWasmCallPostWriteBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;       // instruction needing the barrier
  Register valueBase_;      // base of the slot that was written
  Register temp_;           // scratch for the effective address
  uint32_t valueOffset_;    // constant offset of the slot from valueBase_

 public:
  OutOfLineWasmCallPostWriteBarrier(LInstruction* lir, Register valueBase,
                                    Register temp, uint32_t valueOffset)
      : lir_(lir),
        valueBase_(valueBase),
        temp_(temp),
        valueOffset_(valueOffset) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineWasmCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  Register valueBase() const { return valueBase_; }
  Register temp() const { return temp_; }
  uint32_t valueOffset() const { return valueOffset_; }
};
// Out-of-line code: calls the wasm PostBarrier builtin with the address of
// the written slot, preserving live volatile registers and the instance
// register around the ABI call.
void CodeGenerator::visitOutOfLineWasmCallPostWriteBarrier(
    OutOfLineWasmCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  // Fold the value offset into the value base
  Register valueAddr = ool->valueBase();
  Register temp = ool->temp();
  masm.computeEffectiveAddress(Address(valueAddr, ool->valueOffset()), temp);

  // Call Instance::postBarrier
  masm.setupWasmABICall();
  masm.passABIArg(InstanceReg);
  masm.passABIArg(temp);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(wasm::BytecodeOffset(0), wasm::SymbolicAddress::PostBarrier,
                   mozilla::Some(instanceOffset), ABIType::General);

  masm.Pop(InstanceReg);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
// Emits the inline post-write-barrier guard for a wasm reference store; only
// when the guard requires it does control jump to the out-of-line barrier
// call (OutOfLineWasmCallPostWriteBarrier above).
void CodeGenerator::visitWasmPostWriteBarrier(LWasmPostWriteBarrier* lir) {
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto ool = new (alloc()) OutOfLineWasmCallPostWriteBarrier(
      lir, valueBase, temp, lir->valueOffset());
  addOutOfLineCode(ool, lir->mir());

  // The guard branches to rejoin() when no barrier is needed.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Loads a 64-bit integer slot from a wasm container object, registering the
// access (one word on 64-bit targets, two on 32-bit targets) as potential
// null-pointer trap sites.
void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words. On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
// Stores a 64-bit integer into a wasm container object slot, registering the
// access (one word on 64-bit targets, two on 32-bit targets) as potential
// null-pointer trap sites.
void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words. As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
9328 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
9329 Register obj = ToRegister(lir->object());
9330 Register out = ToRegister(lir->output());
9331 masm.loadArrayBufferByteLengthIntPtr(obj, out);
9334 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
9335 Register obj = ToRegister(lir->object());
9336 Register out = ToRegister(lir->output());
9337 masm.loadArrayBufferViewLengthIntPtr(obj, out);
9340 void CodeGenerator::visitArrayBufferViewByteOffset(
9341 LArrayBufferViewByteOffset* lir) {
9342 Register obj = ToRegister(lir->object());
9343 Register out = ToRegister(lir->output());
9344 masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
9347 void CodeGenerator::visitArrayBufferViewElements(
9348 LArrayBufferViewElements* lir) {
9349 Register obj = ToRegister(lir->object());
9350 Register out = ToRegister(lir->output());
9351 masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
9354 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
9355 Register obj = ToRegister(lir->object());
9356 Register out = ToRegister(lir->output());
9358 masm.typedArrayElementSize(obj, out);
// Bails out of Ion code if the view's underlying ArrayBuffer has been
// detached.
void CodeGenerator::visitGuardHasAttachedArrayBuffer(
    LGuardHasAttachedArrayBuffer* lir) {
  Register obj = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp0());

  Label bail;
  masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line path for LGuardNumberToIntPtrIndex when out-of-bounds indices
// are supported: substitutes a sentinel index instead of bailing out (see
// visitOutOfLineGuardNumberToIntPtrIndex).
class OutOfLineGuardNumberToIntPtrIndex
    : public OutOfLineCodeBase<CodeGenerator> {
  LGuardNumberToIntPtrIndex* lir_;

 public:
  explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
      : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
  }

  LGuardNumberToIntPtrIndex* lir() const { return lir_; }
};
// Converts a double to an intptr index. When the MIR node does not support
// out-of-bounds indices a failed conversion bails out; otherwise it jumps to
// the out-of-line path, which substitutes an out-of-bounds sentinel.
void CodeGenerator::visitGuardNumberToIntPtrIndex(
    LGuardNumberToIntPtrIndex* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  if (!lir->mir()->supportOOB()) {
    Label bail;
    masm.convertDoubleToPtr(input, output, &bail, false);
    bailoutFrom(&bail, lir->snapshot());
    return;
  }

  auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.convertDoubleToPtr(input, output, ool->entry(), false);
  masm.bind(ool->rejoin());
}
9404 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
9405 OutOfLineGuardNumberToIntPtrIndex* ool) {
9406 // Substitute the invalid index with an arbitrary out-of-bounds index.
9407 masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
9408 masm.jump(ool->rejoin());
9411 void CodeGenerator::visitStringLength(LStringLength* lir) {
9412 Register input = ToRegister(lir->string());
9413 Register output = ToRegister(lir->output());
9415 masm.loadStringLength(input, output);
9418 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
9419 Register first = ToRegister(ins->first());
9420 Register output = ToRegister(ins->output());
9422 MOZ_ASSERT(first == output);
9424 Assembler::Condition cond =
9425 ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
9427 if (ins->second()->isConstant()) {
9428 Label done;
9429 masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
9430 masm.move32(Imm32(ToInt32(ins->second())), output);
9431 masm.bind(&done);
9432 } else {
9433 Register second = ToRegister(ins->second());
9434 masm.cmp32Move32(cond, second, first, second, output);
9438 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
9439 Register array = ToRegister(ins->array());
9440 Register output = ToRegister(ins->output());
9441 Register temp1 = ToRegister(ins->temp1());
9442 Register temp2 = ToRegister(ins->temp2());
9443 Register temp3 = ToRegister(ins->temp3());
9444 bool isMax = ins->isMax();
9446 Label bail;
9447 masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
9448 bailoutFrom(&bail, ins->snapshot());
9451 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
9452 Register array = ToRegister(ins->array());
9453 FloatRegister output = ToFloatRegister(ins->output());
9454 Register temp1 = ToRegister(ins->temp1());
9455 Register temp2 = ToRegister(ins->temp2());
9456 FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
9457 bool isMax = ins->isMax();
9459 Label bail;
9460 masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
9461 bailoutFrom(&bail, ins->snapshot());
9464 // For Abs*, lowering will have tied input to output on platforms where that is
9465 // sensible, and otherwise left them untied.
// Int32 absolute value. The fallible path bails out when negating INT32_MIN
// would overflow; the infallible path uses the plain abs32 sequence.
void CodeGenerator::visitAbsI(LAbsI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  if (ins->mir()->fallible()) {
    Label positive;
    if (input != output) {
      masm.move32(input, output);
    }
    // Non-negative values are already their own absolute value.
    masm.branchTest32(Assembler::NotSigned, output, output, &positive);
    // Negate; overflow (only for INT32_MIN) bails out.
    Label bail;
    masm.branchNeg32(Assembler::Overflow, output, &bail);
    bailoutFrom(&bail, ins->snapshot());
    masm.bind(&positive);
  } else {
    masm.abs32(input, output);
  }
}
9486 void CodeGenerator::visitAbsD(LAbsD* ins) {
9487 masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
9490 void CodeGenerator::visitAbsF(LAbsF* ins) {
9491 masm.absFloat32(ToFloatRegister(ins->input()),
9492 ToFloatRegister(ins->output()));
9495 void CodeGenerator::visitPowII(LPowII* ins) {
9496 Register value = ToRegister(ins->value());
9497 Register power = ToRegister(ins->power());
9498 Register output = ToRegister(ins->output());
9499 Register temp0 = ToRegister(ins->temp0());
9500 Register temp1 = ToRegister(ins->temp1());
9502 Label bailout;
9503 masm.pow32(value, power, output, temp0, temp1, &bailout);
9504 bailoutFrom(&bailout, ins->snapshot());
// Emit double ** int32 exponentiation by calling the C++ helper |js::powi|.
void CodeGenerator::visitPowI(LPowI* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  Register power = ToRegister(ins->power());

  using Fn = double (*)(double x, int32_t y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power);

  masm.callWithABI<Fn, js::powi>(ABIType::Float64);
  // The ABI call leaves its result in ReturnDoubleReg, which lowering fixed
  // as this instruction's output register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// Emit double ** double exponentiation by calling the C++ helper |ecmaPow|.
void CodeGenerator::visitPowD(LPowD* ins) {
  FloatRegister value = ToFloatRegister(ins->value());
  FloatRegister power = ToFloatRegister(ins->power());

  using Fn = double (*)(double x, double y);
  masm.setupAlignedABICall();
  masm.passABIArg(value, ABIType::Float64);
  masm.passABIArg(power, ABIType::Float64);
  masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);

  // The ABI call leaves its result in ReturnDoubleReg, which lowering fixed
  // as this instruction's output register.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
}
// Emit |base ** power| where |base| is a constant power of two, computed as
// |1 << (log2(base) * power)| using repeated shifts.
void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
  Register power = ToRegister(ins->power());
  Register output = ToRegister(ins->output());

  uint32_t base = ins->base();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(base));

  // n = log2(base); nonzero because base is a power of two > 1.
  uint32_t n = mozilla::FloorLog2(base);
  MOZ_ASSERT(n != 0);

  // Hacker's Delight, 2nd edition, theorem D2.
  auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };

  // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
  //
  // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
  //
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
               ins->snapshot());

  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
  // |power| and perform a single shift, but due to the lack of necessary
  // MacroAssembler functionality, like multiplying a register with an
  // immediate, we restrict the number of generated shift instructions when
  // lowering this operation.
  masm.move32(Imm32(1), output);
  do {
    masm.lshift32(power, output);
    n--;
  } while (n > 0);
}
9566 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
9567 FloatRegister input = ToFloatRegister(ins->input());
9568 FloatRegister output = ToFloatRegister(ins->output());
9569 masm.sqrtDouble(input, output);
9572 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
9573 FloatRegister input = ToFloatRegister(ins->input());
9574 FloatRegister output = ToFloatRegister(ins->output());
9575 masm.sqrtFloat32(input, output);
9578 void CodeGenerator::visitSignI(LSignI* ins) {
9579 Register input = ToRegister(ins->input());
9580 Register output = ToRegister(ins->output());
9581 masm.signInt32(input, output);
9584 void CodeGenerator::visitSignD(LSignD* ins) {
9585 FloatRegister input = ToFloatRegister(ins->input());
9586 FloatRegister output = ToFloatRegister(ins->output());
9587 masm.signDouble(input, output);
9590 void CodeGenerator::visitSignDI(LSignDI* ins) {
9591 FloatRegister input = ToFloatRegister(ins->input());
9592 FloatRegister temp = ToFloatRegister(ins->temp0());
9593 Register output = ToRegister(ins->output());
9595 Label bail;
9596 masm.signDoubleToInt32(input, output, temp, &bail);
9597 bailoutFrom(&bail, ins->snapshot());
// Emit a call to the C++ implementation of a unary double math function
// (the concrete function is selected by the MIR node).
void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  // The ABI call returns in ReturnDoubleReg; lowering fixed the output there.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  UnaryMathFunction fun = ins->mir()->function();
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  masm.setupAlignedABICall();

  masm.passABIArg(input, ABIType::Float64);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   ABIType::Float64);
}
// Emit a call to the C++ implementation of a unary float32 math function.
// Only a small set of rounding-style functions is supported here.
void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  // The ABI call returns in ReturnFloat32Reg; lowering fixed the output there.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);

  masm.setupAlignedABICall();
  masm.passABIArg(input, ABIType::Float32);

  using Fn = float (*)(float x);
  Fn funptr = nullptr;
  CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
  switch (ins->mir()->function()) {
    case UnaryMathFunction::Floor:
      funptr = floorf;
      // libc function: skip the unsafe-call check applied to VM callees.
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    case UnaryMathFunction::Round:
      funptr = math_roundf_impl;
      break;
    case UnaryMathFunction::Trunc:
      funptr = math_truncf_impl;
      break;
    case UnaryMathFunction::Ceil:
      funptr = ceilf;
      // libc function: skip the unsafe-call check applied to VM callees.
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    default:
      MOZ_CRASH("Unknown or unsupported float32 math function");
  }

  masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
}
// Emit double modulo (JS |%|) by calling the C++ helper |NumberMod|.
// Wasm uses visitWasmBuiltinModD instead.
void CodeGenerator::visitModD(LModD* ins) {
  MOZ_ASSERT(!gen->compilingWasm());

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  // The ABI call returns in ReturnDoubleReg; lowering fixed the output there.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  using Fn = double (*)(double a, double b);
  masm.setupAlignedABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);
  masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
}
// Emit |lhs % divisor| for a double |lhs| and a constant power-of-two
// |divisor|, without calling out to |fmod|.
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  uint32_t divisor = ins->divisor();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));

  FloatRegister output = ToFloatRegister(ins->output());

  // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
  //
  // This doesn't work if |d| isn't a power of two, because we may lose too much
  // precision. For example |Number.MAX_VALUE % 3 == 2|, but
  // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.

  Label done;
  {
    ScratchDoubleScope scratch(masm);

    // Subnormals can lead to performance degradation, which can make calling
    // |fmod| faster than this inline implementation. Work around this issue by
    // directly returning the input for any value in the interval ]-1, +1[.
    Label notSubnormal;
    masm.loadConstantDouble(1.0, scratch);
    masm.loadConstantDouble(-1.0, output);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
                      &notSubnormal);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
                      &notSubnormal);

    // |lhs| in ]-1, +1[ (or NaN): |lhs % d == lhs| after copysign below.
    masm.moveDouble(lhs, output);
    masm.jump(&done);

    masm.bind(&notSubnormal);

    if (divisor == 1) {
      // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
      // the multiplication by one in this case.
      masm.moveDouble(lhs, output);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
      masm.subDouble(scratch, output);
    } else {
      // scratch = trunc(lhs / d); output = lhs - d * scratch.
      masm.loadConstantDouble(1.0 / double(divisor), scratch);
      masm.loadConstantDouble(double(divisor), output);

      masm.mulDouble(lhs, scratch);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
      masm.mulDouble(output, scratch);

      masm.moveDouble(lhs, output);
      masm.subDouble(scratch, output);
    }
  }

  // Propagate the sign of |lhs| so that |-0 % d| stays -0.
  masm.copySignDouble(output, lhs, output);
  masm.bind(&done);
}
// Emit double modulo for wasm by calling the ModD builtin, preserving the
// instance register across the call.
void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
  masm.Push(InstanceReg);
  // Remember the frame depth so the callee can locate the saved instance.
  int32_t framePushedAfterInstance = masm.framePushed();

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  // The ABI call returns in ReturnDoubleReg; lowering fixed the output there.
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  masm.setupWasmABICall();
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
                   mozilla::Some(instanceOffset), ABIType::Float64);

  masm.Pop(InstanceReg);
}
// Emit BigInt addition. Fast path handles operands that fit in a single
// pointer-sized digit; everything else falls back to |BigInt::add| via an
// out-of-line VM call.
void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n + x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x + 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // temp1 += temp2; overflow means the sum needs more than one digit.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt subtraction. Fast path handles operands that fit in a single
// pointer-sized digit; otherwise falls back to |BigInt::sub| via an
// out-of-line VM call.
void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x - 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // temp1 -= temp2; overflow means the difference needs more than one digit.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt multiplication. Fast path handles operands that fit in a single
// pointer-sized digit; otherwise falls back to |BigInt::mul| via an
// out-of-line VM call.
void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n * x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x * 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // temp1 *= temp2; overflow means the product needs more than one digit.
  masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt division. Fast path handles single-digit operands and mirrors
// the allocation-avoiding special cases of |BigInt::div|; otherwise falls
// back to the VM (which also reports division by zero).
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x / 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n / x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation which might trigger a minor GC to free up nursery space. This
  // requires us to apply the same optimization here, otherwise we'd end up with
  // always entering the OOL call, because the nursery is never evicted.
  Label notOne;
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notOne);

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
  masm.bind(&notOverflow);

  // Performs the division and allocates the result BigInt.
  emitBigIntDiv(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Emit BigInt remainder. Fast path handles single-digit operands and mirrors
// the allocation-avoiding special cases of |BigInt::mod|; otherwise falls
// back to the VM (which also reports division by zero).
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchIfBigIntIsZero(rhs, ool->entry());
  }

  // 0n % x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
  masm.loadBigIntAbsolute(rhs, temp2, ool->entry());

  // Similar to the case for BigInt division, we must apply the same allocation
  // optimizations as performed in |BigInt::mod()|.
  Label notBelow;
  masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&notBelow);

  // Convert both digits to signed pointer-sized values.
  masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
  masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());

  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
  // INT_MIN % -1 == 0, so force the numerator to zero.
  masm.movePtr(ImmWord(0), temp1);
  masm.bind(&notOverflow);

  // Performs the modulo and allocates the result BigInt.
  emitBigIntMod(ins, temp1, temp2, output, ool->entry());

  masm.bind(ool->rejoin());
}
// Emit BigInt exponentiation. Handles single-digit operands inline via
// square-and-multiply; falls back to |BigInt::pow| in the VM for everything
// else (including negative exponents, which throw).
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // x ** -y throws an error.
  if (ins->mir()->canBeNegativeExponent()) {
    masm.branchIfBigIntIsNegative(rhs, ool->entry());
  }

  // NB: |output| doubles as the exponent scratch until a result is stored.
  Register dest = temp1;
  Register base = temp2;
  Register exponent = output;

  Label done;
  masm.movePtr(ImmWord(1), dest);  // p = 1

  // 1n ** y == 1n
  // -1n ** y == 1n when y is even
  // -1n ** y == -1n when y is odd
  Label lhsNotOne;
  masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
                Imm32(1), &lhsNotOne);
  masm.loadFirstBigIntDigitOrZero(lhs, base);
  masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);

  masm.loadFirstBigIntDigitOrZero(rhs, exponent);

  // For -1n: an even exponent yields 1n, handled at |done| with p == 1.
  Label lhsNonNegative;
  masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
  masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
  masm.bind(&lhsNonNegative);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNotOne);

  // x ** 0n == 1n
  masm.branchIfBigIntIsZero(rhs, &done);

  // 0n ** y == 0n with y != 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&lhsNonZero);

  // Call into the VM when the exponent can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntAbsolute(rhs, exponent, ool->entry());

  // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
  masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
                 ool->entry());

  // x ** 1n == x
  Label rhsNotOne;
  masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);

  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());

  masm.bind(&rhsNotOne);

  // Call into the VM when the base operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, base, ool->entry());

  // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
  //
  // m = base
  // n = exponent

  Label start, loop;
  masm.jump(&start);
  masm.bind(&loop);

  // m *= m
  masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());

  masm.bind(&start);

  // if ((n & 1) != 0) p *= m
  Label even;
  masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
  masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
  masm.bind(&even);

  // n >>= 1
  // if (n == 0) return p
  masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);

  MOZ_ASSERT(temp1 == dest);

  // Create and return the result.
  masm.bind(&done);
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise AND. Fast path handles single-digit operands;
// otherwise falls back to |BigInt::bitAnd| via an out-of-line VM call.
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n & x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x & 0n == 0n
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.andPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise OR. Fast path handles single-digit operands;
// otherwise falls back to |BigInt::bitOr| via an out-of-line VM call.
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
                                           StoreRegisterTo(output));

  // 0n | x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x | 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.orPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt bitwise XOR. Fast path handles single-digit operands;
// otherwise falls back to |BigInt::bitXor| via an out-of-line VM call.
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
                                            StoreRegisterTo(output));

  // 0n ^ x == x
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(rhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x ^ 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Call into the VM when either operand can't be loaded into a pointer-sized
  // register.
  masm.loadBigIntNonZero(lhs, temp1, ool->entry());
  masm.loadBigIntNonZero(rhs, temp2, ool->entry());

  masm.xorPtr(temp2, temp1);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, tem1);

  masm.bind(ool->rejoin());
}
// Emit BigInt left shift. Handles single-digit |lhs| inline (including
// negative shift amounts, which become right shifts); falls back to
// |BigInt::lsh| in the VM whenever any bit would be shifted out.
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n << x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x << 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);

  // x << DigitBits with x != 0n always exceeds pointer-sized storage.
  masm.branchIfBigIntIsNonNegative(rhs, ool->entry());

  // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
  masm.move32(Imm32(0), temp1);
  masm.branchIfBigIntIsNonNegative(lhs, &create);
  masm.move32(Imm32(1), temp1);
  masm.jump(&create);

  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);

  // Negative shift amount: save the unshifted digit for the rounding check.
  masm.movePtr(temp1, temp3);

  // |x << -y| is computed as |x >> y|.
  masm.rshiftPtr(temp2, temp1);

  // For negative numbers, round down if any bit was shifted out.
  masm.branchIfBigIntIsNonNegative(lhs, &create);

  // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
  masm.movePtr(ImmWord(-1), output);
  masm.lshiftPtr(temp2, output);
  masm.notPtr(output);

  // Add plus one when |(lhs.digit(0) & mask) != 0|.
  masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
  masm.addPtr(ImmWord(1), temp1);
  masm.jump(&create);

  masm.bind(&nonNegative);

  // Positive shift amount: save it before it's clobbered below.
  masm.movePtr(temp2, temp3);

  // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
  masm.negPtr(temp2);
  masm.addPtr(Imm32(BigInt::DigitBits), temp2);
  masm.movePtr(temp1, output);
  masm.rshiftPtr(temp2, output);

  // Call into the VM when any bit will be shifted out.
  masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

  masm.movePtr(temp3, temp2);
  masm.lshiftPtr(temp2, temp1);

  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit BigInt right shift. Handles single-digit |lhs| inline (including
// negative shift amounts, which become left shifts); falls back to
// |BigInt::rsh| in the VM whenever any bit would be shifted out of range.
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register temp3 = ToRegister(ins->temp3());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
                                         StoreRegisterTo(output));

  // 0n >> x == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // x >> 0n == x
  Label rhsNonZero;
  masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
  masm.movePtr(lhs, output);
  masm.jump(ool->rejoin());
  masm.bind(&rhsNonZero);

  // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.

  Label rhsTooLarge;
  masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);

  // Call into the VM when the left-hand side operand can't be loaded into a
  // pointer-sized register.
  masm.loadBigIntAbsolute(lhs, temp1, ool->entry());

  // Handle shifts exceeding |BigInt::DigitBits| first.
  Label shift, create;
  masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);

  masm.bind(&rhsTooLarge);

  // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
  masm.branchIfBigIntIsNegative(rhs, ool->entry());

  // x >> DigitBits is either 0n or -1n.
  masm.move32(Imm32(0), temp1);
  masm.branchIfBigIntIsNonNegative(lhs, &create);
  masm.move32(Imm32(1), temp1);
  masm.jump(&create);

  masm.bind(&shift);

  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);

  // Negative shift amount: save it before it's clobbered below.
  masm.movePtr(temp2, temp3);

  // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
  masm.negPtr(temp2);
  masm.addPtr(Imm32(BigInt::DigitBits), temp2);
  masm.movePtr(temp1, output);
  masm.rshiftPtr(temp2, output);

  // Call into the VM when any bit will be shifted out.
  masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());

  // |x >> -y| is computed as |x << y|.
  masm.movePtr(temp3, temp2);
  masm.lshiftPtr(temp2, temp1);
  masm.jump(&create);

  masm.bind(&nonNegative);

  // Positive shift amount: save the unshifted digit for the rounding check.
  masm.movePtr(temp1, temp3);

  masm.rshiftPtr(temp2, temp1);

  // For negative numbers, round down if any bit was shifted out.
  masm.branchIfBigIntIsNonNegative(lhs, &create);

  // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
  masm.movePtr(ImmWord(-1), output);
  masm.lshiftPtr(temp2, output);
  masm.notPtr(output);

  // Add plus one when |(lhs.digit(0) & mask) != 0|.
  masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
  masm.addPtr(ImmWord(1), temp1);

  masm.bind(&create);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the left-hand side is negative.
  masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit BigInt increment (|x + 1n|). Fast path for single-digit inputs;
// otherwise falls back to |BigInt::inc| via an out-of-line VM call.
void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // temp1 += 1; overflow means the result needs more than one digit.
  masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt decrement (|x - 1n|). Fast path for single-digit inputs;
// otherwise falls back to |BigInt::dec| via an out-of-line VM call.
void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));

  // Call into the VM when the input can't be loaded into a pointer-sized
  // register.
  masm.loadBigInt(input, temp1, ool->entry());
  masm.movePtr(ImmWord(1), temp2);

  // temp1 -= 1; overflow means the result needs more than one digit.
  masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigInt(output, temp1);

  masm.bind(ool->rejoin());
}
// Emit BigInt unary negation by copying the input and flipping its sign bit;
// falls back to |BigInt::neg| in the VM when the input uses heap digits.
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Emit |~bigint|. The inline path handles BigInts whose magnitude fits into
// one pointer-sized digit; all other inputs call BigInt::bitNot in the VM.
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  Register input = ToRegister(ins->input());
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
                                            StoreRegisterTo(output));

  // Load |abs(input)| into temp1; goes out-of-line when that doesn't fit
  // into a pointer-sized register.
  masm.loadBigIntAbsolute(input, temp1, ool->entry());

  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit resp. [-2^32, 2^32 - 1] on 32-bit.
  Label nonNegative, done;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);

  // Negative input: ~(-x) == ~(~(x-1)) == x-1
  masm.subPtr(Imm32(1), temp1);
  masm.jump(&done);

  masm.bind(&nonNegative);

  // Non-negative input: ~x == -x-1 == -(x+1); CarrySet means x+1 overflowed
  // the digit, so fall back to the VM.
  masm.movePtr(ImmWord(1), temp2);
  masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());

  masm.bind(&done);

  // Create and return the result.
  masm.newGCBigInt(output, temp2, initialBigIntHeap(), ool->entry());
  masm.initializeBigIntAbsolute(output, temp1);

  // Set the sign bit when the input is positive (the result of ~x for
  // non-negative x is negative).
  masm.branchIfBigIntIsNegative(input, ool->rejoin());
  masm.or32(Imm32(BigInt::signBitMask()),
            Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
// Convert an int32 to a string in the given base (e.g. |n.toString(16)|).
// The base is either a compile-time constant or a register; both paths emit
// an inline fast path with a VM fallback to js::Int32ToStringWithBase.
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    // Dynamic base: the inline path needs the live volatile registers so it
    // can spill around any internal calls.
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    // Constant base: emit the specialized variant.
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
10536 void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
10537 Register string = ToRegister(lir->string());
10538 Register radix = ToRegister(lir->radix());
10539 ValueOperand output = ToOutValue(lir);
10540 Register temp = ToRegister(lir->temp0());
10542 #ifdef DEBUG
10543 Label ok;
10544 masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
10545 masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
10546 masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
10547 masm.bind(&ok);
10548 #endif
10550 // Use indexed value as fast path if possible.
10551 Label vmCall, done;
10552 masm.loadStringIndexValue(string, temp, &vmCall);
10553 masm.tagValue(JSVAL_TYPE_INT32, temp, output);
10554 masm.jump(&done);
10556 masm.bind(&vmCall);
10558 pushArg(radix);
10559 pushArg(string);
10561 using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
10562 callVM<Fn, js::NumberParseInt>(lir);
10564 masm.bind(&done);
// Inline parseInt() on a double input: truncate toward zero, bailing out
// whenever truncation would not match parseInt's string-based semantics.
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;

  // NaN never compares equal to itself, so DoubleUnordered detects it.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);

  // Bail when the truncated value doesn't fit into an int32.
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);

  // Accept both +0 and -0 and return 0.
  masm.loadConstantDouble(0.0, temp);
  masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

  // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
  // NOTE(review): presumably because such doubles stringify in a form
  // (exponent notation resp. a leading minus sign) that parseInt handles
  // differently — confirm against js::NumberParseInt.
  masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
  masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);

  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
10592 void CodeGenerator::visitFloor(LFloor* lir) {
10593 FloatRegister input = ToFloatRegister(lir->input());
10594 Register output = ToRegister(lir->output());
10596 Label bail;
10597 masm.floorDoubleToInt32(input, output, &bail);
10598 bailoutFrom(&bail, lir->snapshot());
10601 void CodeGenerator::visitFloorF(LFloorF* lir) {
10602 FloatRegister input = ToFloatRegister(lir->input());
10603 Register output = ToRegister(lir->output());
10605 Label bail;
10606 masm.floorFloat32ToInt32(input, output, &bail);
10607 bailoutFrom(&bail, lir->snapshot());
10610 void CodeGenerator::visitCeil(LCeil* lir) {
10611 FloatRegister input = ToFloatRegister(lir->input());
10612 Register output = ToRegister(lir->output());
10614 Label bail;
10615 masm.ceilDoubleToInt32(input, output, &bail);
10616 bailoutFrom(&bail, lir->snapshot());
10619 void CodeGenerator::visitCeilF(LCeilF* lir) {
10620 FloatRegister input = ToFloatRegister(lir->input());
10621 Register output = ToRegister(lir->output());
10623 Label bail;
10624 masm.ceilFloat32ToInt32(input, output, &bail);
10625 bailoutFrom(&bail, lir->snapshot());
10628 void CodeGenerator::visitRound(LRound* lir) {
10629 FloatRegister input = ToFloatRegister(lir->input());
10630 FloatRegister temp = ToFloatRegister(lir->temp0());
10631 Register output = ToRegister(lir->output());
10633 Label bail;
10634 masm.roundDoubleToInt32(input, output, temp, &bail);
10635 bailoutFrom(&bail, lir->snapshot());
10638 void CodeGenerator::visitRoundF(LRoundF* lir) {
10639 FloatRegister input = ToFloatRegister(lir->input());
10640 FloatRegister temp = ToFloatRegister(lir->temp0());
10641 Register output = ToRegister(lir->output());
10643 Label bail;
10644 masm.roundFloat32ToInt32(input, output, temp, &bail);
10645 bailoutFrom(&bail, lir->snapshot());
10648 void CodeGenerator::visitTrunc(LTrunc* lir) {
10649 FloatRegister input = ToFloatRegister(lir->input());
10650 Register output = ToRegister(lir->output());
10652 Label bail;
10653 masm.truncDoubleToInt32(input, output, &bail);
10654 bailoutFrom(&bail, lir->snapshot());
10657 void CodeGenerator::visitTruncF(LTruncF* lir) {
10658 FloatRegister input = ToFloatRegister(lir->input());
10659 Register output = ToRegister(lir->output());
10661 Label bail;
10662 masm.truncFloat32ToInt32(input, output, &bail);
10663 bailoutFrom(&bail, lir->snapshot());
10666 void CodeGenerator::visitCompareS(LCompareS* lir) {
10667 JSOp op = lir->mir()->jsop();
10668 Register left = ToRegister(lir->left());
10669 Register right = ToRegister(lir->right());
10670 Register output = ToRegister(lir->output());
10672 OutOfLineCode* ool = nullptr;
10674 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
10675 if (op == JSOp::Eq || op == JSOp::StrictEq) {
10676 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
10677 lir, ArgList(left, right), StoreRegisterTo(output));
10678 } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
10679 ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
10680 lir, ArgList(left, right), StoreRegisterTo(output));
10681 } else if (op == JSOp::Lt) {
10682 ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
10683 lir, ArgList(left, right), StoreRegisterTo(output));
10684 } else if (op == JSOp::Le) {
10685 // Push the operands in reverse order for JSOp::Le:
10686 // - |left <= right| is implemented as |right >= left|.
10687 ool =
10688 oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
10689 lir, ArgList(right, left), StoreRegisterTo(output));
10690 } else if (op == JSOp::Gt) {
10691 // Push the operands in reverse order for JSOp::Gt:
10692 // - |left > right| is implemented as |right < left|.
10693 ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
10694 lir, ArgList(right, left), StoreRegisterTo(output));
10695 } else {
10696 MOZ_ASSERT(op == JSOp::Ge);
10697 ool =
10698 oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
10699 lir, ArgList(left, right), StoreRegisterTo(output));
10702 masm.compareStrings(op, left, right, output, ool->entry());
10704 masm.bind(ool->rejoin());
// Equality-compare a string against a known constant linear string. Emits
// inline fast paths (pointer identity, atom identity, encoding and length
// mismatches, then a character-by-character compare) with a VM fallback.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially
    // equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str),
                   &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;
    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different
      // strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::Equal,
                  Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &compareChars);

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters. Goes out-of-line when the characters
  // can't be accessed inline (e.g. for ropes).
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
// Relational comparison of a string against a constant single-character
// string. Only the input's first character and its length are needed:
// compare the first character, and fall back to comparing the length against
// 1 when the first characters are equal (or the input is empty).
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty. The empty string compares purely by
  // length, handled at |compareLength| below.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to
    // any character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output,
                Imm32(ch), output);
  masm.jump(&done);

  // Compare the string length to compute the overall result.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(temp, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
// Compare two BigInts. |equalBigInts| checks sign, digit length and digits
// in order; depending on the operator, a mismatch either directly produces
// the boolean result (equality ops) or dispatches to one of three
// disambiguation paths (relational ops).
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  // For equality ops any mismatch means "not equal"; relational ops need to
  // know which component differed.
  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Fall-through: the operands are equal, so ==, ===, <= and >= are true.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq ||
                    op == JSOp::Le || op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case
    // 2 with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) !=
    // len(digits(right)), we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case
    // 1 and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // Negate the result when both operands are negative.
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
10956 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
10957 JSOp op = lir->mir()->jsop();
10958 Register left = ToRegister(lir->left());
10959 Register right = ToRegister(lir->right());
10960 Register temp0 = ToRegister(lir->temp0());
10961 Register temp1 = ToRegister(lir->temp1());
10962 Register output = ToRegister(lir->output());
10964 Label ifTrue, ifFalse;
10965 masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue, &ifFalse);
10967 Label done;
10968 masm.bind(&ifFalse);
10969 masm.move32(Imm32(0), output);
10970 masm.jump(&done);
10971 masm.bind(&ifTrue);
10972 masm.move32(Imm32(1), output);
10973 masm.bind(&done);
// Compare a BigInt against a double by calling one of the C++ comparison
// helpers through the ABI. Le and Gt are rewritten in terms of the
// number-first helpers, so their arguments are passed in reverse order.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, ABIType::Float64);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, ABIType::Float64);
  }

  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      // |left > right| as |right < left| (arguments already reversed above).
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      // |left <= right| as |right >= left| (arguments already reversed
      // above).
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
// Compare a BigInt against a string via a VM call. Le and Gt are rewritten
// in terms of the string-first helpers, so their arguments are pushed in
// reverse order.
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      // |left > right| as |right < left| (arguments already reversed above).
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      // |left <= right| as |right >= left| (arguments already reversed
      // above).
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
// Loose-equality comparison of a boxed value against null/undefined,
// producing a boolean. Objects that "emulate undefined" (e.g.
// document.all) compare equal as well; that check happens out-of-line.
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* nullOrLikeUndefined = ool->label1();
  Label* notNullOrLikeUndefined = ool->label2();

  {
    // Scope for |tag|: the scratch tag register must be released before
    // extracting the object payload below.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
    masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

    // Check whether it's a truthy object or a falsy object that emulates
    // undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
  }

  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
  branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                    notNullOrLikeUndefined, output, ool);
  // fall through

  Label done;

  // It's not null or undefined, and if it's an object it doesn't
  // emulate undefined, so it's not like undefined.
  masm.move32(Imm32(op == JSOp::Ne), output);
  masm.jump(&done);

  masm.bind(nullOrLikeUndefined);
  masm.move32(Imm32(op == JSOp::Eq), output);

  // Both branches meet here.
  masm.bind(&done);
}
// Branching variant of the loose null/undefined comparison on a boxed
// value: jump directly to the successor blocks instead of materializing a
// boolean.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  const ValueOperand value =
      ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  auto* ool = new (alloc()) OutOfLineTestObject();
  addOutOfLineCode(ool, lir->cmpMir());

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  {
    // Scope for |tag|: release the scratch tag register before extracting
    // the object payload below.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    // Anything which isn't null, undefined or an object is not loosely
    // equal to null/undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
  }

  // Objects that emulate undefined are loosely equal to null/undefined.
  Register objreg =
      masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
  Register scratch = ToRegister(lir->temp());
  testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
}
11184 void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
11185 MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
11186 lir->mir()->compareType() == MCompare::Compare_Null);
11187 MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
11189 JSOp op = lir->mir()->jsop();
11190 MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
11192 Register objreg = ToRegister(lir->input());
11193 Register output = ToRegister(lir->output());
11195 auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
11196 addOutOfLineCode(ool, lir->mir());
11198 Label* emulatesUndefined = ool->label1();
11199 Label* doesntEmulateUndefined = ool->label2();
11201 branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
11202 doesntEmulateUndefined, output, ool);
11204 Label done;
11206 masm.move32(Imm32(op == JSOp::Ne), output);
11207 masm.jump(&done);
11209 masm.bind(emulatesUndefined);
11210 masm.move32(Imm32(op == JSOp::Eq), output);
11211 masm.bind(&done);
11214 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
11215 LIsNullOrLikeUndefinedAndBranchT* lir) {
11216 MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
11217 lir->cmpMir()->compareType() == MCompare::Compare_Null);
11218 MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
11220 JSOp op = lir->cmpMir()->jsop();
11221 MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");
11223 MBasicBlock* ifTrue = lir->ifTrue();
11224 MBasicBlock* ifFalse = lir->ifFalse();
11226 if (op == JSOp::Ne) {
11227 // Swap branches.
11228 std::swap(ifTrue, ifFalse);
11231 Register input = ToRegister(lir->getOperand(0));
11233 auto* ool = new (alloc()) OutOfLineTestObject();
11234 addOutOfLineCode(ool, lir->cmpMir());
11236 Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
11237 Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
11239 // Objects that emulate undefined are loosely equal to null/undefined.
11240 Register scratch = ToRegister(lir->temp());
11241 testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
11244 void CodeGenerator::visitIsNull(LIsNull* lir) {
11245 MCompare::CompareType compareType = lir->mir()->compareType();
11246 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11248 JSOp op = lir->mir()->jsop();
11249 MOZ_ASSERT(IsStrictEqualityOp(op));
11251 const ValueOperand value = ToValue(lir, LIsNull::ValueIndex);
11252 Register output = ToRegister(lir->output());
11254 Assembler::Condition cond = JSOpToCondition(compareType, op);
11255 masm.testNullSet(cond, value, output);
11258 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
11259 MCompare::CompareType compareType = lir->mir()->compareType();
11260 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11262 JSOp op = lir->mir()->jsop();
11263 MOZ_ASSERT(IsStrictEqualityOp(op));
11265 const ValueOperand value = ToValue(lir, LIsUndefined::ValueIndex);
11266 Register output = ToRegister(lir->output());
11268 Assembler::Condition cond = JSOpToCondition(compareType, op);
11269 masm.testUndefinedSet(cond, value, output);
11272 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
11273 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11274 MOZ_ASSERT(compareType == MCompare::Compare_Null);
11276 JSOp op = lir->cmpMir()->jsop();
11277 MOZ_ASSERT(IsStrictEqualityOp(op));
11279 const ValueOperand value = ToValue(lir, LIsNullAndBranch::Value);
11281 Assembler::Condition cond = JSOpToCondition(compareType, op);
11282 testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11285 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
11286 MCompare::CompareType compareType = lir->cmpMir()->compareType();
11287 MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
11289 JSOp op = lir->cmpMir()->jsop();
11290 MOZ_ASSERT(IsStrictEqualityOp(op));
11292 const ValueOperand value = ToValue(lir, LIsUndefinedAndBranch::Value);
11294 Assembler::Condition cond = JSOpToCondition(compareType, op);
11295 testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
11298 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
11299 FloatRegister left = ToFloatRegister(lir->left());
11300 FloatRegister right = ToFloatRegister(lir->right());
11301 FloatRegister temp = ToFloatRegister(lir->temp0());
11302 Register output = ToRegister(lir->output());
11304 masm.sameValueDouble(left, right, temp, output);
11307 void CodeGenerator::visitSameValue(LSameValue* lir) {
11308 ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
11309 ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
11310 Register output = ToRegister(lir->output());
11312 using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
11313 OutOfLineCode* ool =
11314 oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
11316 // First check to see if the values have identical bits.
11317 // This is correct for SameValue because SameValue(NaN,NaN) is true,
11318 // and SameValue(0,-0) is false.
11319 masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
11320 ool->entry());
11321 masm.move32(Imm32(1), output);
11323 // If this fails, call SameValue.
11324 masm.bind(ool->rejoin());
// Shared helper emitting a string concatenation |lhs + rhs| into |output|.
// Calls the per-zone string-concat stub and falls back to the
// ConcatStrings VM function when the stub returns nullptr.
void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
                               Register output) {
  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
      StoreRegisterTo(output));

  // The stub is cached per JitZone; a read barrier may be required for it.
  const JitZone* jitZone = gen->realm->zone()->jitZone();
  JitCode* stringConcatStub =
      jitZone->stringConcatStubNoBarrier(&zoneStubsToReadBarrier_);
  masm.call(stringConcatStub);

  // A nullptr result signals the stub couldn't concatenate inline; take the
  // VM path.
  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());

  masm.bind(ool->rejoin());
}
// String concatenation. The concat stub uses a fixed register assignment;
// assert the register allocator honored it before delegating to emitConcat.
void CodeGenerator::visitConcat(LConcat* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  Register output = ToRegister(lir->output());

  MOZ_ASSERT(lhs == CallTempReg0);
  MOZ_ASSERT(rhs == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
  MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
  MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
  MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
  MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
  MOZ_ASSERT(output == CallTempReg5);

  emitConcat(lir, lhs, rhs, output);
}
// Emit a character-copy loop from |from| to |to| for |len| characters,
// optionally inflating Latin-1 to two-byte on the fly. |from| and |to| are
// advanced past the copied characters; |len| and |byteOpScratch| are
// clobbered. |maximumLength|, when not SIZE_MAX, is a static upper bound on
// |len| used to prune unreachable copy widths and to unroll small loops.
11362 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11363 Register len, Register byteOpScratch,
11364 CharEncoding fromEncoding, CharEncoding toEncoding,
11365 size_t maximumLength = SIZE_MAX) {
11366 // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
11367 // (checked below in debug builds), and when done |to| must point to the
11368 // next available char.
11370 #ifdef DEBUG
11371 Label ok;
11372 masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
11373 masm.assumeUnreachable("Length should be greater than 0.");
11374 masm.bind(&ok);
11376 if (maximumLength != SIZE_MAX) {
11377 MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");
11379 Label ok;
11380 masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
11381 masm.assumeUnreachable("Length should not exceed maximum length.");
11382 masm.bind(&ok);
11384 #endif
// Deflation (two-byte -> Latin-1) is not supported; only copy-as-is or
// inflation (Latin-1 -> two-byte).
11386 MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
11387 fromEncoding == CharEncoding::Latin1);
11389 size_t fromWidth =
11390 fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11391 size_t toWidth =
11392 toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
11394 // Try to copy multiple characters at once when both encoding are equal.
11395 if (fromEncoding == toEncoding) {
11396 constexpr size_t ptrWidth = sizeof(uintptr_t);
11398 // Copy |width| bytes and then adjust |from| and |to|.
11399 auto copyCharacters = [&](size_t width) {
11400 static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");
11402 switch (width) {
11403 case 1:
11404 masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
11405 masm.store8(byteOpScratch, Address(to, 0));
11406 break;
11407 case 2:
11408 masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
11409 masm.store16(byteOpScratch, Address(to, 0));
11410 break;
11411 case 4:
11412 masm.load32(Address(from, 0), byteOpScratch);
11413 masm.store32(byteOpScratch, Address(to, 0));
11414 break;
11415 case 8:
11416 MOZ_ASSERT(width == ptrWidth);
11417 masm.loadPtr(Address(from, 0), byteOpScratch);
11418 masm.storePtr(byteOpScratch, Address(to, 0));
11419 break;
11422 masm.addPtr(Imm32(width), from);
11423 masm.addPtr(Imm32(width), to);
11426 // First align |len| to pointer width.
11427 Label done;
11428 for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
11429 // Number of characters which fit into |width| bytes.
11430 size_t charsPerWidth = width / fromWidth;
// Test the corresponding bit of |len|: if set, copy |charsPerWidth|
// characters at this width so the remainder is a multiple of the next
// larger width.
11432 if (charsPerWidth < maximumLength) {
11433 Label next;
11434 masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);
11436 copyCharacters(width);
11438 masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
11439 masm.bind(&next);
11440 } else if (charsPerWidth == maximumLength) {
// |len| can't exceed |charsPerWidth|, so a single copy suffices.
11441 copyCharacters(width);
11442 masm.sub32(Imm32(charsPerWidth), len);
11446 size_t maxInlineLength;
11447 if (fromEncoding == CharEncoding::Latin1) {
11448 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
11449 } else {
11450 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
11453 // Number of characters which fit into a single register.
11454 size_t charsPerPtr = ptrWidth / fromWidth;
11456 // Unroll small loops.
11457 constexpr size_t unrollLoopLimit = 3;
11458 size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;
11460 #ifdef JS_64BIT
11461 static constexpr size_t latin1MaxInlineByteLength =
11462 JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
11463 static constexpr size_t twoByteMaxInlineByteLength =
11464 JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);
11466 // |unrollLoopLimit| should be large enough to allow loop unrolling on
11467 // 64-bit targets.
11468 static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
11469 "Latin-1 loops are unrolled on 64-bit");
11470 static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
11471 "Two-byte loops are unrolled on 64-bit");
11472 #endif
11474 if (loopCount <= unrollLoopLimit) {
11475 Label labels[unrollLoopLimit];
11477 // Check up front how many characters can be copied.
11478 for (size_t i = 1; i < loopCount; i++) {
11479 masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
11480 &labels[i]);
11483 // Generate the unrolled loop body.
11484 for (size_t i = loopCount; i > 0; i--) {
11485 copyCharacters(ptrWidth);
11486 masm.sub32(Imm32(charsPerPtr), len);
11488 // Jump target for the previous length check.
11489 if (i != 1) {
11490 masm.bind(&labels[i - 1]);
11493 } else {
// Generic pointer-width copy loop when unrolling isn't profitable.
11494 Label start;
11495 masm.bind(&start);
11496 copyCharacters(ptrWidth);
11497 masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
11500 masm.bind(&done);
11501 } else {
// Encodings differ (Latin-1 -> two-byte inflation): copy one character
// at a time, widening via loadChar/storeChar.
11502 Label start;
11503 masm.bind(&start);
11504 masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
11505 masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
11506 masm.addPtr(Imm32(fromWidth), from);
11507 masm.addPtr(Imm32(toWidth), to);
11508 masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
// Convenience overload: copy without changing encoding (source and
// destination use the same |encoding|).
11512 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
11513 Register len, Register byteOpScratch,
11514 CharEncoding encoding, size_t maximumLength) {
11515 CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
11516 maximumLength);
// Copy all chars of linear string |input| into the two-byte buffer
// |destChars|, inflating from Latin-1 when necessary. Clobbers |input|
// (repurposed as the source chars pointer), |temp1| (length) and |temp2|;
// |destChars| is advanced past the copied characters.
11519 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
11520 Register destChars, Register temp1,
11521 Register temp2) {
11522 // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
11523 // have to inflate.
11525 Label isLatin1, done;
11526 masm.loadStringLength(input, temp1);
11527 masm.branchLatin1String(input, &isLatin1);
// Two-byte source: straight copy.
11529 masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
11530 masm.movePtr(temp2, input);
11531 CopyStringChars(masm, destChars, input, temp1, temp2,
11532 CharEncoding::TwoByte);
11533 masm.jump(&done);
11535 masm.bind(&isLatin1);
// Latin-1 source: inflate each char to two bytes while copying.
11537 masm.loadStringChars(input, temp2, CharEncoding::Latin1);
11538 masm.movePtr(temp2, input);
11539 CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
11540 CharEncoding::TwoByte);
11542 masm.bind(&done);
// Allocate a JSThinInlineString or JSFatInlineString in |output| big enough
// to hold |length| characters of |encoding|, choosing thin vs. fat based on
// the runtime length. Initializes the flags and length words; jumps to
// |failure| if GC allocation fails. |temp| is clobbered.
11545 static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
11546 Register length, Register temp,
11547 gc::Heap initialStringHeap,
11548 Label* failure,
11549 CharEncoding encoding) {
11550 #ifdef DEBUG
// Callers must guarantee the length fits a fat inline string.
11551 size_t maxInlineLength;
11552 if (encoding == CharEncoding::Latin1) {
11553 maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
11554 } else {
11555 maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
11558 Label ok;
11559 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
11560 masm.assumeUnreachable("string length too large to be allocated as inline");
11561 masm.bind(&ok);
11562 #endif
11564 size_t maxThinInlineLength;
11565 if (encoding == CharEncoding::Latin1) {
11566 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
11567 } else {
11568 maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
11571 Label isFat, allocDone;
11572 masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
// Thin inline string path.
11574 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
11575 if (encoding == CharEncoding::Latin1) {
11576 flags |= JSString::LATIN1_CHARS_BIT;
11578 masm.newGCString(output, temp, initialStringHeap, failure);
11579 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11580 masm.jump(&allocDone);
11582 masm.bind(&isFat);
// Fat inline string path.
11584 uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
11585 if (encoding == CharEncoding::Latin1) {
11586 flags |= JSString::LATIN1_CHARS_BIT;
11588 masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
11589 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11591 masm.bind(&allocDone);
11593 // Store length.
11594 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Concatenate linear strings |lhs| and |rhs| into a freshly allocated inline
// string in |output|, copying both operands' characters. On entry the result
// length must already be in |temp2|. Jumps to |failure| for ropes or
// allocation failure. Clobbers |lhs|, |rhs| and all three temps.
11597 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
11598 Register output, Register temp1, Register temp2,
11599 Register temp3, gc::Heap initialStringHeap,
11600 Label* failure, CharEncoding encoding) {
11601 JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
11602 (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
11604 // State: result length in temp2.
11606 // Ensure both strings are linear.
11607 masm.branchIfRope(lhs, failure);
11608 masm.branchIfRope(rhs, failure);
11610 // Allocate a JSThinInlineString or JSFatInlineString.
11611 AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
11612 failure, encoding);
11614 // Load chars pointer in temp2.
11615 masm.loadInlineStringCharsForStore(output, temp2);
// Copy one operand's characters into the destination buffer, inflating
// Latin-1 sources when the result is two-byte. Clobbers |src|.
11617 auto copyChars = [&](Register src) {
11618 if (encoding == CharEncoding::TwoByte) {
11619 CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
11620 } else {
11621 masm.loadStringLength(src, temp3);
11622 masm.loadStringChars(src, temp1, CharEncoding::Latin1);
11623 masm.movePtr(temp1, src);
11624 CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
11628 // Copy lhs chars. Note that this advances temp2 to point to the next
11629 // char. This also clobbers the lhs register.
11630 copyChars(lhs);
11632 // Copy rhs chars. Clobbers the rhs register.
11633 copyChars(rhs);
// Codegen for LSubstr: take a substring of |string| starting at |begin| with
// |length| characters. Fast paths, in order: the empty string, the whole
// input, length-1/2 static strings, thin/fat inline strings, and a dependent
// string. All remaining cases (ropes, allocation failure) go to the
// SubstringKernel VM call.
11636 void CodeGenerator::visitSubstr(LSubstr* lir) {
11637 Register string = ToRegister(lir->string());
11638 Register begin = ToRegister(lir->begin());
11639 Register length = ToRegister(lir->length());
11640 Register output = ToRegister(lir->output());
11641 Register temp0 = ToRegister(lir->temp0());
11642 Register temp2 = ToRegister(lir->temp2());
11644 // On x86 there are not enough registers. In that case reuse the string
11645 // register as temporary.
11646 Register temp1 =
11647 lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());
// Use range analysis to get a static upper bound on |length|; this lets us
// skip codegen for inline-string or dependent-string paths that can never
// be taken.
11649 size_t maximumLength = SIZE_MAX;
11651 Range* range = lir->mir()->length()->range();
11652 if (range && range->hasInt32UpperBound()) {
11653 MOZ_ASSERT(range->upper() >= 0);
11654 maximumLength = size_t(range->upper());
11657 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
11658 JSThinInlineString::MAX_LENGTH_LATIN1);
11660 static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
11661 JSFatInlineString::MAX_LENGTH_LATIN1);
11663 bool tryFatInlineOrDependent =
11664 maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
11665 bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;
11667 #ifdef DEBUG
11668 if (maximumLength != SIZE_MAX) {
11669 Label ok;
11670 masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
11671 masm.assumeUnreachable("length should not exceed maximum length");
11672 masm.bind(&ok);
11674 #endif
11676 Label nonZero, nonInput;
11678 // For every edge case use the C++ variant.
11679 // Note: we also use this upon allocation failure in newGCString and
11680 // newGCFatInlineString. To squeeze out even more performance those failures
11681 // can be handled by allocate in ool code and returning to jit code to fill
11682 // in all data.
11683 using Fn = JSString* (*)(JSContext* cx, HandleString str, int32_t begin,
11684 int32_t len);
11685 OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
11686 lir, ArgList(string, begin, length), StoreRegisterTo(output));
11687 Label* slowPath = ool->entry();
11688 Label* done = ool->rejoin();
11690 // Zero length, return emptystring.
11691 masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
11692 const JSAtomState& names = gen->runtime->names();
11693 masm.movePtr(ImmGCPtr(names.empty_), output);
11694 masm.jump(done);
11696 // Substring from 0..|str.length|, return str.
11697 masm.bind(&nonZero);
11698 masm.branch32(Assembler::NotEqual,
11699 Address(string, JSString::offsetOfLength()), length, &nonInput);
11700 #ifdef DEBUG
11702 Label ok;
11703 masm.branchTest32(Assembler::Zero, begin, begin, &ok);
11704 masm.assumeUnreachable("length == str.length implies begin == 0");
11705 masm.bind(&ok);
11707 #endif
11708 masm.movePtr(string, output);
11709 masm.jump(done);
11711 // Use slow path for ropes.
11712 masm.bind(&nonInput);
11713 masm.branchIfRope(string, slowPath);
11715 // Optimize one and two character strings.
11716 Label nonStatic;
11717 masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
11719 Label loadLengthOne, loadLengthTwo;
// Load the first (into temp2) and, when length == 2, second (into temp0)
// character for the static-string lookup below.
11721 auto loadChars = [&](CharEncoding encoding, bool fallthru) {
11722 size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
11723 : sizeof(char16_t);
11725 masm.loadStringChars(string, temp0, encoding);
11726 masm.loadChar(temp0, begin, temp2, encoding);
11727 masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
11728 masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
11729 if (!fallthru) {
11730 masm.jump(&loadLengthTwo);
11734 Label isLatin1;
11735 masm.branchLatin1String(string, &isLatin1);
11736 loadChars(CharEncoding::TwoByte, /* fallthru = */ false);
11738 masm.bind(&isLatin1);
11739 loadChars(CharEncoding::Latin1, /* fallthru = */ true);
11741 // Try to load a length-two static string.
11742 masm.bind(&loadLengthTwo);
11743 masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
11744 &nonStatic);
11745 masm.jump(done);
11747 // Try to load a length-one static string.
11748 masm.bind(&loadLengthOne);
11749 masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
11750 &nonStatic);
11751 masm.jump(done);
11753 masm.bind(&nonStatic);
11755 // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
11756 // notInline if we need a dependent string.
11757 Label notInline;
11759 static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
11760 JSFatInlineString::MAX_LENGTH_LATIN1);
11761 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
11762 JSFatInlineString::MAX_LENGTH_TWO_BYTE);
11764 // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
11765 // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
11766 // strings.
11768 Label allocFat, allocDone;
11769 if (tryFatInlineOrDependent) {
11770 Label isLatin1, allocThin;
11771 masm.branchLatin1String(string, &isLatin1);
// Two-byte source: pick dependent / fat / thin by length.
11773 if (tryDependent) {
11774 masm.branch32(Assembler::Above, length,
11775 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
11776 &notInline);
11778 masm.move32(Imm32(0), temp2);
11779 masm.branch32(Assembler::Above, length,
11780 Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
11781 &allocFat);
11782 masm.jump(&allocThin);
11785 masm.bind(&isLatin1);
// Latin-1 source: same selection with the larger Latin-1 limits.
11787 if (tryDependent) {
11788 masm.branch32(Assembler::Above, length,
11789 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
11790 &notInline);
11792 masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
11793 masm.branch32(Assembler::Above, length,
11794 Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
11797 masm.bind(&allocThin);
11798 } else {
// Only thin inline strings are possible: just propagate the source's
// Latin-1 bit into temp2.
11799 masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
11800 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
11804 masm.newGCString(output, temp0, initialStringHeap(), slowPath);
11805 masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
11808 if (tryFatInlineOrDependent) {
11809 masm.jump(&allocDone);
11811 masm.bind(&allocFat);
11813 masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
11814 masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
11817 masm.bind(&allocDone);
11820 masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
11821 masm.store32(length, Address(output, JSString::offsetOfLength()));
// Copy |length| chars starting at |begin| into the new inline string. On
// x86, |temp1| aliases |string|, hence the push/pop around the copy.
11823 auto initializeInlineString = [&](CharEncoding encoding) {
11824 masm.loadStringChars(string, temp0, encoding);
11825 masm.addToCharPtr(temp0, begin, encoding);
11826 if (temp1 == string) {
11827 masm.push(string);
11829 masm.loadInlineStringCharsForStore(output, temp1);
11830 CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
11831 maximumLength);
// The copy clobbered |length|; restore it from the result string.
11832 masm.loadStringLength(output, length);
11833 if (temp1 == string) {
11834 masm.pop(string);
11838 Label isInlineLatin1;
11839 masm.branchTest32(Assembler::NonZero, temp2,
11840 Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
11841 initializeInlineString(CharEncoding::TwoByte);
11842 masm.jump(done);
11844 masm.bind(&isInlineLatin1);
11845 initializeInlineString(CharEncoding::Latin1);
11848 // Handle other cases with a DependentString.
11849 if (tryDependent) {
11850 masm.jump(done);
11852 masm.bind(&notInline);
11853 masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
11854 masm.store32(length, Address(output, JSString::offsetOfLength()));
11855 masm.storeDependentStringBase(string, output);
// A dependent string shares the base string's character buffer: store the
// flags plus a pointer into the base's chars at offset |begin|.
11857 auto initializeDependentString = [&](CharEncoding encoding) {
11858 uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
11859 if (encoding == CharEncoding::Latin1) {
11860 flags |= JSString::LATIN1_CHARS_BIT;
11863 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
11864 masm.loadNonInlineStringChars(string, temp0, encoding);
11865 masm.addToCharPtr(temp0, begin, encoding);
11866 masm.storeNonInlineStringChars(temp0, output);
11869 Label isLatin1;
11870 masm.branchLatin1String(string, &isLatin1);
11871 initializeDependentString(CharEncoding::TwoByte);
11872 masm.jump(done);
11874 masm.bind(&isLatin1);
11875 initializeDependentString(CharEncoding::Latin1);
11878 masm.bind(done);
// Generate the per-zone string-concatenation stub used by LConcat. Inputs:
// lhs in CallTempReg0, rhs in CallTempReg1; result in CallTempReg5. Fast
// paths: empty operand (return the other string), small results as inline
// strings, otherwise a rope. Returns nullptr in the output register on
// failure (caller then takes the VM fallback).
11881 JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
11882 JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
11884 TempAllocator temp(&cx->tempLifoAlloc());
11885 JitContext jcx(cx);
11886 StackMacroAssembler masm(cx, temp);
11887 AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");
// Register assignment must match the assertions in visitConcat.
11889 Register lhs = CallTempReg0;
11890 Register rhs = CallTempReg1;
11891 Register temp1 = CallTempReg2;
11892 Register temp2 = CallTempReg3;
11893 Register temp3 = CallTempReg4;
11894 Register output = CallTempReg5;
11896 Label failure;
11897 #ifdef JS_USE_LINK_REGISTER
11898 masm.pushReturnAddress();
11899 #endif
11900 masm.Push(FramePointer);
11901 masm.moveStackPtrTo(FramePointer);
11903 // If lhs is empty, return rhs.
11904 Label leftEmpty;
11905 masm.loadStringLength(lhs, temp1);
11906 masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
11908 // If rhs is empty, return lhs.
11909 Label rightEmpty;
11910 masm.loadStringLength(rhs, temp2);
11911 masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
// temp2 = combined result length.
11913 masm.add32(temp1, temp2);
11915 // Check if we can use a JSInlineString. The result is a Latin1 string if
11916 // lhs and rhs are both Latin1, so we AND the flags.
11917 Label isInlineTwoByte, isInlineLatin1;
11918 masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
11919 masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
11921 Label isLatin1, notInline;
11922 masm.branchTest32(Assembler::NonZero, temp1,
11923 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
11925 masm.branch32(Assembler::BelowOrEqual, temp2,
11926 Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
11927 &isInlineTwoByte);
11928 masm.jump(&notInline);
11930 masm.bind(&isLatin1);
11932 masm.branch32(Assembler::BelowOrEqual, temp2,
11933 Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
11935 masm.bind(&notInline);
11937 // Keep AND'ed flags in temp1.
11939 // Ensure result length <= JSString::MAX_LENGTH.
11940 masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
11942 // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
11943 // == gc::Heap::Default. (As a result, no post barriers are needed below.)
11944 masm.newGCString(output, temp3, initialStringHeap, &failure);
11946 // Store rope length and flags. temp1 still holds the result of AND'ing the
11947 // lhs and rhs flags, so we just have to clear the other flags to get our rope
11948 // flags (Latin1 if both lhs and rhs are Latin1).
11949 static_assert(JSString::INIT_ROPE_FLAGS == 0,
11950 "Rope type flags must have no bits set");
11951 masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
11952 masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
11953 masm.store32(temp2, Address(output, JSString::offsetOfLength()));
11955 // Store left and right nodes.
11956 masm.storeRopeChildren(lhs, rhs, output);
11957 masm.pop(FramePointer);
11958 masm.ret();
11960 masm.bind(&leftEmpty);
11961 masm.mov(rhs, output);
11962 masm.pop(FramePointer);
11963 masm.ret();
11965 masm.bind(&rightEmpty);
11966 masm.mov(lhs, output);
11967 masm.pop(FramePointer);
11968 masm.ret();
// Small results: materialize an inline string, copying both operands'
// characters (bails to |failure| for ropes or allocation failure).
11970 masm.bind(&isInlineTwoByte);
11971 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
11972 initialStringHeap, &failure, CharEncoding::TwoByte);
11973 masm.pop(FramePointer);
11974 masm.ret();
11976 masm.bind(&isInlineLatin1);
11977 ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
11978 initialStringHeap, &failure, CharEncoding::Latin1);
11979 masm.pop(FramePointer);
11980 masm.ret();
// Failure: signal the caller with a nullptr result.
11982 masm.bind(&failure);
11983 masm.movePtr(ImmPtr(nullptr), output);
11984 masm.pop(FramePointer);
11985 masm.ret();
11987 Linker linker(masm);
11988 JitCode* code = linker.newCode(cx, CodeKind::Other);
11990 CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
11991 #ifdef MOZ_VTUNE
11992 vtune::MarkStub(code, "StringConcatStub");
11993 #endif
11995 return code;
// Generate the trampoline that frees a malloc'ed pointer (passed in
// CallTempReg0) by calling js_free via the ABI, preserving all volatile
// registers around the call.
11998 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
11999 AutoCreatedBy acb(masm, "JitRuntime::generateFreeStub");
12001 const Register regSlots = CallTempReg0;
12003 freeStubOffset_ = startTrampolineCode(masm);
12005 #ifdef JS_USE_LINK_REGISTER
12006 masm.pushReturnAddress();
12007 #endif
// Save every volatile register except the argument register itself.
12008 AllocatableRegisterSet regs(RegisterSet::Volatile());
12009 regs.takeUnchecked(regSlots);
12010 LiveRegisterSet save(regs.asLiveSet());
12011 masm.PushRegsInMask(save);
12013 const Register regTemp = regs.takeAnyGeneral();
12014 MOZ_ASSERT(regTemp != regSlots);
12016 using Fn = void (*)(void* p);
12017 masm.setupUnalignedABICall(regTemp);
12018 masm.passABIArg(regSlots);
12019 masm.callWithABI<Fn, js_free>(ABIType::General,
12020 CheckUnsafeCallWithABI::DontCheckOther);
12022 masm.PopRegsInMask(save);
12024 masm.ret();
// Generate the lazy-link trampoline: sets up a LazyLink fake exit frame,
// calls LazyLinkTopActivation to link the Ion code for the top activation,
// then jumps to the returned entry point (in ReturnReg).
12027 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
12028 AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");
12030 lazyLinkStubOffset_ = startTrampolineCode(masm);
12032 #ifdef JS_USE_LINK_REGISTER
12033 masm.pushReturnAddress();
12034 #endif
12035 masm.Push(FramePointer);
12036 masm.moveStackPtrTo(FramePointer);
12038 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12039 Register temp0 = regs.takeAny();
12040 Register temp1 = regs.takeAny();
12041 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the fake exit frame.
12043 masm.loadJSContext(temp0);
12044 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
12045 masm.moveStackPtrTo(temp1);
12047 using Fn = uint8_t* (*)(JSContext* cx, LazyLinkExitFrameLayout* frame);
12048 masm.setupUnalignedABICall(temp2);
12049 masm.passABIArg(temp0);
12050 masm.passABIArg(temp1);
12051 masm.callWithABI<Fn, LazyLinkTopActivation>(
12052 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
12054 // Discard exit frame and restore frame pointer.
12055 masm.leaveExitFrame(0);
12056 masm.pop(FramePointer);
12058 #ifdef JS_USE_LINK_REGISTER
12059 // Restore the return address such that the emitPrologue function of the
12060 // CodeGenerator can push it back on the stack with pushReturnAddress.
12061 masm.popReturnAddress();
12062 #endif
// Tail-jump into the freshly linked code.
12063 masm.jump(ReturnReg);
// Generate the interpreter trampoline: sets up an InterpreterStub fake exit
// frame, calls InvokeFromInterpreterStub to run the callee in the
// interpreter, then loads the return value (stored in argv[0] by the
// callee) into JSReturnOperand.
12066 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
12067 AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");
12069 interpreterStubOffset_ = startTrampolineCode(masm);
12071 #ifdef JS_USE_LINK_REGISTER
12072 masm.pushReturnAddress();
12073 #endif
12074 masm.Push(FramePointer);
12075 masm.moveStackPtrTo(FramePointer);
12077 AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
12078 Register temp0 = regs.takeAny();
12079 Register temp1 = regs.takeAny();
12080 Register temp2 = regs.takeAny();
// temp0 = cx, temp1 = pointer to the fake exit frame.
12082 masm.loadJSContext(temp0);
12083 masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
12084 masm.moveStackPtrTo(temp1);
12086 using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
12087 masm.setupUnalignedABICall(temp2);
12088 masm.passABIArg(temp0);
12089 masm.passABIArg(temp1);
12090 masm.callWithABI<Fn, InvokeFromInterpreterStub>(
12091 ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// Propagate exceptions through the JIT failure path.
12093 masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
12095 // Discard exit frame and restore frame pointer.
12096 masm.leaveExitFrame(0);
12097 masm.pop(FramePointer);
12099 // InvokeFromInterpreterStub stores the return value in argv[0], where the
12100 // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
12101 // just popped.
12102 masm.loadValue(Address(masm.getStackPointer(),
12103 JitFrameLayout::offsetOfThis() - sizeof(void*)),
12104 JSReturnOperand);
12105 masm.ret();
// Generate a stub that canonicalizes the Value in R0: if R0 holds a double
// that converts exactly to an int32 (ignoring negative zero), retag it as an
// int32 Value; otherwise leave R0 unchanged.
12108 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
12109 AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
12110 doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
12112 Label done;
12113 masm.branchTestDouble(Assembler::NotEqual, R0, &done);
12115 masm.unboxDouble(R0, FloatReg0);
// convertDoubleToInt32 jumps to |done| (leaving R0 as a double) when the
// conversion would be lossy.
12116 masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
12117 /* negativeZeroCheck = */ false);
12118 masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
12120 masm.bind(&done);
12121 masm.abiret();
// Codegen for LLinearizeString: return |str| unchanged if it is already
// linear; otherwise call LinearizeForCharAccess out-of-line to flatten the
// rope.
12124 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
12125 Register str = ToRegister(lir->str());
12126 Register output = ToRegister(lir->output());
12128 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12129 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12130 lir, ArgList(str), StoreRegisterTo(output));
12132 masm.branchIfRope(str, ool->entry());
12134 masm.movePtr(str, output);
12135 masm.bind(ool->rejoin());
// Codegen for LLinearizeForCharAccess: return |str| unchanged when the char
// at |index| can be loaded directly; otherwise linearize via the
// LinearizeForCharAccess VM call.
12138 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
12139 Register str = ToRegister(lir->str());
12140 Register index = ToRegister(lir->index());
12141 Register output = ToRegister(lir->output());
12143 using Fn = JSLinearString* (*)(JSContext*, JSString*);
12144 auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
12145 lir, ArgList(str), StoreRegisterTo(output));
12147 masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
12149 masm.movePtr(str, output);
12150 masm.bind(ool->rejoin());
// Codegen for LCharCodeAt: load the char code of |str| at the (known
// in-bounds) index into |output|, calling the CharCodeAt VM function when
// the fast char load fails. A bogus index LIR operand means the index is
// the constant 0.
12153 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
12154 Register str = ToRegister(lir->str());
12155 Register output = ToRegister(lir->output());
12156 Register temp0 = ToRegister(lir->temp0());
12157 Register temp1 = ToRegister(lir->temp1());
12159 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12161 if (lir->index()->isBogus()) {
12162 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12163 StoreRegisterTo(output));
12164 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12165 masm.bind(ool->rejoin());
12166 } else {
12167 Register index = ToRegister(lir->index());
12169 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12170 StoreRegisterTo(output));
12171 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12172 masm.bind(ool->rejoin());
// Codegen for LCharCodeAtOrNegative: like visitCharCodeAt, but returns -1 in
// |output| for an out-of-bounds index instead of assuming the index is valid.
12176 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
12177 Register str = ToRegister(lir->str());
12178 Register output = ToRegister(lir->output());
12179 Register temp0 = ToRegister(lir->temp0());
12180 Register temp1 = ToRegister(lir->temp1());
12182 using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
12184 // Return -1 for out-of-bounds access.
12185 masm.move32(Imm32(-1), output);
12187 if (lir->index()->isBogus()) {
// Constant index 0: only the empty string is out of bounds.
12188 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
12189 StoreRegisterTo(output));
12191 masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
12192 Imm32(0), ool->rejoin());
12193 masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
12194 masm.bind(ool->rejoin());
12195 } else {
12196 Register index = ToRegister(lir->index());
12198 auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
12199 StoreRegisterTo(output));
// Spectre-safe bounds check; out-of-bounds keeps the -1 and skips the load.
12201 masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
12202 temp0, ool->rejoin());
12203 masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
12204 masm.bind(ool->rejoin());
// Codegen for LNegativeToNaN: box |input| as an int32 Value, except that
// negative inputs produce the NaN Value instead.
12208 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
12209 Register input = ToRegister(lir->input());
12210 ValueOperand output = ToOutValue(lir);
12212 masm.tagValue(JSVAL_TYPE_INT32, input, output);
12214 Label done;
12215 masm.branchTest32(Assembler::NotSigned, input, input, &done);
12216 masm.moveValue(JS::NaNValue(), output);
12217 masm.bind(&done);
// Codegen for LFromCharCode: map a char code to a string via the static
// strings table; codes outside the table (>= UNIT_STATIC_LIMIT) fall back to
// the StringFromCharCode VM call.
12220 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
12221 Register code = ToRegister(lir->code());
12222 Register output = ToRegister(lir->output());
12224 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12225 OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCharCode>(
12226 lir, ArgList(code), StoreRegisterTo(output));
12228 // OOL path if code >= UNIT_STATIC_LIMIT.
12229 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12230 ool->entry());
12232 masm.bind(ool->rejoin());
// Codegen for LFromCharCodeEmptyIfNegative: like visitFromCharCode, but a
// negative char code yields the empty string instead of being an error.
12235 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
12236 LFromCharCodeEmptyIfNegative* lir) {
12237 Register code = ToRegister(lir->code());
12238 Register output = ToRegister(lir->output());
12240 using Fn = JSLinearString* (*)(JSContext*, int32_t);
12241 auto* ool = oolCallVM<Fn, jit::StringFromCharCode>(lir, ArgList(code),
12242 StoreRegisterTo(output));
12244 // Return the empty string for negative inputs.
12245 const JSAtomState& names = gen->runtime->names();
12246 masm.movePtr(ImmGCPtr(names.empty_), output);
12247 masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
12249 // OOL path if code >= UNIT_STATIC_LIMIT.
12250 masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
12251 ool->entry());
12253 masm.bind(ool->rejoin());
// Codegen for LFromCodePoint: build a string from a Unicode code point.
// Latin-1 code points come from the static strings table; everything else is
// stored into a fresh JSThinInlineString, encoding supplementary-plane code
// points as a UTF-16 surrogate pair. Invalid code points (> NonBMPMax) bail
// out, and inline-allocation failure takes the StringFromCodePoint VM call.
12256 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
12257 Register codePoint = ToRegister(lir->codePoint());
12258 Register output = ToRegister(lir->output());
12259 Register temp0 = ToRegister(lir->temp0());
12260 Register temp1 = ToRegister(lir->temp1());
12261 LSnapshot* snapshot = lir->snapshot();
12263 // The OOL path is only taken when we can't allocate the inline string.
12264 using Fn = JSString* (*)(JSContext*, int32_t);
12265 OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCodePoint>(
12266 lir, ArgList(codePoint), StoreRegisterTo(output));
12268 Label isTwoByte;
12269 Label* done = ool->rejoin();
12271 static_assert(
12272 StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
12273 "Latin-1 strings can be loaded from static strings");
// Fast path: Latin-1 code points hit the static strings table; anything
// larger falls through to the two-byte path below.
12276 masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
12277 &isTwoByte);
12278 masm.jump(done);
12280 masm.bind(&isTwoByte);
12282 // Use a bailout if the input is not a valid code point, because
12283 // MFromCodePoint is movable and it'd be observable when a moved
12284 // fromCodePoint throws an exception before its actual call site.
12285 bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
12286 snapshot);
12288 // Allocate a JSThinInlineString.
12290 static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
12291 "JSThinInlineString can hold a supplementary code point");
12293 uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
12294 masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
12295 masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
// BMP code points (< NonBMPMin) need one char16_t; supplementary ones two.
12298 Label isSupplementary;
12299 masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
12300 &isSupplementary);
12302 // Store length.
12303 masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
12305 // Load chars pointer in temp0.
12306 masm.loadInlineStringCharsForStore(output, temp0);
12308 masm.store16(codePoint, Address(temp0, 0));
12310 masm.jump(done);
12312 masm.bind(&isSupplementary);
12314 // Store length.
12315 masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
12317 // Load chars pointer in temp0.
12318 masm.loadInlineStringCharsForStore(output, temp0);
12320 // Inlined unicode::LeadSurrogate(uint32_t).
12321 masm.move32(codePoint, temp1);
12322 masm.rshift32(Imm32(10), temp1);
12323 masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
12324 temp1);
12326 masm.store16(temp1, Address(temp0, 0));
12328 // Inlined unicode::TrailSurrogate(uint32_t).
12329 masm.move32(codePoint, temp1);
12330 masm.and32(Imm32(0x3FF), temp1);
12331 masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);
12333 masm.store16(temp1, Address(temp0, sizeof(char16_t)));
12337 masm.bind(done);
12340 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
12341 pushArg(ToRegister(lir->searchString()));
12342 pushArg(ToRegister(lir->string()));
12344 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12345 callVM<Fn, js::StringIncludes>(lir);
// Shared emitter for the inlined fast paths of String.prototype.includes
// (LStringIncludesSIMD) and String.prototype.indexOf (LStringIndexOfSIMD)
// when the search string is a constant linear string of one or two
// characters. The actual scan is done by mozilla::SIMD::memchr* via an ABI
// call; ropes and OOM fall back to |ool|, which performs the generic VM call.
template <typename LIns>
static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
                            LiveRegisterSet volatileRegs) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register tempLength = ToRegister(lir->temp0());
  Register tempChars = ToRegister(lir->temp1());
  Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());

  const JSLinearString* searchString = lir->searchString();
  size_t length = searchString->length();
  MOZ_ASSERT(length == 1 || length == 2);

  // The additional temp register is only needed when searching for two
  // pattern characters.
  MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);

  // Preload the "not found" result: false for includes, -1 for indexOf.
  if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
    masm.move32(Imm32(0), output);
  } else {
    masm.move32(Imm32(-1), output);
  }

  masm.loadStringLength(string, tempLength);

  // Can't be a substring when the string is smaller than the search string.
  Label done;  // NOTE(review): |done| looks unused; exits target ool->rejoin().
  masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());

  bool searchStringIsPureTwoByte = false;
  if (searchString->hasTwoByteChars()) {
    JS::AutoCheckCannotGC nogc;
    searchStringIsPureTwoByte =
        !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
  }

  // Pure two-byte strings can't occur in a Latin-1 string.
  if (searchStringIsPureTwoByte) {
    masm.branchLatin1String(string, ool->rejoin());
  }

  // Slow path when we need to linearize the string.
  masm.branchIfRope(string, ool->entry());

  Label restoreVolatile;

  // Emits the ABI call to the SIMD matcher for the given character encoding
  // of the input string. Pattern characters are materialized as immediates.
  auto callMatcher = [&](CharEncoding encoding) {
    masm.loadStringChars(string, tempChars, encoding);

    LiveGeneralRegisterSet liveRegs;
    if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
      // Save |tempChars| to compute the result index.
      liveRegs.add(tempChars);

#ifdef DEBUG
      // Save |tempLength| in debug-mode for assertions.
      liveRegs.add(tempLength);
#endif

      // Exclude non-volatile registers.
      liveRegs.set() = GeneralRegisterSet::Intersect(
          liveRegs.set(), GeneralRegisterSet::Volatile());

      masm.PushRegsInMask(liveRegs);
    }

    if (length == 1) {
      char16_t pat = searchString->latin1OrTwoByteChar(0);
      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
                    pat <= JSString::MAX_LATIN1_CHAR);

      masm.move32(Imm32(pat), output);

      masm.setupAlignedABICall();
      masm.passABIArg(tempChars);
      masm.passABIArg(output);
      masm.passABIArg(tempLength);
      if (encoding == CharEncoding::Latin1) {
        using Fn = const char* (*)(const char*, char, size_t);
        masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
      } else {
        using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
        masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
      }
    } else {
      char16_t pat0 = searchString->latin1OrTwoByteChar(0);
      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
                    pat0 <= JSString::MAX_LATIN1_CHAR);

      char16_t pat1 = searchString->latin1OrTwoByteChar(1);
      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
                    pat1 <= JSString::MAX_LATIN1_CHAR);

      masm.move32(Imm32(pat0), output);
      masm.move32(Imm32(pat1), maybeTempPat);

      masm.setupAlignedABICall();
      masm.passABIArg(tempChars);
      masm.passABIArg(output);
      masm.passABIArg(maybeTempPat);
      masm.passABIArg(tempLength);
      if (encoding == CharEncoding::Latin1) {
        using Fn = const char* (*)(const char*, char, char, size_t);
        masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
      } else {
        using Fn =
            const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
        masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
      }
    }

    // The matcher returns a pointer to the match, or nullptr.
    masm.storeCallPointerResult(output);

    // Convert to string index for `indexOf`.
    if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
      // Restore |tempChars|. (And in debug mode |tempLength|.)
      masm.PopRegsInMask(liveRegs);

      Label found;
      masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
      {
        masm.move32(Imm32(-1), output);
        masm.jump(&restoreVolatile);
      }
      masm.bind(&found);

#ifdef DEBUG
      // Check lower bound.
      Label lower;
      masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
      masm.assumeUnreachable("result pointer below string chars");
      masm.bind(&lower);

      // Compute the end position of the characters.
      auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
      masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
                                   tempLength);

      // Check upper bound.
      Label upper;
      masm.branchPtr(Assembler::Below, output, tempLength, &upper);
      masm.assumeUnreachable("result pointer above string chars");
      masm.bind(&upper);
#endif

      // Pointer difference, scaled down to a character index for two-byte.
      masm.subPtr(tempChars, output);

      if (encoding == CharEncoding::TwoByte) {
        masm.rshiftPtr(Imm32(1), output);
      }
    }
  };

  // Preserve volatile registers across the ABI call, except the ones holding
  // the inputs/outputs of the matcher itself.
  volatileRegs.takeUnchecked(output);
  volatileRegs.takeUnchecked(tempLength);
  volatileRegs.takeUnchecked(tempChars);
  if (maybeTempPat != InvalidReg) {
    volatileRegs.takeUnchecked(maybeTempPat);
  }
  masm.PushRegsInMask(volatileRegs);

  // Handle the case when the input is a Latin-1 string.
  if (!searchStringIsPureTwoByte) {
    Label twoByte;
    masm.branchTwoByteString(string, &twoByte);
    {
      callMatcher(CharEncoding::Latin1);
      masm.jump(&restoreVolatile);
    }
    masm.bind(&twoByte);
  }

  // Handle the case when the input is a two-byte string.
  callMatcher(CharEncoding::TwoByte);

  masm.bind(&restoreVolatile);
  masm.PopRegsInMask(volatileRegs);

  // Convert to bool for `includes`.
  if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
    masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
  }

  masm.bind(ool->rejoin());
}
12538 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
12539 Register string = ToRegister(lir->string());
12540 Register output = ToRegister(lir->output());
12541 const JSLinearString* searchString = lir->searchString();
12543 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12544 auto* ool = oolCallVM<Fn, js::StringIncludes>(
12545 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12547 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12550 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
12551 pushArg(ToRegister(lir->searchString()));
12552 pushArg(ToRegister(lir->string()));
12554 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12555 callVM<Fn, js::StringIndexOf>(lir);
12558 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
12559 Register string = ToRegister(lir->string());
12560 Register output = ToRegister(lir->output());
12561 const JSLinearString* searchString = lir->searchString();
12563 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12564 auto* ool = oolCallVM<Fn, js::StringIndexOf>(
12565 lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
12567 CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
12570 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
12571 pushArg(ToRegister(lir->searchString()));
12572 pushArg(ToRegister(lir->string()));
12574 using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
12575 callVM<Fn, js::StringLastIndexOf>(lir);
12578 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
12579 pushArg(ToRegister(lir->searchString()));
12580 pushArg(ToRegister(lir->string()));
12582 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12583 callVM<Fn, js::StringStartsWith>(lir);
// startsWith() with a constant non-empty search string. Unwinds rope left
// children to find a large-enough linear prefix, then compares characters
// inline; falls back to the VM for unlucky rope shapes or non-linearizable
// inputs.
void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringStartsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is false.
  masm.move32(Imm32(0), output);

  // Can't be a prefix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the start if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeLeftChild(temp, output);
  masm.movePtr(output, temp);

  // If the left child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a prefix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  if (searchString->hasTwoByteChars()) {
    // Pure two-byte strings can't be a prefix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
12656 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
12657 pushArg(ToRegister(lir->searchString()));
12658 pushArg(ToRegister(lir->string()));
12660 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
12661 callVM<Fn, js::StringEndsWith>(lir);
// endsWith() with a constant non-empty search string. Mirror image of
// visitStringStartsWithInline: unwinds rope right children, then compares the
// input's suffix against the search string inline.
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSLinearString* searchString = lir->searchString();

  size_t length = searchString->length();
  MOZ_ASSERT(length > 0);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  auto* ool = oolCallVM<Fn, js::StringEndsWith>(
      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

  // Default result is false.
  masm.move32(Imm32(0), output);

  // Can't be a suffix when the string is smaller than the search string.
  masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
                Imm32(length), ool->rejoin());

  // Unwind ropes at the end if possible.
  Label compare;
  masm.movePtr(string, temp);
  masm.branchIfNotRope(temp, &compare);

  Label unwindRope;
  masm.bind(&unwindRope);
  masm.loadRopeRightChild(temp, output);
  masm.movePtr(output, temp);

  // If the right child is smaller than the search string, jump into the VM to
  // linearize the string.
  masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
                Imm32(length), ool->entry());

  // Otherwise keep unwinding ropes.
  masm.branchIfRope(temp, &unwindRope);

  masm.bind(&compare);

  // If operands point to the same instance, it's trivially a suffix.
  Label notPointerEqual;
  masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                 &notPointerEqual);
  masm.move32(Imm32(1), output);
  masm.jump(ool->rejoin());
  masm.bind(&notPointerEqual);

  CharEncoding encoding = searchString->hasLatin1Chars()
                              ? CharEncoding::Latin1
                              : CharEncoding::TwoByte;
  if (encoding == CharEncoding::TwoByte) {
    // Pure two-byte strings can't be a suffix of Latin-1 strings.
    JS::AutoCheckCannotGC nogc;
    if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
      Label compareChars;
      masm.branchTwoByteString(temp, &compareChars);
      masm.move32(Imm32(0), output);
      masm.jump(ool->rejoin());
      masm.bind(&compareChars);
    }
  }

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

  // Move string-char pointer to the suffix string.
  masm.loadStringLength(temp, temp);
  masm.sub32(Imm32(length), temp);
  masm.addToCharPtr(stringChars, temp, encoding);

  // Start comparing character by character.
  masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

  masm.bind(ool->rejoin());
}
// Inline fast path for String.prototype.toLowerCase on short linear Latin-1
// strings: table-driven conversion into a freshly allocated inline string.
// All other cases (non-linear, two-byte, empty handled specially, too long)
// go to the js::StringToLowerCase VM call.
void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  Register string = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as a temporary.
  Register temp3 =
      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  Register temp4 = ToRegister(lir->temp4());

  using Fn = JSString* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
      lir, ArgList(string), StoreRegisterTo(output));

  // Take the slow path if the string isn't a linear Latin-1 string.
  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  Register flags = temp0;
  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  masm.and32(linearLatin1Bits, flags);
  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());

  Register length = temp0;
  masm.loadStringLength(string, length);

  // Return the input if it's the empty string.
  Label notEmptyString;
  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  {
    masm.movePtr(string, output);
    masm.jump(ool->rejoin());
  }
  masm.bind(&notEmptyString);

  Register inputChars = temp1;
  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);

  Register toLowerCaseTable = temp2;
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);

  // Single element strings can be directly retrieved from static strings cache.
  Label notSingleElementString;
  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  {
    Register current = temp4;

    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.lookupStaticString(current, output, gen->runtime->staticStrings());

    masm.jump(ool->rejoin());
  }
  masm.bind(&notSingleElementString);

  // Use the OOL-path when the string is too long. This prevents scanning long
  // strings which have upper case characters only near the end a second time in
  // the VM.
  constexpr int32_t MaxInlineLength = 64;
  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());

  {
    // Check if there are any characters which need to be converted.
    //
    // This extra loop gives a small performance improvement for strings which
    // are already lower cased and lets us avoid calling into the runtime for
    // non-inline, all lower case strings. But more importantly it avoids
    // repeated inline allocation failures:
    // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
    // |js::StringToLowerCase| runtime function when the result string can't be
    // allocated inline. And |js::StringToLowerCase| directly returns the input
    // string when no characters need to be converted. That means it won't
    // trigger GC to clear up the free nursery space, so the next toLowerCase()
    // call will again fail to inline allocate the result string.
    Label hasUpper;
    {
      Register checkInputChars = output;
      masm.movePtr(inputChars, checkInputChars);

      Register current = temp4;

      Label start;
      masm.bind(&start);
      masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
      // A character needs conversion iff the table maps it to something else.
      masm.branch8(Assembler::NotEqual,
                   BaseIndex(toLowerCaseTable, current, TimesOne), current,
                   &hasUpper);
      masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);

      // Input is already in lower case.
      masm.movePtr(string, output);
      masm.jump(ool->rejoin());
    }
    masm.bind(&hasUpper);

    // |length| was clobbered above, reload.
    masm.loadStringLength(string, length);

    // Call into the runtime when we can't create an inline string.
    masm.branch32(Assembler::Above, length,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
  }

  AllocateThinOrFatInlineString(masm, output, length, temp4,
                                initialStringHeap(), ool->entry(),
                                CharEncoding::Latin1);

  // When reusing |string| as a temp, preserve it across the copy loop.
  if (temp3 == string) {
    masm.push(string);
  }

  Register outputChars = temp3;
  masm.loadInlineStringCharsForStore(output, outputChars);

  {
    Register current = temp4;

    Label start;
    masm.bind(&start);
    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
                         current);
    masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
    masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
    masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
    masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
  }

  if (temp3 == string) {
    masm.pop(string);
  }

  masm.bind(ool->rejoin());
}
12880 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
12881 pushArg(ToRegister(lir->string()));
12883 using Fn = JSString* (*)(JSContext*, HandleString);
12884 callVM<Fn, js::StringToUpperCase>(lir);
// Lower-case a single Latin-1 char code via the lookup table and return the
// corresponding static string; non-Latin-1 codes take the OOL VM call.
void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Convert to lower case.
  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
  masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);

  // Load static string for lower case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
// Upper-case a single Latin-1 char code by inlining the unicode::ToUpperCase
// two-level table lookup, then return the static string for the result.
// Non-Latin-1 codes and the three Latin-1 characters whose upper case leaves
// Latin-1 (or expands) take the OOL VM call.
void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Most one element Latin-1 strings can be directly retrieved from the
  // static strings cache, except the following three characters:
  //
  // 1. ToUpper(U+00B5) = U+039C
  // 2. ToUpper(U+00FF) = U+0178
  // 3. ToUpper(U+00DF) = U+0053 U+0053
  masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());

  // Inline unicode::ToUpperCase (without the special case for ASCII characters)

  constexpr size_t shift = unicode::CharInfoShift;

  // code >> shift
  masm.move32(code, temp);
  masm.rshift32(Imm32(shift), temp);

  // index = index1[code >> shift];
  masm.movePtr(ImmPtr(unicode::index1), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // (code & ((1 << shift) - 1)
  masm.move32(code, output);
  masm.and32(Imm32((1 << shift) - 1), output);

  // (index << shift) + (code & ((1 << shift) - 1))
  masm.lshift32(Imm32(shift), temp);
  masm.add32(output, temp);

  // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
  masm.movePtr(ImmPtr(unicode::index2), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // Compute |index * 6| through |(index * 3) * TimesTwo|.
  static_assert(sizeof(unicode::CharacterInfo) == 6);
  masm.mulBy3(temp, temp);

  // upperCase = js_charinfo[index].upperCase
  masm.movePtr(ImmPtr(unicode::js_charinfo), output);
  masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
                                  offsetof(unicode::CharacterInfo, upperCase)),
                        temp);

  // uint16_t(ch) + upperCase
  masm.add32(code, temp);

  // Clear any high bits added when performing the unsigned 16-bit addition
  // through a signed 32-bit addition.
  masm.move8ZeroExtend(temp, temp);

  // Load static string for upper case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
12986 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
12987 Register string = ToRegister(lir->string());
12988 Register output = ToRegister(lir->output());
12990 auto volatileRegs = liveVolatileRegs(lir);
12991 volatileRegs.takeUnchecked(output);
12993 masm.PushRegsInMask(volatileRegs);
12995 using Fn = int32_t (*)(const JSString*);
12996 masm.setupAlignedABICall();
12997 masm.passABIArg(string);
12998 masm.callWithABI<Fn, jit::StringTrimStartIndex>();
12999 masm.storeCallInt32Result(output);
13001 masm.PopRegsInMask(volatileRegs);
13004 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
13005 Register string = ToRegister(lir->string());
13006 Register start = ToRegister(lir->start());
13007 Register output = ToRegister(lir->output());
13009 auto volatileRegs = liveVolatileRegs(lir);
13010 volatileRegs.takeUnchecked(output);
13012 masm.PushRegsInMask(volatileRegs);
13014 using Fn = int32_t (*)(const JSString*, int32_t);
13015 masm.setupAlignedABICall();
13016 masm.passABIArg(string);
13017 masm.passABIArg(start);
13018 masm.callWithABI<Fn, jit::StringTrimEndIndex>();
13019 masm.storeCallInt32Result(output);
13021 masm.PopRegsInMask(volatileRegs);
13024 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
13025 pushArg(Imm32(INT32_MAX));
13026 pushArg(ToRegister(lir->separator()));
13027 pushArg(ToRegister(lir->string()));
13029 using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
13030 callVM<Fn, js::StringSplitString>(lir);
13033 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
13034 Address initLength(ToRegister(lir->elements()),
13035 ObjectElements::offsetOfInitializedLength());
13036 masm.load32(initLength, ToRegister(lir->output()));
13039 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
13040 Address initLength(ToRegister(lir->elements()),
13041 ObjectElements::offsetOfInitializedLength());
13042 SetLengthFromIndex(masm, lir->index(), initLength);
13045 void CodeGenerator::visitNotBI(LNotBI* lir) {
13046 Register input = ToRegister(lir->input());
13047 Register output = ToRegister(lir->output());
13049 masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
13050 Imm32(0), output);
// Logical NOT of an object: output = 1 if the object emulates undefined
// (e.g. document.all), otherwise 0. The emulates-undefined test may take an
// out-of-line path for proxies/uncommon cases.
void CodeGenerator::visitNotO(LNotO* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifEmulatesUndefined = ool->label1();
  Label* ifDoesntEmulateUndefined = ool->label2();

  Register objreg = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, output, ool);
  // fall through

  Label join;

  // Fall-through (doesn't emulate undefined): !object is false.
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifEmulatesUndefined);
  masm.move32(Imm32(1), output);

  masm.bind(&join);
}
// Logical NOT of a boxed Value: output = 0 when the value is truthy,
// 1 when falsy. Uses the shared truthiness tester, which may branch to the
// out-of-line object test.
void CodeGenerator::visitNotV(LNotV* lir) {
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, lir->mir());

  Label* ifTruthy = ool->label1();
  Label* ifFalsy = ool->label2();

  ValueOperand input = ToValue(lir, LNotV::InputIndex);
  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());
  const TypeDataList& observedTypes = lir->mir()->observedTypes();

  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
                  ifTruthy, ifFalsy, ool);

  Label join;

  // Note that the testValueTruthy call above may choose to fall through
  // to ifTruthy instead of branching there.
  masm.bind(ifTruthy);
  masm.move32(Imm32(0), output);
  masm.jump(&join);

  masm.bind(ifFalsy);
  masm.move32(Imm32(1), output);

  // both branches meet here.
  masm.bind(&join);
}
// Bail out unless 0 <= index < length. Comparisons are unsigned so a negative
// index reads as a huge value and fails the check. Index and length may each
// be a constant, register, or memory operand; the comparison width follows
// the MIR type (Int32 vs IntPtr).
void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  const LAllocation* index = lir->index();
  const LAllocation* length = lir->length();
  LSnapshot* snapshot = lir->snapshot();

  MIRType type = lir->mir()->type();

  // Width-dispatching helpers: 32-bit vs pointer-sized bailout compares.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (index->isConstant()) {
    // Use uint32 so that the comparison is unsigned.
    uint32_t idx = ToInt32(index);
    if (length->isConstant()) {
      // Both constant: the check is resolved at compile time.
      uint32_t len = ToInt32(lir->length());
      if (idx < len) {
        return;
      }
      bailout(snapshot);
      return;
    }

    if (length->isRegister()) {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
    } else {
      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
    }
    return;
  }

  Register indexReg = ToRegister(index);
  if (length->isConstant()) {
    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  } else if (length->isRegister()) {
    // Note: operands are swapped, hence BelowOrEqual (length <= index fails).
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  }
}
// Bail out unless index+min >= 0 and index+max < length, for a hoisted bounds
// check covering the access range [index+min, index+max]. Overflow of the
// additions is detected explicitly and also bails out.
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->getTemp(0));

  // Width-dispatching helpers: 32-bit vs pointer-sized bailout compares.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    // Constant index: fold index+min/index+max at compile time when the
    // additions can't overflow and the lower bound trivially holds.
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
      if (length->isRegister()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    if (min != 0) {
      // Undo the +min so the following +max starts from the raw index; fold
      // it into max when max-min doesn't overflow.
      int32_t diff;
      if (SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  if (length->isRegister()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
13270 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
13271 int32_t min = lir->mir()->minimum();
13272 bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
13273 lir->snapshot());
13276 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
13277 MOZ_ASSERT(JitOptions.spectreIndexMasking);
13279 const LAllocation* length = lir->length();
13280 Register index = ToRegister(lir->index());
13281 Register output = ToRegister(lir->output());
13283 if (lir->mir()->type() == MIRType::Int32) {
13284 if (length->isRegister()) {
13285 masm.spectreMaskIndex32(index, ToRegister(length), output);
13286 } else {
13287 masm.spectreMaskIndex32(index, ToAddress(length), output);
13289 } else {
13290 MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
13291 if (length->isRegister()) {
13292 masm.spectreMaskIndexPtr(index, ToRegister(length), output);
13293 } else {
13294 masm.spectreMaskIndexPtr(index, ToAddress(length), output);
13299 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
13300 LInstruction* ins_;
13302 public:
13303 explicit OutOfLineStoreElementHole(LInstruction* ins) : ins_(ins) {
13304 MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
13307 void accept(CodeGenerator* codegen) override {
13308 codegen->visitOutOfLineStoreElementHole(this);
13311 MStoreElementHole* mir() const {
13312 return ins_->isStoreElementHoleV() ? ins_->toStoreElementHoleV()->mir()
13313 : ins_->toStoreElementHoleT()->mir();
13315 LInstruction* ins() const { return ins_; }
13318 void CodeGenerator::emitStoreHoleCheck(Register elements,
13319 const LAllocation* index,
13320 LSnapshot* snapshot) {
13321 Label bail;
13322 if (index->isConstant()) {
13323 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13324 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13325 } else {
13326 BaseObjectElementIndex dest(elements, ToRegister(index));
13327 masm.branchTestMagic(Assembler::Equal, dest, &bail);
13329 bailoutFrom(&bail, snapshot);
13332 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
13333 MIRType valueType, Register elements,
13334 const LAllocation* index) {
13335 MOZ_ASSERT(valueType != MIRType::MagicHole);
13336 ConstantOrRegister v = ToConstantOrRegister(value, valueType);
13337 if (index->isConstant()) {
13338 Address dest(elements, ToInt32(index) * sizeof(js::Value));
13339 masm.storeUnboxedValue(v, valueType, dest);
13340 } else {
13341 BaseObjectElementIndex dest(elements, ToRegister(index));
13342 masm.storeUnboxedValue(v, valueType, dest);
13346 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
13347 Register elements = ToRegister(store->elements());
13348 const LAllocation* index = store->index();
13350 if (store->mir()->needsBarrier()) {
13351 emitPreBarrier(elements, index);
13354 if (store->mir()->needsHoleCheck()) {
13355 emitStoreHoleCheck(elements, index, store->snapshot());
13358 emitStoreElementTyped(store->value(), store->mir()->value()->type(), elements,
13359 index);
13362 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
13363 const ValueOperand value = ToValue(lir, LStoreElementV::Value);
13364 Register elements = ToRegister(lir->elements());
13365 const LAllocation* index = lir->index();
13367 if (lir->mir()->needsBarrier()) {
13368 emitPreBarrier(elements, index);
13371 if (lir->mir()->needsHoleCheck()) {
13372 emitStoreHoleCheck(elements, index, lir->snapshot());
13375 if (lir->index()->isConstant()) {
13376 Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
13377 masm.storeValue(value, dest);
13378 } else {
13379 BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
13380 masm.storeValue(value, dest);
13384 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
13385 Register elements = ToRegister(lir->elements());
13386 Register index = ToRegister(lir->index());
13388 Address elementsFlags(elements, ObjectElements::offsetOfFlags());
13389 masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
13391 BaseObjectElementIndex element(elements, index);
13392 masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
13395 void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
13396 auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
13397 addOutOfLineCode(ool, lir->mir());
13399 Register obj = ToRegister(lir->object());
13400 Register elements = ToRegister(lir->elements());
13401 Register index = ToRegister(lir->index());
13402 Register temp = ToRegister(lir->temp0());
13404 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13405 masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
13407 emitPreBarrier(elements, lir->index());
13409 masm.bind(ool->rejoin());
13410 emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), elements,
13411 lir->index());
13413 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13414 LiveRegisterSet regs = liveVolatileRegs(lir);
13415 ConstantOrRegister val =
13416 ToConstantOrRegister(lir->value(), lir->mir()->value()->type());
13417 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp, val);
13421 void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
13422 auto* ool = new (alloc()) OutOfLineStoreElementHole(lir);
13423 addOutOfLineCode(ool, lir->mir());
13425 Register obj = ToRegister(lir->object());
13426 Register elements = ToRegister(lir->elements());
13427 Register index = ToRegister(lir->index());
13428 const ValueOperand value = ToValue(lir, LStoreElementHoleV::ValueIndex);
13429 Register temp = ToRegister(lir->temp0());
13431 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13432 masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());
13434 emitPreBarrier(elements, lir->index());
13436 masm.bind(ool->rejoin());
13437 masm.storeValue(value, BaseObjectElementIndex(elements, index));
13439 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13440 LiveRegisterSet regs = liveVolatileRegs(lir);
13441 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->index(), temp,
13442 ConstantOrRegister(value));
13446 void CodeGenerator::visitOutOfLineStoreElementHole(
13447 OutOfLineStoreElementHole* ool) {
13448 Register object, elements, index;
13449 LInstruction* ins = ool->ins();
13450 mozilla::Maybe<ConstantOrRegister> value;
13451 Register temp;
13453 if (ins->isStoreElementHoleV()) {
13454 LStoreElementHoleV* store = ins->toStoreElementHoleV();
13455 object = ToRegister(store->object());
13456 elements = ToRegister(store->elements());
13457 index = ToRegister(store->index());
13458 value.emplace(
13459 TypedOrValueRegister(ToValue(store, LStoreElementHoleV::ValueIndex)));
13460 temp = ToRegister(store->temp0());
13461 } else {
13462 LStoreElementHoleT* store = ins->toStoreElementHoleT();
13463 object = ToRegister(store->object());
13464 elements = ToRegister(store->elements());
13465 index = ToRegister(store->index());
13466 if (store->value()->isConstant()) {
13467 value.emplace(
13468 ConstantOrRegister(store->value()->toConstant()->toJSValue()));
13469 } else {
13470 MIRType valueType = store->mir()->value()->type();
13471 value.emplace(
13472 TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
13474 temp = ToRegister(store->temp0());
13477 Address initLength(elements, ObjectElements::offsetOfInitializedLength());
13479 // We're out-of-bounds. We only handle the index == initlength case.
13480 // If index > initializedLength, bail out. Note that this relies on the
13481 // condition flags sticking from the incoming branch.
13482 // Also note: this branch does not need Spectre mitigations, doing that for
13483 // the capacity check below is sufficient.
13484 Label allocElement, addNewElement;
13485 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
13486 defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
13487 // Had to reimplement for MIPS because there are no flags.
13488 bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
13489 #else
13490 bailoutIf(Assembler::NotEqual, ins->snapshot());
13491 #endif
13493 // If index < capacity, we can add a dense element inline. If not, we need
13494 // to allocate more elements first.
13495 masm.spectreBoundsCheck32(
13496 index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
13497 &allocElement);
13498 masm.jump(&addNewElement);
13500 masm.bind(&allocElement);
13502 // Save all live volatile registers, except |temp|.
13503 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
13504 liveRegs.takeUnchecked(temp);
13505 masm.PushRegsInMask(liveRegs);
13507 masm.setupAlignedABICall();
13508 masm.loadJSContext(temp);
13509 masm.passABIArg(temp);
13510 masm.passABIArg(object);
13512 using Fn = bool (*)(JSContext*, NativeObject*);
13513 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
13514 masm.storeCallPointerResult(temp);
13516 masm.PopRegsInMask(liveRegs);
13517 bailoutIfFalseBool(temp, ins->snapshot());
13519 // Load the reallocated elements pointer.
13520 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);
13522 masm.bind(&addNewElement);
13524 // Increment initLength
13525 masm.add32(Imm32(1), initLength);
13527 // If length is now <= index, increment length too.
13528 Label skipIncrementLength;
13529 Address length(elements, ObjectElements::offsetOfLength());
13530 masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
13531 masm.add32(Imm32(1), length);
13532 masm.bind(&skipIncrementLength);
13534 // Jump to the inline path where we will store the value.
13535 // We rejoin after the prebarrier, because the memory is uninitialized.
13536 masm.jump(ool->rejoin());
13539 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
13540 Register obj = ToRegister(lir->object());
13541 Register temp1 = ToRegister(lir->temp0());
13542 Register temp2 = ToRegister(lir->temp1());
13543 ValueOperand out = ToOutValue(lir);
13545 Label bail;
13546 if (lir->mir()->mode() == MArrayPopShift::Pop) {
13547 masm.packedArrayPop(obj, out, temp1, temp2, &bail);
13548 } else {
13549 MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
13550 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
13551 masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
13553 bailoutFrom(&bail, lir->snapshot());
13556 class OutOfLineArrayPush : public OutOfLineCodeBase<CodeGenerator> {
13557 LArrayPush* ins_;
13559 public:
13560 explicit OutOfLineArrayPush(LArrayPush* ins) : ins_(ins) {}
13562 void accept(CodeGenerator* codegen) override {
13563 codegen->visitOutOfLineArrayPush(this);
13566 LArrayPush* ins() const { return ins_; }
13569 void CodeGenerator::visitArrayPush(LArrayPush* lir) {
13570 Register obj = ToRegister(lir->object());
13571 Register elementsTemp = ToRegister(lir->temp0());
13572 Register length = ToRegister(lir->output());
13573 ValueOperand value = ToValue(lir, LArrayPush::ValueIndex);
13574 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
13576 auto* ool = new (alloc()) OutOfLineArrayPush(lir);
13577 addOutOfLineCode(ool, lir->mir());
13579 // Load obj->elements in elementsTemp.
13580 masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
13582 Address initLengthAddr(elementsTemp,
13583 ObjectElements::offsetOfInitializedLength());
13584 Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
13585 Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());
13587 // Bail out if length != initLength.
13588 masm.load32(lengthAddr, length);
13589 bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());
13591 // If length < capacity, we can add a dense element inline. If not, we
13592 // need to allocate more elements.
13593 masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
13594 masm.bind(ool->rejoin());
13596 // Store the value.
13597 masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
13599 // Update length and initialized length.
13600 masm.add32(Imm32(1), length);
13601 masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
13602 masm.store32(length, Address(elementsTemp,
13603 ObjectElements::offsetOfInitializedLength()));
13605 if (ValueNeedsPostBarrier(lir->mir()->value())) {
13606 LiveRegisterSet regs = liveVolatileRegs(lir);
13607 regs.addUnchecked(length);
13608 emitElementPostWriteBarrier(lir->mir(), regs, obj, lir->output()->output(),
13609 elementsTemp, ConstantOrRegister(value),
13610 /* indexDiff = */ -1);
13614 void CodeGenerator::visitOutOfLineArrayPush(OutOfLineArrayPush* ool) {
13615 LArrayPush* ins = ool->ins();
13617 Register object = ToRegister(ins->object());
13618 Register temp = ToRegister(ins->temp0());
13620 LiveRegisterSet liveRegs = liveVolatileRegs(ins);
13621 liveRegs.takeUnchecked(temp);
13622 liveRegs.addUnchecked(ToRegister(ins->output()));
13623 liveRegs.addUnchecked(ToValue(ins, LArrayPush::ValueIndex));
13625 masm.PushRegsInMask(liveRegs);
13627 masm.setupAlignedABICall();
13628 masm.loadJSContext(temp);
13629 masm.passABIArg(temp);
13630 masm.passABIArg(object);
13632 using Fn = bool (*)(JSContext*, NativeObject* obj);
13633 masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
13634 masm.storeCallPointerResult(temp);
13636 masm.PopRegsInMask(liveRegs);
13637 bailoutIfFalseBool(temp, ins->snapshot());
13639 // Load the reallocated elements pointer.
13640 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
13642 masm.jump(ool->rejoin());
13645 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
13646 Register object = ToRegister(lir->object());
13647 Register begin = ToRegister(lir->begin());
13648 Register end = ToRegister(lir->end());
13649 Register temp0 = ToRegister(lir->temp0());
13650 Register temp1 = ToRegister(lir->temp1());
13652 Label call, fail;
13654 Label bail;
13655 masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
13656 bailoutFrom(&bail, lir->snapshot());
13658 // Try to allocate an object.
13659 TemplateObject templateObject(lir->mir()->templateObj());
13660 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
13661 &fail);
13663 masm.jump(&call);
13665 masm.bind(&fail);
13666 masm.movePtr(ImmPtr(nullptr), temp0);
13668 masm.bind(&call);
13670 pushArg(temp0);
13671 pushArg(end);
13672 pushArg(begin);
13673 pushArg(object);
13675 using Fn =
13676 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
13677 callVM<Fn, ArraySliceDense>(lir);
13680 void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
13681 Register object = ToRegister(lir->object());
13682 Register begin = ToRegister(lir->begin());
13683 Register end = ToRegister(lir->end());
13684 Register temp0 = ToRegister(lir->temp0());
13685 Register temp1 = ToRegister(lir->temp1());
13687 Label call, fail;
13689 // Try to allocate an object.
13690 TemplateObject templateObject(lir->mir()->templateObj());
13691 masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
13692 &fail);
13694 masm.jump(&call);
13696 masm.bind(&fail);
13697 masm.movePtr(ImmPtr(nullptr), temp0);
13699 masm.bind(&call);
13701 pushArg(temp0);
13702 pushArg(end);
13703 pushArg(begin);
13704 pushArg(object);
13706 using Fn =
13707 JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
13708 callVM<Fn, ArgumentsSliceDense>(lir);
#ifdef DEBUG
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                   const RegisterOrInt32& count,
                                                   Register numActualArgs) {
  // Debug-only runtime assertions on the slice range. Each operand can be a
  // compile-time constant or a register, so every check has two variants.

  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
13786 template <class ArgumentsSlice>
13787 void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
13788 const RegisterOrInt32& count, Register output,
13789 Register temp) {
13790 using Fn = ArrayObject* (*)(JSContext*, int32_t);
13791 auto* ool = count.match(
13792 [&](Register count) {
13793 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
13794 lir, ArgList(count), StoreRegisterTo(output));
13796 [&](int32_t count) {
13797 return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
13798 lir, ArgList(Imm32(count)), StoreRegisterTo(output));
13801 TemplateObject templateObject(lir->mir()->templateObj());
13802 MOZ_ASSERT(templateObject.isArrayObject());
13804 auto templateNativeObj = templateObject.asTemplateNativeObject();
13805 MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
13806 MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
13807 MOZ_ASSERT(!templateNativeObj.hasDynamicElements());
13809 // Check array capacity. Call into the VM if the template object's capacity
13810 // is too small.
13811 bool tryAllocate = count.match(
13812 [&](Register count) {
13813 masm.branch32(Assembler::Above, count,
13814 Imm32(templateNativeObj.getDenseCapacity()),
13815 ool->entry());
13816 return true;
13818 [&](int32_t count) {
13819 MOZ_ASSERT(count >= 0);
13820 if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
13821 masm.jump(ool->entry());
13822 return false;
13824 return true;
13827 if (tryAllocate) {
13828 // Try to allocate an object.
13829 masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
13830 ool->entry());
13832 auto setInitializedLengthAndLength = [&](auto count) {
13833 const int elementsOffset = NativeObject::offsetOfFixedElements();
13835 // Update initialized length.
13836 Address initLength(
13837 output, elementsOffset + ObjectElements::offsetOfInitializedLength());
13838 masm.store32(count, initLength);
13840 // Update length.
13841 Address length(output, elementsOffset + ObjectElements::offsetOfLength());
13842 masm.store32(count, length);
13845 // The array object was successfully created. Set the length and initialized
13846 // length and then proceed to fill the elements.
13847 count.match([&](Register count) { setInitializedLengthAndLength(count); },
13848 [&](int32_t count) {
13849 if (count > 0) {
13850 setInitializedLengthAndLength(Imm32(count));
13855 masm.bind(ool->rejoin());
13858 void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
13859 Register begin = ToRegister(lir->begin());
13860 Register count = ToRegister(lir->count());
13861 Register temp = ToRegister(lir->temp0());
13862 Register output = ToRegister(lir->output());
13864 #ifdef DEBUG
13865 masm.loadNumActualArgs(FramePointer, temp);
13866 emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
13867 temp);
13868 #endif
13870 emitNewArray(lir, RegisterOrInt32(count), output, temp);
13872 Label done;
13873 masm.branch32(Assembler::Equal, count, Imm32(0), &done);
13875 AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
13876 allRegs.take(begin);
13877 allRegs.take(count);
13878 allRegs.take(temp);
13879 allRegs.take(output);
13881 ValueOperand value = allRegs.takeAnyValue();
13883 LiveRegisterSet liveRegs;
13884 liveRegs.add(output);
13885 liveRegs.add(begin);
13886 liveRegs.add(value);
13888 masm.PushRegsInMask(liveRegs);
13890 // Initialize all elements.
13892 Register elements = output;
13893 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
13895 Register argIndex = begin;
13897 Register index = temp;
13898 masm.move32(Imm32(0), index);
13900 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
13901 BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);
13903 Label loop;
13904 masm.bind(&loop);
13906 masm.loadValue(argPtr, value);
13908 // We don't need a pre-barrier, because the element at |index| is guaranteed
13909 // to be a non-GC thing (either uninitialized memory or the magic hole
13910 // value).
13911 masm.storeValue(value, BaseObjectElementIndex(elements, index));
13913 masm.add32(Imm32(1), index);
13914 masm.add32(Imm32(1), argIndex);
13916 masm.branch32(Assembler::LessThan, index, count, &loop);
13918 masm.PopRegsInMask(liveRegs);
13920 // Emit a post-write barrier if |output| is tenured.
13922 // We expect that |output| is nursery allocated, so it isn't worth the
13923 // trouble to check if no frame argument is a nursery thing, which would
13924 // allow to omit the post-write barrier.
13925 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
13927 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
13928 volatileRegs.takeUnchecked(temp);
13929 if (output.volatile_()) {
13930 volatileRegs.addUnchecked(output);
13933 masm.PushRegsInMask(volatileRegs);
13934 emitPostWriteBarrier(output);
13935 masm.PopRegsInMask(volatileRegs);
13937 masm.bind(&done);
13940 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
13941 const LAllocation* allocation) {
13942 if (allocation->isConstant()) {
13943 return RegisterOrInt32(allocation->toConstant()->toInt32());
13945 return RegisterOrInt32(ToRegister(allocation));
13948 void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
13949 RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
13950 RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
13951 Register temp = ToRegister(lir->temp());
13952 Register output = ToRegister(lir->output());
13954 uint32_t numActuals = lir->mir()->numActuals();
13956 #ifdef DEBUG
13957 masm.move32(Imm32(numActuals), temp);
13959 emitAssertArgumentsSliceBounds(begin, count, temp);
13960 #endif
13962 emitNewArray(lir, count, output, temp);
13964 // We're done if there are no actual arguments.
13965 if (numActuals == 0) {
13966 return;
13969 // Check if any arguments have to be copied.
13970 Label done;
13971 if (count.is<Register>()) {
13972 masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
13973 } else if (count.as<int32_t>() == 0) {
13974 return;
13977 auto getArg = [&](uint32_t i) {
13978 return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
13979 lir->mir()->getArg(i)->type());
13982 auto storeArg = [&](uint32_t i, auto dest) {
13983 // We don't need a pre-barrier because the element at |index| is guaranteed
13984 // to be a non-GC thing (either uninitialized memory or the magic hole
13985 // value).
13986 masm.storeConstantOrRegister(getArg(i), dest);
13989 // Initialize all elements.
13990 if (numActuals == 1) {
13991 // There's exactly one argument. We've checked that |count| is non-zero,
13992 // which implies that |begin| must be zero.
13993 MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);
13995 Register elements = temp;
13996 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
13998 storeArg(0, Address(elements, 0));
13999 } else if (begin.is<Register>()) {
14000 // There is more than one argument and |begin| isn't a compile-time
14001 // constant. Iterate through 0..numActuals to search for |begin| and then
14002 // start copying |count| arguments from that index.
14004 LiveGeneralRegisterSet liveRegs;
14005 liveRegs.add(output);
14006 liveRegs.add(begin.as<Register>());
14008 masm.PushRegsInMask(liveRegs);
14010 Register elements = output;
14011 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14013 Register argIndex = begin.as<Register>();
14015 Register index = temp;
14016 masm.move32(Imm32(0), index);
14018 Label doneLoop;
14019 for (uint32_t i = 0; i < numActuals; ++i) {
14020 Label next;
14021 masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);
14023 storeArg(i, BaseObjectElementIndex(elements, index));
14025 masm.add32(Imm32(1), index);
14026 masm.add32(Imm32(1), argIndex);
14028 if (count.is<Register>()) {
14029 masm.branch32(Assembler::GreaterThanOrEqual, index,
14030 count.as<Register>(), &doneLoop);
14031 } else {
14032 masm.branch32(Assembler::GreaterThanOrEqual, index,
14033 Imm32(count.as<int32_t>()), &doneLoop);
14036 masm.bind(&next);
14038 masm.bind(&doneLoop);
14040 masm.PopRegsInMask(liveRegs);
14041 } else {
14042 // There is more than one argument and |begin| is a compile-time constant.
14044 Register elements = temp;
14045 masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);
14047 int32_t argIndex = begin.as<int32_t>();
14049 int32_t index = 0;
14051 Label doneLoop;
14052 for (uint32_t i = argIndex; i < numActuals; ++i) {
14053 storeArg(i, Address(elements, index * sizeof(Value)));
14055 index += 1;
14057 if (count.is<Register>()) {
14058 masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
14059 Imm32(index), &doneLoop);
14060 } else {
14061 if (index >= count.as<int32_t>()) {
14062 break;
14066 masm.bind(&doneLoop);
14069 // Determine if we have to emit post-write barrier.
14071 // If either |begin| or |count| is a constant, use their value directly.
14072 // Otherwise assume we copy all inline arguments from 0..numActuals.
14073 bool postWriteBarrier = false;
14074 uint32_t actualBegin = begin.match([](Register) { return 0; },
14075 [](int32_t value) { return value; });
14076 uint32_t actualCount =
14077 count.match([=](Register) { return numActuals; },
14078 [](int32_t value) -> uint32_t { return value; });
14079 for (uint32_t i = 0; i < actualCount; ++i) {
14080 ConstantOrRegister arg = getArg(actualBegin + i);
14081 if (arg.constant()) {
14082 Value v = arg.value();
14083 if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
14084 postWriteBarrier = true;
14086 } else {
14087 MIRType type = arg.reg().type();
14088 if (type == MIRType::Value || NeedsPostBarrier(type)) {
14089 postWriteBarrier = true;
14094 // Emit a post-write barrier if |output| is tenured and we couldn't
14095 // determine at compile-time that no barrier is needed.
14096 if (postWriteBarrier) {
14097 masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);
14099 LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
14100 volatileRegs.takeUnchecked(temp);
14101 if (output.volatile_()) {
14102 volatileRegs.addUnchecked(output);
14105 masm.PushRegsInMask(volatileRegs);
14106 emitPostWriteBarrier(output);
14107 masm.PopRegsInMask(volatileRegs);
14110 masm.bind(&done);
14113 void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
14114 Register value = ToRegister(lir->value());
14115 Register length = ToRegister(lir->length());
14116 Register output = ToRegister(lir->output());
14118 masm.move32(value, output);
14120 Label positive;
14121 masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);
14123 Label done;
14124 masm.add32(length, output);
14125 masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
14126 masm.move32(Imm32(0), output);
14127 masm.jump(&done);
14129 masm.bind(&positive);
14130 masm.cmp32Move32(Assembler::LessThan, length, value, length, output);
14132 masm.bind(&done);
14135 void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
14136 Label skipCall;
14138 Register output = ToRegister(lir->output());
14139 Register sep = ToRegister(lir->separator());
14140 Register array = ToRegister(lir->array());
14141 Register temp = ToRegister(lir->temp0());
14143 // Fast path for simple length <= 1 cases.
14145 masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
14146 Address length(temp, ObjectElements::offsetOfLength());
14147 Address initLength(temp, ObjectElements::offsetOfInitializedLength());
14149 // Check for length == 0
14150 Label notEmpty;
14151 masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
14152 const JSAtomState& names = gen->runtime->names();
14153 masm.movePtr(ImmGCPtr(names.empty_), output);
14154 masm.jump(&skipCall);
14156 masm.bind(&notEmpty);
14157 Label notSingleString;
14158 // Check for length == 1, initializedLength >= 1, arr[0].isString()
14159 masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
14160 masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);
14162 Address elem0(temp, 0);
14163 masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);
14165 // At this point, 'output' can be used as a scratch register, since we're
14166 // guaranteed to succeed.
14167 masm.unboxString(elem0, output);
14168 masm.jump(&skipCall);
14169 masm.bind(&notSingleString);
14172 pushArg(sep);
14173 pushArg(array);
14175 using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
14176 callVM<Fn, jit::ArrayJoin>(lir);
14177 masm.bind(&skipCall);
14180 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
14181 Register object = ToRegister(lir->object());
14183 pushArg(object);
14185 using Fn = JSObject* (*)(JSContext*, HandleObject);
14186 callVM<Fn, jit::ObjectKeys>(lir);
14189 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
14190 Register object = ToRegister(lir->object());
14192 pushArg(object);
14194 using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
14195 callVM<Fn, jit::ObjectKeysLength>(lir);
14198 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
14199 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14200 TypedOrValueRegister val =
14201 toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
14202 lir->mir()->value()->type())
14203 .reg();
14204 Register output = ToRegister(lir->output());
14205 Register temp0 = ToRegister(lir->temp0());
14206 Register temp1 = ToRegister(lir->temp1());
14208 IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
14209 addIC(lir, allocateIC(ic));
14212 void CodeGenerator::visitOptimizeSpreadCallCache(
14213 LOptimizeSpreadCallCache* lir) {
14214 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14215 ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::ValueIndex);
14216 ValueOperand output = ToOutValue(lir);
14217 Register temp = ToRegister(lir->temp0());
14219 IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
14220 addIC(lir, allocateIC(ic));
14223 void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
14224 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14225 Register iter = ToRegister(lir->iter());
14226 Register temp = ToRegister(lir->temp0());
14227 CompletionKind kind = CompletionKind(lir->mir()->completionKind());
14229 IonCloseIterIC ic(liveRegs, iter, temp, kind);
14230 addIC(lir, allocateIC(ic));
14233 void CodeGenerator::visitOptimizeGetIteratorCache(
14234 LOptimizeGetIteratorCache* lir) {
14235 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
14236 ValueOperand val = ToValue(lir, LOptimizeGetIteratorCache::ValueIndex);
14237 Register output = ToRegister(lir->output());
14238 Register temp = ToRegister(lir->temp0());
14240 IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
14241 addIC(lir, allocateIC(ic));
14244 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
14245 const Register obj = ToRegister(lir->iterator());
14246 const ValueOperand output = ToOutValue(lir);
14247 const Register temp = ToRegister(lir->temp0());
14249 masm.iteratorMore(obj, output, temp);
14252 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
14253 ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
14254 Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
14255 Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
14257 masm.branchTestMagic(Assembler::Equal, input, ifTrue);
14259 if (!isNextBlock(lir->ifFalse()->lir())) {
14260 masm.jump(ifFalse);
14264 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
14265 const Register obj = ToRegister(lir->object());
14266 const Register temp0 = ToRegister(lir->temp0());
14267 const Register temp1 = ToRegister(lir->temp1());
14268 const Register temp2 = ToRegister(lir->temp2());
14270 masm.iteratorClose(obj, temp0, temp1, temp2);
14273 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
14274 // read number of actual arguments from the JS frame.
14275 Register argc = ToRegister(lir->output());
14276 masm.loadNumActualArgs(FramePointer, argc);
14279 void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
14280 ValueOperand result = ToOutValue(lir);
14281 const LAllocation* index = lir->index();
14282 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14284 // This instruction is used to access actual arguments and formal arguments.
14285 // The number of Values on the stack is |max(numFormals, numActuals)|, so we
14286 // assert |index < numFormals || index < numActuals| in debug builds.
14287 DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();
14289 if (index->isConstant()) {
14290 int32_t i = index->toConstant()->toInt32();
14291 #ifdef DEBUG
14292 if (uint32_t(i) >= numFormals) {
14293 Label ok;
14294 Register argc = result.scratchReg();
14295 masm.loadNumActualArgs(FramePointer, argc);
14296 masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
14297 masm.assumeUnreachable("Invalid argument index");
14298 masm.bind(&ok);
14300 #endif
14301 Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
14302 masm.loadValue(argPtr, result);
14303 } else {
14304 Register i = ToRegister(index);
14305 #ifdef DEBUG
14306 Label ok;
14307 Register argc = result.scratchReg();
14308 masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
14309 masm.loadNumActualArgs(FramePointer, argc);
14310 masm.branch32(Assembler::Above, argc, i, &ok);
14311 masm.assumeUnreachable("Invalid argument index");
14312 masm.bind(&ok);
14313 #endif
14314 BaseValueIndex argPtr(FramePointer, i, argvOffset);
14315 masm.loadValue(argPtr, result);
14319 void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
14320 ValueOperand result = ToOutValue(lir);
14321 Register index = ToRegister(lir->index());
14322 Register length = ToRegister(lir->length());
14323 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
14324 size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
14326 Label outOfBounds, done;
14327 masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);
14329 BaseValueIndex argPtr(FramePointer, index, argvOffset);
14330 masm.loadValue(argPtr, result);
14331 masm.jump(&done);
14333 masm.bind(&outOfBounds);
14334 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
14335 masm.moveValue(UndefinedValue(), result);
14337 masm.bind(&done);
14340 void CodeGenerator::visitRest(LRest* lir) {
14341 Register numActuals = ToRegister(lir->numActuals());
14342 Register temp0 = ToRegister(lir->temp0());
14343 Register temp1 = ToRegister(lir->temp1());
14344 Register temp2 = ToRegister(lir->temp2());
14345 unsigned numFormals = lir->mir()->numFormals();
14347 if (Shape* shape = lir->mir()->shape()) {
14348 uint32_t arrayLength = 0;
14349 uint32_t arrayCapacity = 2;
14350 gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
14351 MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
14352 allocKind = ForegroundToBackgroundAllocKind(allocKind);
14353 MOZ_ASSERT(GetGCKindSlots(allocKind) ==
14354 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
14356 Label joinAlloc, failAlloc;
14357 masm.movePtr(ImmGCPtr(shape), temp0);
14358 masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
14359 arrayLength, arrayCapacity, 0, 0,
14360 allocKind, gc::Heap::Default, &failAlloc);
14361 masm.jump(&joinAlloc);
14363 masm.bind(&failAlloc);
14364 masm.movePtr(ImmPtr(nullptr), temp2);
14366 masm.bind(&joinAlloc);
14367 } else {
14368 masm.movePtr(ImmPtr(nullptr), temp2);
14371 // Set temp1 to the address of the first actual argument.
14372 size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
14373 masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);
14375 // Compute array length: max(numActuals - numFormals, 0).
14376 Register lengthReg;
14377 if (numFormals) {
14378 lengthReg = temp0;
14379 Label emptyLength, joinLength;
14380 masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
14381 &emptyLength);
14383 masm.move32(numActuals, lengthReg);
14384 masm.sub32(Imm32(numFormals), lengthReg);
14386 // Skip formal arguments.
14387 masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);
14389 masm.jump(&joinLength);
14391 masm.bind(&emptyLength);
14393 masm.move32(Imm32(0), lengthReg);
14395 // Leave temp1 pointed to the start of actuals() when the rest-array
14396 // length is zero. We don't use |actuals() + numFormals| because
14397 // |numFormals| can be any non-negative int32 value when this MRest was
14398 // created from scalar replacement optimizations. And it seems
14399 // questionable to compute a Value* pointer which points to who knows
14400 // where.
14402 masm.bind(&joinLength);
14403 } else {
14404 // Use numActuals directly when there are no formals.
14405 lengthReg = numActuals;
14408 pushArg(temp2);
14409 pushArg(temp1);
14410 pushArg(lengthReg);
14412 using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
14413 callVM<Fn, InitRestParameter>(lir);
14416 // Create a stackmap from the given safepoint, with the structure:
14418 // <reg dump, if any>
14419 // | ++ <body (general spill)>
14420 // | | ++ <space for Frame>
14421 // | | ++ <inbound args>
14422 // | | |
14423 // Lowest Addr Highest Addr
14424 // |
14425 // framePushedAtStackMapBase
14427 // The caller owns the resulting stackmap. This assumes a grow-down stack.
14429 // For non-debug builds, if the stackmap would contain no pointers, no
14430 // stackmap is created, and nullptr is returned. For a debug build, a
14431 // stackmap is always created and returned.
14433 // Depending on the type of safepoint, the stackmap may need to account for
14434 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
14435 // isCall() == true, for which the register allocator will spill/restore all
14436 // live registers at the LIR level - in this case, the LSafepoint sees only live
14437 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
14438 // the other hand, is for LIR nodes which may manually spill/restore live
14439 // registers in codegen, in which case the stackmap must account for this. Traps
14440 // also require tracking of live registers, but spilling is handled by the trap
14441 // mechanism.
14442 static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
14443 const RegisterOffsets& trapExitLayout,
14444 size_t trapExitLayoutNumWords,
14445 size_t nInboundStackArgBytes,
14446 wasm::StackMap** result) {
14447 // Ensure this is defined on all return paths.
14448 *result = nullptr;
14450 // The size of the wasm::Frame itself.
14451 const size_t nFrameBytes = sizeof(wasm::Frame);
14453 // This is the number of bytes spilled for live registers, outside of a trap.
14454 // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
14455 const size_t nRegisterDumpBytes =
14456 MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());
14458 // As mentioned above, for WasmSafepointKind::LirCall, register spills and
14459 // restores are handled at the LIR level and there should therefore be no live
14460 // registers to handle here.
14461 MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
14462 nRegisterDumpBytes == 0);
14463 MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);
14465 // This is the number of bytes in the general spill area, below the Frame.
14466 const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
14468 // This is the number of bytes in the general spill area, the Frame, and the
14469 // incoming args, but not including any register dump area.
14470 const size_t nNonRegisterBytes =
14471 nBodyBytes + nFrameBytes + nInboundStackArgBytes;
14472 MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);
14474 // This is the number of bytes in the register dump area, if any, below the
14475 // general spill area.
14476 const size_t nRegisterBytes =
14477 (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
14478 ? (trapExitLayoutNumWords * sizeof(void*))
14479 : nRegisterDumpBytes;
14481 // This is the total number of bytes covered by the map.
14482 const DebugOnly<size_t> nTotalBytes = nNonRegisterBytes + nRegisterBytes;
14484 // Create the stackmap initially in this vector. Since most frames will
14485 // contain 128 or fewer words, heap allocation is avoided in the majority of
14486 // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
14487 // highest address in the map.
14488 wasm::StackMapBoolVector vec;
14490 // Keep track of whether we've actually seen any refs.
14491 bool hasRefs = false;
14493 // REG DUMP AREA, if any.
14494 const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
14495 GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
14496 switch (safepoint.wasmSafepointKind()) {
14497 case WasmSafepointKind::LirCall:
14498 case WasmSafepointKind::CodegenCall: {
14499 size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
14500 if (!vec.appendN(false, spilledNumWords)) {
14501 return false;
14504 for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
14505 Register reg = *wasmAnyRefRegsIter;
14506 size_t offsetFromSpillBase =
14507 safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
14508 sizeof(void*);
14509 MOZ_ASSERT(0 < offsetFromSpillBase &&
14510 offsetFromSpillBase <= spilledNumWords);
14511 size_t offsetInVector = spilledNumWords - offsetFromSpillBase;
14513 vec[offsetInVector] = true;
14514 hasRefs = true;
14517 // Float and vector registers do not have to be handled; they cannot
14518 // contain wasm anyrefs, and they are spilled after general-purpose
14519 // registers. Gprs are therefore closest to the spill base and thus their
14520 // offset calculation does not need to account for other spills.
14521 } break;
14522 case WasmSafepointKind::Trap: {
14523 if (!vec.appendN(false, trapExitLayoutNumWords)) {
14524 return false;
14526 for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
14527 Register reg = *wasmAnyRefRegsIter;
14528 size_t offsetFromTop = trapExitLayout.getOffset(reg);
14530 // If this doesn't hold, the associated register wasn't saved by
14531 // the trap exit stub. Better to crash now than much later, in
14532 // some obscure place, and possibly with security consequences.
14533 MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
14535 // offsetFromTop is an offset in words down from the highest
14536 // address in the exit stub save area. Switch it around to be an
14537 // offset up from the bottom of the (integer register) save area.
14538 size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
14540 vec[offsetFromBottom] = true;
14541 hasRefs = true;
14543 } break;
14544 default:
14545 MOZ_CRASH("unreachable");
14548 // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
14549 // Deal with roots on the stack.
14550 size_t wordsSoFar = vec.length();
14551 if (!vec.appendN(false, nNonRegisterBytes / sizeof(void*))) {
14552 return false;
14554 const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
14555 for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
14556 // The following needs to correspond with JitFrameLayout::slotRef
14557 // wasmAnyRefSlot.stack == 0 means the slot is in the args area
14558 if (wasmAnyRefSlot.stack) {
14559 // It's a slot in the body allocation, so .slot is interpreted
14560 // as an index downwards from the Frame*
14561 MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
14562 uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
14563 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
14564 vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
14565 } else {
14566 // It's an argument slot
14567 MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
14568 uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
14569 MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
14570 vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
14572 hasRefs = true;
14575 #ifndef DEBUG
14576 // We saw no references, and this is a non-debug build, so don't bother
14577 // building the stackmap.
14578 if (!hasRefs) {
14579 return true;
14581 #endif
14583 // Convert vec into a wasm::StackMap.
14584 MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
14585 wasm::StackMap* stackMap =
14586 wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
14587 if (!stackMap) {
14588 return false;
14590 if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
14591 stackMap->setExitStubWords(trapExitLayoutNumWords);
14594 // Record in the map, how far down from the highest address the Frame* is.
14595 // Take the opportunity to check that we haven't marked any part of the
14596 // Frame itself as a pointer.
14597 stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
14598 sizeof(void*));
14599 #ifdef DEBUG
14600 for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
14601 MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
14602 stackMap->header.frameOffsetFromTop + i) == 0);
14604 #endif
14606 *result = stackMap;
14607 return true;
14610 bool CodeGenerator::generateWasm(
14611 wasm::CallIndirectId callIndirectId, wasm::BytecodeOffset trapOffset,
14612 const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
14613 size_t trapExitLayoutNumWords, wasm::FuncOffsets* offsets,
14614 wasm::StackMaps* stackMaps, wasm::Decoder* decoder) {
14615 AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");
14617 JitSpew(JitSpew_Codegen, "# Emitting wasm code");
14619 size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
14620 inboundStackArgBytes_ = nInboundStackArgBytes;
14622 wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
14623 offsets);
14625 MOZ_ASSERT(masm.framePushed() == 0);
14627 // Very large frames are implausible, probably an attack.
14628 if (frameSize() > wasm::MaxFrameSize) {
14629 return decoder->fail(decoder->beginOffset(), "stack frame is too large");
14632 if (omitOverRecursedCheck()) {
14633 masm.reserveStack(frameSize());
14634 } else {
14635 std::pair<CodeOffset, uint32_t> pair =
14636 masm.wasmReserveStackChecked(frameSize(), trapOffset);
14637 CodeOffset trapInsnOffset = pair.first;
14638 size_t nBytesReservedBeforeTrap = pair.second;
14640 wasm::StackMap* functionEntryStackMap = nullptr;
14641 if (!CreateStackMapForFunctionEntryTrap(
14642 argTypes, trapExitLayout, trapExitLayoutNumWords,
14643 nBytesReservedBeforeTrap, nInboundStackArgBytes,
14644 &functionEntryStackMap)) {
14645 return false;
14648 // In debug builds, we'll always have a stack map, even if there are no
14649 // refs to track.
14650 MOZ_ASSERT(functionEntryStackMap);
14652 if (functionEntryStackMap &&
14653 !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
14654 functionEntryStackMap)) {
14655 functionEntryStackMap->destroy();
14656 return false;
14660 MOZ_ASSERT(masm.framePushed() == frameSize());
14662 if (!generateBody()) {
14663 return false;
14666 masm.bind(&returnLabel_);
14667 wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
14669 if (!generateOutOfLineCode()) {
14670 return false;
14673 masm.flush();
14674 if (masm.oom()) {
14675 return false;
14678 offsets->end = masm.currentOffset();
14680 MOZ_ASSERT(!masm.failureLabel()->used());
14681 MOZ_ASSERT(snapshots_.listSize() == 0);
14682 MOZ_ASSERT(snapshots_.RVATableSize() == 0);
14683 MOZ_ASSERT(recovers_.size() == 0);
14684 MOZ_ASSERT(graph.numConstants() == 0);
14685 MOZ_ASSERT(osiIndices_.empty());
14686 MOZ_ASSERT(icList_.empty());
14687 MOZ_ASSERT(safepoints_.size() == 0);
14688 MOZ_ASSERT(!scriptCounts_);
14690 // Convert the safepoints to stackmaps and add them to our running
14691 // collection thereof.
14692 for (CodegenSafepointIndex& index : safepointIndices_) {
14693 wasm::StackMap* stackMap = nullptr;
14694 if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
14695 trapExitLayoutNumWords,
14696 nInboundStackArgBytes, &stackMap)) {
14697 return false;
14700 // In debug builds, we'll always have a stack map.
14701 MOZ_ASSERT(stackMap);
14702 if (!stackMap) {
14703 continue;
14706 if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
14707 stackMap->destroy();
14708 return false;
14712 return true;
14715 bool CodeGenerator::generate() {
14716 AutoCreatedBy acb(masm, "CodeGenerator::generate");
14718 JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
14719 gen->outerInfo().script()->filename(),
14720 gen->outerInfo().script()->lineno(),
14721 gen->outerInfo().script()->column().oneOriginValue());
14723 // Initialize native code table with an entry to the start of
14724 // top-level script.
14725 InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
14726 jsbytecode* startPC = tree->script()->code();
14727 BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
14728 if (!addNativeToBytecodeEntry(startSite)) {
14729 return false;
14732 if (!safepoints_.init(gen->alloc())) {
14733 return false;
14736 perfSpewer_.recordOffset(masm, "Prologue");
14737 if (!generatePrologue()) {
14738 return false;
14741 // Reset native => bytecode map table with top-level script and startPc.
14742 if (!addNativeToBytecodeEntry(startSite)) {
14743 return false;
14746 if (!generateBody()) {
14747 return false;
14750 // Reset native => bytecode map table with top-level script and startPc.
14751 if (!addNativeToBytecodeEntry(startSite)) {
14752 return false;
14755 perfSpewer_.recordOffset(masm, "Epilogue");
14756 if (!generateEpilogue()) {
14757 return false;
14760 // Reset native => bytecode map table with top-level script and startPc.
14761 if (!addNativeToBytecodeEntry(startSite)) {
14762 return false;
14765 perfSpewer_.recordOffset(masm, "InvalidateEpilogue");
14766 generateInvalidateEpilogue();
14768 // native => bytecode entries for OOL code will be added
14769 // by CodeGeneratorShared::generateOutOfLineCode
14770 perfSpewer_.recordOffset(masm, "OOLCode");
14771 if (!generateOutOfLineCode()) {
14772 return false;
14775 // Add terminal entry.
14776 if (!addNativeToBytecodeEntry(startSite)) {
14777 return false;
14780 // Dump Native to bytecode entries to spew.
14781 dumpNativeToBytecodeEntries();
14783 // We encode safepoints after the OSI-point offsets have been determined.
14784 if (!encodeSafepoints()) {
14785 return false;
14788 return !masm.oom();
14791 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
14792 IonCompilationId compilationId,
14793 const WarpSnapshot* snapshot,
14794 bool* isValid) {
14795 MOZ_ASSERT(!*isValid);
14796 RecompileInfo recompileInfo(script, compilationId);
14798 JitZone* jitZone = cx->zone()->jitZone();
14800 for (const auto* scriptSnapshot : snapshot->scripts()) {
14801 JSScript* inlinedScript = scriptSnapshot->script();
14802 if (inlinedScript == script) {
14803 continue;
14806 // TODO(post-Warp): This matches FinishCompilation and is necessary to
14807 // ensure in-progress compilations are canceled when an inlined functon
14808 // becomes a debuggee. See the breakpoint-14.js jit-test.
14809 // When TI is gone, try to clean this up by moving AddInlinedCompilations to
14810 // WarpOracle so that we can handle this as part of addPendingRecompile
14811 // instead of requiring this separate check.
14812 if (inlinedScript->isDebuggee()) {
14813 *isValid = false;
14814 return true;
14817 if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
14818 return false;
14822 *isValid = true;
14823 return true;
14826 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
14827 AutoCreatedBy acb(masm, "CodeGenerator::link");
14829 // We cancel off-thread Ion compilations in a few places during GC, but if
14830 // this compilation was performed off-thread it will already have been
14831 // removed from the relevant lists by this point. Don't allow GC here.
14832 JS::AutoAssertNoGC nogc(cx);
14834 RootedScript script(cx, gen->outerInfo().script());
14835 MOZ_ASSERT(!script->hasIonScript());
14837 // Perform any read barriers which were skipped while compiling the
14838 // script, which may have happened off-thread.
14839 JitZone* jitZone = cx->zone()->jitZone();
14840 jitZone->performStubReadBarriers(zoneStubsToReadBarrier_);
14842 if (scriptCounts_ && !script->hasScriptCounts() &&
14843 !script->initScriptCounts(cx)) {
14844 return false;
14847 IonCompilationId compilationId =
14848 cx->runtime()->jitRuntime()->nextCompilationId();
14849 jitZone->currentCompilationIdRef().emplace(compilationId);
14850 auto resetCurrentId = mozilla::MakeScopeExit(
14851 [jitZone] { jitZone->currentCompilationIdRef().reset(); });
14853 // Record constraints. If an error occured, returns false and potentially
14854 // prevent future compilations. Otherwise, if an invalidation occured, then
14855 // skip the current compilation.
14856 bool isValid = false;
14858 // If an inlined script is invalidated (for example, by attaching
14859 // a debugger), we must also invalidate the parent IonScript.
14860 if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
14861 return false;
14863 if (!isValid) {
14864 return true;
14867 uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
14869 size_t numNurseryObjects = snapshot->nurseryObjects().length();
14871 IonScript* ionScript = IonScript::New(
14872 cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
14873 snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
14874 graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
14875 osiIndices_.length(), icList_.length(), runtimeData_.length(),
14876 safepoints_.size());
14877 if (!ionScript) {
14878 return false;
14880 #ifdef DEBUG
14881 ionScript->setICHash(snapshot->icHash());
14882 #endif
14884 auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
14885 // Use js_free instead of IonScript::Destroy: the cache list is still
14886 // uninitialized.
14887 js_free(ionScript);
14890 Linker linker(masm);
14891 JitCode* code = linker.newCode(cx, CodeKind::Ion);
14892 if (!code) {
14893 return false;
14896 // Encode native to bytecode map if profiling is enabled.
14897 if (isProfilerInstrumentationEnabled()) {
14898 // Generate native-to-bytecode main table.
14899 IonEntry::ScriptList scriptList;
14900 if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
14901 return false;
14904 uint8_t* ionTableAddr =
14905 ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
14906 JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
14908 // Construct the IonEntry that will go into the global table.
14909 auto entry = MakeJitcodeGlobalEntry<IonEntry>(
14910 cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable);
14911 if (!entry) {
14912 return false;
14914 (void)nativeToBytecodeMap_.release(); // Table is now owned by |entry|.
14916 // Add entry to the global table.
14917 JitcodeGlobalTable* globalTable =
14918 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
14919 if (!globalTable->addEntry(std::move(entry))) {
14920 return false;
14923 // Mark the jitcode as having a bytecode map.
14924 code->setHasBytecodeMap();
14925 } else {
14926 // Add a dumy jitcodeGlobalTable entry.
14927 auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
14928 code->rawEnd());
14929 if (!entry) {
14930 return false;
14933 // Add entry to the global table.
14934 JitcodeGlobalTable* globalTable =
14935 cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
14936 if (!globalTable->addEntry(std::move(entry))) {
14937 return false;
14940 // Mark the jitcode as having a bytecode map.
14941 code->setHasBytecodeMap();
14944 ionScript->setMethod(code);
14946 // If the Gecko Profiler is enabled, mark IonScript as having been
14947 // instrumented accordingly.
14948 if (isProfilerInstrumentationEnabled()) {
14949 ionScript->setHasProfilingInstrumentation();
14952 Assembler::PatchDataWithValueCheck(
14953 CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
14954 ImmPtr((void*)-1));
14956 for (CodeOffset offset : ionScriptLabels_) {
14957 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
14958 ImmPtr(ionScript), ImmPtr((void*)-1));
14961 for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
14962 void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
14963 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
14964 ImmPtr(entry), ImmPtr((void*)-1));
14967 // for generating inline caches during the execution.
14968 if (runtimeData_.length()) {
14969 ionScript->copyRuntimeData(&runtimeData_[0]);
14971 if (icList_.length()) {
14972 ionScript->copyICEntries(&icList_[0]);
14975 for (size_t i = 0; i < icInfo_.length(); i++) {
14976 IonIC& ic = ionScript->getICFromIndex(i);
14977 Assembler::PatchDataWithValueCheck(
14978 CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
14979 ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
14980 Assembler::PatchDataWithValueCheck(
14981 CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
14982 ImmPtr((void*)-1));
14985 JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
14986 (void*)code->raw());
14988 ionScript->setInvalidationEpilogueDataOffset(
14989 invalidateEpilogueData_.offset());
14990 if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
14991 ionScript->setOsrPc(osrPc);
14992 ionScript->setOsrEntryOffset(getOsrEntryOffset());
14994 ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
14996 perfSpewer_.saveProfile(cx, script, code);
14998 #ifdef MOZ_VTUNE
14999 vtune::MarkScript(code, script, "ion");
15000 #endif
15002 // Set a Ion counter hint for this script.
15003 if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
15004 JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
15005 jitHints->recordIonCompilation(script);
15008 // for marking during GC.
15009 if (safepointIndices_.length()) {
15010 ionScript->copySafepointIndices(&safepointIndices_[0]);
15012 if (safepoints_.size()) {
15013 ionScript->copySafepoints(&safepoints_);
15016 // for recovering from an Ion Frame.
15017 if (osiIndices_.length()) {
15018 ionScript->copyOsiIndices(&osiIndices_[0]);
15020 if (snapshots_.listSize()) {
15021 ionScript->copySnapshots(&snapshots_);
15023 MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
15024 if (recovers_.size()) {
15025 ionScript->copyRecovers(&recovers_);
15027 if (graph.numConstants()) {
15028 const Value* vp = graph.constantPool();
15029 ionScript->copyConstants(vp);
15030 for (size_t i = 0; i < graph.numConstants(); i++) {
15031 const Value& v = vp[i];
15032 if (v.isGCThing()) {
15033 if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
15034 sb->putWholeCell(script);
15035 break;
15041 // Attach any generated script counts to the script.
15042 if (IonScriptCounts* counts = extractScriptCounts()) {
15043 script->addIonCounts(counts);
15046 // WARNING: Code after this point must be infallible!
15048 // Copy the list of nursery objects. Note that the store buffer can add
15049 // HeapPtr edges that must be cleared in IonScript::Destroy. See the
15050 // infallibility warning above.
15051 const auto& nurseryObjects = snapshot->nurseryObjects();
15052 for (size_t i = 0; i < nurseryObjects.length(); i++) {
15053 ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
15056 // Transfer ownership of the IonScript to the JitScript. At this point enough
15057 // of the IonScript must be initialized for IonScript::Destroy to work.
15058 freeIonScript.release();
15059 script->jitScript()->setIonScript(script, ionScript);
15061 return true;
15064 // An out-of-line path to convert a boxed int32 to either a float or double.
15065 class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
15066 LUnboxFloatingPoint* unboxFloatingPoint_;
15068 public:
15069 explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
15070 : unboxFloatingPoint_(unboxFloatingPoint) {}
15072 void accept(CodeGenerator* codegen) override {
15073 codegen->visitOutOfLineUnboxFloatingPoint(this);
15076 LUnboxFloatingPoint* unboxFloatingPoint() const {
15077 return unboxFloatingPoint_;
15081 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
15082 const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
15083 const LDefinition* result = lir->output();
15085 // Out-of-line path to convert int32 to double or bailout
15086 // if this instruction is fallible.
15087 OutOfLineUnboxFloatingPoint* ool =
15088 new (alloc()) OutOfLineUnboxFloatingPoint(lir);
15089 addOutOfLineCode(ool, lir->mir());
15091 FloatRegister resultReg = ToFloatRegister(result);
15092 masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
15093 masm.unboxDouble(box, resultReg);
15094 if (lir->type() == MIRType::Float32) {
15095 masm.convertDoubleToFloat32(resultReg, resultReg);
15097 masm.bind(ool->rejoin());
// Out-of-line continuation for visitUnboxFloatingPoint: the value was not a
// boxed double. If the instruction is fallible, bail out unless it is a boxed
// int32; otherwise convert the int32 payload to the requested float type.
void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
    OutOfLineUnboxFloatingPoint* ool) {
  LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
  const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);

  if (ins->mir()->fallible()) {
    // Anything that is neither double (checked inline) nor int32 bails out.
    Label bail;
    masm.branchTestInt32(Assembler::NotEqual, value, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
  masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
                                 ins->type());
  masm.jump(ool->rejoin());
}
// Emit a VM call to BindVarOperation with the environment chain object.
void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
  pushArg(ToRegister(lir->environmentChain()));

  using Fn = JSObject* (*)(JSContext*, JSObject*);
  callVM<Fn, BindVarOperation>(lir);
}
// Megamorphic obj[id] = value. Tries the inline megamorphic set-slot cache
// first; on a miss, falls through to a VM call. After a cache hit, emits a
// post-write barrier if a nursery value was stored into a tenured object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->getOperand(0));
  ValueOperand idVal = ToValue(lir, LMegamorphicSetElement::IndexIndex);
  ValueOperand value = ToValue(lir, LMegamorphicSetElement::ValueIndex);

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  // x86 variant takes fewer temps; the callback emits the pre-barrier for the
  // slot being overwritten.
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: do the full VM call.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir, LMegamorphicSetElement::ValueIndex));
  pushArg(ToValue(lir, LMegamorphicSetElement::IndexIndex));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Post-write barrier: only needed when a tenured object now references a
  // nursery-allocated value.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  saveVolatile(temp0);
  emitPostWriteBarrier(obj);
  restoreVolatile(temp0);

  masm.bind(&done);
}
// Load the scripted proxy's handler object from its reserved-slots storage
// (slot ScriptedProxyHandler::HANDLER_EXTRA) into the output value.
void CodeGenerator::visitLoadScriptedProxyHandler(
    LLoadScriptedProxyHandler* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  ValueOperand output = ToOutValue(ins);

  // First load the reserved-slots pointer, then the handler slot from it.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               output.scratchReg());
  masm.loadValue(
      Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
                                       ScriptedProxyHandler::HANDLER_EXTRA)),
      output);
}
#ifdef JS_PUNBOX64
// Validate the result of a scripted proxy [[Get]] against the target's own
// property, but only when the target object requires result validation; the
// actual check happens in the out-of-line VM call.
void CodeGenerator::visitCheckScriptedProxyGetResult(
    LCheckScriptedProxyGetResult* ins) {
  ValueOperand target = ToValue(ins, LCheckScriptedProxyGetResult::TargetIndex);
  ValueOperand value = ToValue(ins, LCheckScriptedProxyGetResult::ValueIndex);
  ValueOperand id = ToValue(ins, LCheckScriptedProxyGetResult::IdIndex);
  Register scratch = ToRegister(ins->temp0());
  Register scratch2 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
      ins, ArgList(scratch, id, value), StoreValueTo(value));

  // Fast path: skip validation entirely when the target doesn't need it.
  masm.unboxObject(target, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
                                                  scratch2, ool->entry());
  masm.bind(ool->rejoin());
}
#endif
// Convert an id value to a string or symbol: strings and symbols pass
// through unchanged; int32 ids are converted to strings (static-strings fast
// path, Int32ToString VM call otherwise); all other types bail out.
void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
  ValueOperand id = ToValue(ins, LIdToStringOrSymbol::IdIndex);
  ValueOperand output = ToOutValue(ins);
  Register scratch = ToRegister(ins->temp0());

  masm.moveValue(id, output);

  Label done, callVM;
  Label bail;
  {
    // Scope the tag scratch register so it is released before the unbox.
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
  }

  masm.unboxInt32(output, scratch);

  using Fn = JSLinearString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));

  // Try the runtime's static-strings table first; fall back to the VM call.
  masm.lookupStaticIntString(scratch, output.scratchReg(),
                             gen->runtime->staticStrings(), ool->entry());

  masm.bind(ool->rejoin());
  masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
  masm.bind(&done);

  bailoutFrom(&bail, ins->snapshot());
}
// Load a fixed slot of a native object as a boxed Value.
void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
}
// Load a fixed slot of a native object directly as an unboxed typed value.
void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();
  AnyRegister result = ToAnyRegister(ins->getDef(0));
  MIRType type = ins->mir()->type();

  masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
                        type, result);
}
// Load a Value from |src| and unbox it into |dest| as |type|. For Double the
// load is always fallible-tolerant via ensureDouble (int32 is converted).
// Otherwise, when |fallible| is set, a type mismatch jumps to |fail|; when
// not fallible the unbox is unchecked.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Label* fail) {
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    // ensureDouble handles both boxed doubles and boxed int32s; anything
    // else jumps to |fail|.
    masm.ensureDouble(src, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: the type is statically known to match.
  masm.loadUnboxedValue(src, type, dest);
}
// Fused load-and-unbox of a fixed slot; bails out on a type mismatch when
// the MIR instruction is fallible.
void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Fused load-and-unbox of a dynamic slot (indexed off the slots pointer);
// bails out on a type mismatch when the MIR instruction is fallible.
void CodeGenerator::visitLoadDynamicSlotAndUnbox(
    LLoadDynamicSlotAndUnbox* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address address(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Fused load-and-unbox of a dense element, addressed either by a constant
// index or by an index register; bails out on a type mismatch when fallible.
void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  const MLoadElementAndUnbox* mir = ins->mir();
  MIRType type = mir->type();
  Register elements = ToRegister(ins->elements());
  AnyRegister result = ToAnyRegister(ins->output());

  Label bail;
  if (ins->index()->isConstant()) {
    // Constant index: the offset multiplication cannot overflow int32.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(ins->index()) * sizeof(Value);
    Address address(elements, offset);
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  } else {
    BaseObjectElementIndex address(elements, ToRegister(ins->index()));
    EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
  }

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Out-of-line path that atomizes a non-atom string loaded from a slot and
// writes the atomized result back to the slot (see visitOutOfLineAtomizeSlot
// for the contract on stringReg/slotAddr/dest).
class OutOfLineAtomizeSlot : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;         // instruction this OOL path belongs to
  Register stringReg_;        // holds the unboxed (non-atom) string pointer
  Address slotAddr_;          // slot to update with the atomized StringValue
  TypedOrValueRegister dest_; // where the caller expects the result

 public:
  OutOfLineAtomizeSlot(LInstruction* lir, Register stringReg, Address slotAddr,
                       TypedOrValueRegister dest)
      : lir_(lir), stringReg_(stringReg), slotAddr_(slotAddr), dest_(dest) {}

  void accept(CodeGenerator* codegen) final {
    codegen->visitOutOfLineAtomizeSlot(this);
  }

  LInstruction* lir() const { return lir_; }
  Register stringReg() const { return stringReg_; }
  Address slotAddr() const { return slotAddr_; }
  TypedOrValueRegister dest() const { return dest_; }
};
// Call AtomizeString in the VM, store the resulting atom back into the slot
// (with pre-barrier), and leave the atom in the destination register(s).
void CodeGenerator::visitOutOfLineAtomizeSlot(OutOfLineAtomizeSlot* ool) {
  LInstruction* lir = ool->lir();
  Register stringReg = ool->stringReg();
  Address slotAddr = ool->slotAddr();
  TypedOrValueRegister dest = ool->dest();

  // This code is called with a non-atomic string in |stringReg|.
  // When it returns, |stringReg| contains an unboxed pointer to an
  // atomized version of that string, and |slotAddr| contains a
  // StringValue pointing to that atom. If |dest| is a ValueOperand,
  // it contains the same StringValue; otherwise we assert that |dest|
  // is |stringReg|.

  saveLive(lir);
  pushArg(stringReg);

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  callVM<Fn, js::AtomizeString>(lir);
  StoreRegisterTo(stringReg).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(stringReg).clobbered());

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Overwriting the old slot value needs a pre-barrier.
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
  // We need a temp register for the nursery check. Spill something.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  allRegs.take(stringReg);
  Register temp = allRegs.takeAny();
  masm.push(temp);

  Label tenured;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp, &tenured);
  masm.assumeUnreachable("AtomizeString returned a nursery pointer");
  masm.bind(&tenured);

  masm.pop(temp);
#endif

  masm.jump(ool->rejoin());
}
// If the string in |stringReg| is not already an atom (ATOM_BIT clear), take
// the out-of-line path that atomizes it and updates |slotAddr| and |dest|.
void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
                                         Address slotAddr,
                                         TypedOrValueRegister dest) {
  OutOfLineAtomizeSlot* ool =
      new (alloc()) OutOfLineAtomizeSlot(ins, stringReg, slotAddr, dest);
  addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
  masm.branchTest32(Assembler::Zero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->entry());
  masm.bind(ool->rejoin());
}
// Load a fixed slot as a Value and, if it holds a string, ensure that string
// is an atom (atomizing it in place in the slot if necessary).
void CodeGenerator::visitLoadFixedSlotAndAtomize(
    LLoadFixedSlotAndAtomize* ins) {
  Register obj = ToRegister(ins->getOperand(0));
  Register temp = ToRegister(ins->temp0());
  size_t slot = ins->mir()->slot();
  ValueOperand result = ToOutValue(ins);

  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  masm.loadValue(slotAddr, result);

  // Non-string values need no atomization.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a dynamic slot as a Value and, if it holds a string, ensure that
// string is an atom (atomizing it in place in the slot if necessary).
void CodeGenerator::visitLoadDynamicSlotAndAtomize(
    LLoadDynamicSlotAndAtomize* ins) {
  ValueOperand result = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());
  Register base = ToRegister(ins->input());
  int32_t offset = ins->mir()->slot() * sizeof(js::Value);

  Address slotAddr(base, offset);
  masm.loadValue(slotAddr, result);

  // Non-string values need no atomization.
  Label notString;
  masm.branchTestString(Assembler::NotEqual, result, &notString);
  masm.unboxString(result, temp);
  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  masm.bind(&notString);
}
// Load a fixed slot, unbox it as a string (bailing out when fallible and the
// slot isn't a string), then ensure the string is an atom.
void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
    LLoadFixedSlotUnboxAndAtomize* ins) {
  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->object());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Load a dynamic slot, unbox it as a string (bailing out when fallible and
// the slot isn't a string), then ensure the string is an atom.
void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
    LLoadDynamicSlotUnboxAndAtomize* ins) {
  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  MOZ_ASSERT(mir->type() == MIRType::String);
  Register input = ToRegister(ins->slots());
  AnyRegister result = ToAnyRegister(ins->output());
  size_t slot = mir->slot();

  Address slotAddr(input, slot * sizeof(JS::Value));

  Label bail;
  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
                   &bail);
  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
                       TypedOrValueRegister(MIRType::String, result));

  if (mir->fallible()) {
    bailoutFrom(&bail, ins->snapshot());
  }
}
// Change the object's shape (with pre-barrier on the old shape) and store a
// value into the newly-added fixed or dynamic slot.
void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAddAndStoreSlot::ValueIndex);
  const Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());

  Shape* shape = ins->mir()->shape();
  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
    EmitPreBarrier(masm, addr, MIRType::Shape);
  });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.

  uint32_t offset = ins->mir()->slotOffset();
  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
    Address slot(obj, offset);
    masm.storeValue(value, slot);
  } else {
    // Dynamic slot: go through the object's slots pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
    Address slot(maybeTemp, offset);
    masm.storeValue(value, slot);
  }
}
// Grow the object's dynamic-slots allocation via an ABI call to
// NativeObject::growSlotsPure (bailing out on failure), then update the
// shape and store the value into the new slot.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::ValueIndex);
  const Register temp0 = ToRegister(ins->temp0());
  const Register temp1 = ToRegister(ins->temp1());

  // Preserve obj and value across the ABI call.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returns false on allocation failure.
  bailoutIfFalseBool(temp0, ins->snapshot());

  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
// VM call that adds a slot for the new shape and invokes the class's
// addProperty hook.
void CodeGenerator::visitAddSlotAndCallAddPropHook(
    LAddSlotAndCallAddPropHook* ins) {
  const Register obj = ToRegister(ins->object());
  const ValueOperand value =
      ToValue(ins, LAddSlotAndCallAddPropHook::ValueIndex);

  pushArg(ImmGCPtr(ins->mir()->shape()));
  pushArg(value);
  pushArg(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callVM<Fn, AddSlotAndCallAddPropHook>(ins);
}
// Store a boxed Value into a fixed slot, with a pre-barrier when the MIR
// instruction requires one.
void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const ValueOperand value = ToValue(ins, LStoreFixedSlotV::ValueIndex);

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  masm.storeValue(value, address);
}
// Store a typed (possibly constant) value into a fixed slot, with a
// pre-barrier when the MIR instruction requires one.
void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  const Register obj = ToRegister(ins->getOperand(0));
  size_t slot = ins->mir()->slot();

  const LAllocation* value = ins->value();
  MIRType valueType = ins->mir()->value()->type();

  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  if (ins->mir()->needsBarrier()) {
    emitPreBarrier(address);
  }

  // The value may be a compile-time constant or live in a register.
  ConstantOrRegister nvalue =
      value->isConstant()
          ? ConstantOrRegister(value->toConstant()->toJSValue())
          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  masm.storeConstantOrRegister(nvalue, address);
}
// Allocate and attach an IonGetNameIC for a name lookup on the env chain.
void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->envObj());
  ValueOperand output = ToOutValue(ins);
  Register temp = ToRegister(ins->temp0());

  IonGetNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Attach an IonGetPropertyIC. A constant non-index atom id upgrades the
// cache kind from GetElem to GetProp.
void CodeGenerator::addGetPropertyCache(LInstruction* ins,
                                        LiveRegisterSet liveRegs,
                                        TypedOrValueRegister value,
                                        const ConstantOrRegister& id,
                                        ValueOperand output) {
  CacheKind kind = CacheKind::GetElem;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetProp;
    }
  }
  IonGetPropertyIC cache(kind, liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Attach an IonSetPropertyIC. A constant non-index atom id upgrades the
// cache kind from SetElem to SetProp.
void CodeGenerator::addSetPropertyCache(LInstruction* ins,
                                        LiveRegisterSet liveRegs,
                                        Register objReg, Register temp,
                                        const ConstantOrRegister& id,
                                        const ConstantOrRegister& value,
                                        bool strict) {
  CacheKind kind = CacheKind::SetElem;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::SetProp;
    }
  }
  IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
  addIC(ins, allocateIC(cache));
}
// Wrap operand |n| of |lir| as a ConstantOrRegister: boxed Values and
// register-allocated typed values become TypedOrValueRegisters, LIR
// constants become JS value constants.
ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
                                                       size_t n, MIRType type) {
  if (type == MIRType::Value) {
    return TypedOrValueRegister(ToValue(lir, n));
  }

  const LAllocation* value = lir->getOperand(n);
  if (value->isConstant()) {
    return ConstantOrRegister(value->toConstant()->toJSValue());
  }

  return TypedOrValueRegister(type, ToAnyRegister(value));
}
// Attach a get-property IC for obj[id] / obj.prop.
void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);
  addGetPropertyCache(ins, liveRegs, value, id, output);
}
// Attach an IonGetPropSuperIC for super.prop / super[id]. A constant
// non-index atom id upgrades the kind from GetElemSuper to GetPropSuper.
void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister receiver =
      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
                           ins->mir()->receiver()->type())
          .reg();
  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
                                               ins->mir()->idval()->type());
  ValueOperand output = ToOutValue(ins);

  CacheKind kind = CacheKind::GetElemSuper;
  if (id.constant() && id.value().isString()) {
    JSString* idString = id.value().toString();
    if (idString->isAtom() && !idString->asAtom().isIndex()) {
      kind = CacheKind::GetPropSuper;
    }
  }

  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  addIC(ins, allocateIC(cache));
}
// Allocate and attach an IonBindNameIC for a BindName on the env chain.
void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register envChain = ToRegister(ins->environmentChain());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  IonBindNameIC ic(liveRegs, envChain, output, temp);
  addIC(ins, allocateIC(ic));
}
// Allocate and attach an IonHasOwnIC for Object.hasOwnProperty-style checks.
void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
                                                 ins->mir()->idval()->type())
                                .reg();
  Register output = ToRegister(ins->output());

  IonHasOwnIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// Allocate and attach an IonCheckPrivateFieldIC (checks presence of a
// private field, e.g. `#field in obj`).
void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  TypedOrValueRegister value =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
                           ins->mir()->value()->type())
          .reg();
  TypedOrValueRegister id =
      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
                           ins->mir()->idval()->type())
          .reg();
  Register output = ToRegister(ins->output());

  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  addIC(ins, allocateIC(cache));
}
// VM call creating a fresh private-name symbol from the given atom.
void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
  pushArg(ImmGCPtr(ins->mir()->name()));

  using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
  callVM<Fn, NewPrivateName>(ins);
}
// VM call implementing `delete obj.prop`; the strict/sloppy variant is
// selected statically from the MIR flag.
void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToValue(lir, LCallDeleteProperty::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelPropOperation<true>>(lir);
  } else {
    callVM<Fn, DelPropOperation<false>>(lir);
  }
}
// VM call implementing `delete obj[index]`; the strict/sloppy variant is
// selected statically from the MIR flag.
void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
  pushArg(ToValue(lir, LCallDeleteElement::IndexIndex));
  pushArg(ToValue(lir, LCallDeleteElement::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  if (lir->mir()->strict()) {
    callVM<Fn, DelElemOperation<true>>(lir);
  } else {
    callVM<Fn, DelElemOperation<false>>(lir);
  }
}
// Create a for-in iterator for |obj|. Tries the shape-keyed iterator cache
// inline; cache misses (and cached iterators lacking requested indices) call
// GetIterator / GetIteratorWithIndices in the VM. The fast path then marks
// the native iterator active, registers it in the compartment's enumerators
// list, and emits a post-write barrier for objectBeingIterated_.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = (lir->mir()->wantsIndices())
                           ? oolCallVM<Fn, GetIteratorWithIndices>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj))
                           : oolCallVM<Fn, GetIterator>(
                                 lir, ArgList(obj), StoreRegisterTo(iterObj));

  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry());

  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchNativeIteratorIndices(Assembler::Equal, nativeIter, temp2,
                                     NativeIteratorIndices::AvailableOnRequest,
                                     ool->entry());
  }

  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(
      obj, Address(nativeIter, NativeIterator::offsetOfObjectBeingIterated()));
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  Register enumeratorsAddr = temp2;
  masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
  masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

  // Generate post-write barrier for storing to |iterObj->objectBeingIterated_|.
  // We already know that |iterObj| is tenured, so we only have to check |obj|.
  Label skipBarrier;
  masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
  {
    LiveRegisterSet save = liveVolatileRegs(lir);
    save.takeUnchecked(temp);
    save.takeUnchecked(temp2);
    save.takeUnchecked(temp3);
    if (iterObj.volatile_()) {
      save.addUnchecked(iterObj);
    }

    masm.PushRegsInMask(save);
    emitPostWriteBarrier(iterObj);
    masm.PopRegsInMask(save);
  }
  masm.bind(&skipBarrier);

  masm.bind(ool->rejoin());
}
// VM call creating a for-in iterator from an arbitrary value.
void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
  pushArg(ToValue(lir, LValueToIterator::ValueIndex));

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  callVM<Fn, ValueToIterator>(lir);
}
// Branch to ifTrue iff the property iterator has valid indices AND the
// iterated object's shape still matches the iterator's recorded first shape;
// otherwise branch to ifFalse.
void CodeGenerator::visitIteratorHasIndicesAndBranch(
    LIteratorHasIndicesAndBranch* lir) {
  Register iterator = ToRegister(lir->iterator());
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());
  Register temp2 = ToRegister(lir->temp2());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchNativeIteratorIndices(Assembler::NotEqual, temp, temp2,
                                   NativeIteratorIndices::Valid, ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address firstShapeAddr(temp, NativeIterator::offsetOfFirstShape());
  masm.loadPtr(firstShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);

  // Fall through when ifTrue is the next block; otherwise jump explicitly.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    masm.jump(ifTrue);
  }
}
// Load a property value using the iterator's current property index, which
// encodes both an index and a kind (dynamic slot, fixed slot, or dense
// element); dispatch on the kind and load from the matching storage.
void CodeGenerator::visitLoadSlotByIteratorIndex(
    LLoadSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  ValueOperand result = ToOutValue(lir);

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.loadValue(BaseValueIndex(temp2, temp), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, temp, sizeof(NativeObject)), result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // Only three kinds exist; anything else indicates corruption.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(temp2, temp), result);
  masm.bind(&done);
}
// Store a value using the iterator's current property index: compute the
// effective address of the slot/element per kind, store with a pre-barrier,
// then emit a post-write barrier if a nursery value was written into a
// tenured object.
void CodeGenerator::visitStoreSlotByIteratorIndex(
    LStoreSlotByIteratorIndex* lir) {
  Register object = ToRegister(lir->object());
  Register iterator = ToRegister(lir->iterator());
  ValueOperand value = ToValue(lir, LStoreSlotByIteratorIndex::ValueIndex);
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // temp <- index, temp2 <- PropertyIndex::Kind.
  masm.extractCurrentIndexAndKindFromIterator(iterator, temp, temp2);

  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.computeEffectiveAddress(BaseValueIndex(temp2, temp), temp);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, temp, sizeof(NativeObject)), temp);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // Only three kinds exist; anything else indicates corruption.
  Label kindOkay;
  masm.branch32(Assembler::Equal, temp2,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp2);
  Label indexOkay;
  Address initLength(temp2, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, temp, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(temp2, temp);
  masm.computeEffectiveAddress(elementAddress, temp);

  // All three paths converge with the target address in |temp|.
  masm.bind(&doStore);
  Address storeAddress(temp, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier: tenured object now referencing a nursery value.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp2, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp2, &done);

  saveVolatile(temp2);
  emitPostWriteBarrier(object);
  restoreVolatile(temp2);

  masm.bind(&done);
}
// Attach a set-property IC for obj[id] = value / obj.prop = value.
void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  Register objReg = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());

  ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
                                               ins->mir()->idval()->type());
  ConstantOrRegister value = toConstantOrRegister(
      ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());

  addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
                      ins->mir()->strict());
}
// VM call implementing the `throw` statement.
void CodeGenerator::visitThrow(LThrow* lir) {
  pushArg(ToValue(lir, LThrow::ValueIndex));

  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::ThrowOperation>(lir);
}
// Out-of-line path for LTypeOfV: taken when the operand is an object, which
// needs extra work to distinguish plain objects, callables ("function"), and
// objects that emulate undefined.
class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfV* ins_;  // The LIR instruction this OOL code belongs to.

 public:
  explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfV(this);
  }
  LTypeOfV* ins() const { return ins_; }
};
16016 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
16017 switch (type) {
16018 case JSVAL_TYPE_OBJECT:
16019 masm.move32(Imm32(JSTYPE_OBJECT), output);
16020 break;
16021 case JSVAL_TYPE_DOUBLE:
16022 case JSVAL_TYPE_INT32:
16023 masm.move32(Imm32(JSTYPE_NUMBER), output);
16024 break;
16025 case JSVAL_TYPE_BOOLEAN:
16026 masm.move32(Imm32(JSTYPE_BOOLEAN), output);
16027 break;
16028 case JSVAL_TYPE_UNDEFINED:
16029 masm.move32(Imm32(JSTYPE_UNDEFINED), output);
16030 break;
16031 case JSVAL_TYPE_NULL:
16032 masm.move32(Imm32(JSTYPE_OBJECT), output);
16033 break;
16034 case JSVAL_TYPE_STRING:
16035 masm.move32(Imm32(JSTYPE_STRING), output);
16036 break;
16037 case JSVAL_TYPE_SYMBOL:
16038 masm.move32(Imm32(JSTYPE_SYMBOL), output);
16039 break;
16040 case JSVAL_TYPE_BIGINT:
16041 masm.move32(Imm32(JSTYPE_BIGINT), output);
16042 break;
16043 default:
16044 MOZ_CRASH("Unsupported JSValueType");
// Emit one link of the typeof tag-check chain: if |tag| matches |type|, load
// the corresponding JSType into |output| and jump to |done|; otherwise fall
// through to the next check. Objects always divert to |oolObject|.
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Int32 and double both answer "number": test both tags at once.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
// typeof for a boxed Value: emit a chain of tag checks, most-frequently
// observed types first, with an OOL fallback for objects.
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  const ValueOperand value = ToValue(lir, LTypeOfV::InputIndex);
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
  addOutOfLineCode(ool, lir->mir());

  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE, JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  // Tracks which types still need a check emitted below.
  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
// Compute typeof for the object in |obj|, leaving the JSType in |output| and
// jumping to |done| for the fast cases. When typeOfObject can't decide
// inline, fall through to an ABI call to js::TypeOfObject (the slow path
// falls through to |done| after the call).
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  masm.bind(&slowCheck);

  // Preserve live volatile registers (except |output|) across the ABI call.
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
16159 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
16160 LTypeOfV* ins = ool->ins();
16162 ValueOperand input = ToValue(ins, LTypeOfV::InputIndex);
16163 Register temp = ToTempUnboxRegister(ins->temp0());
16164 Register output = ToRegister(ins->output());
16166 Register obj = masm.extractObject(input, temp);
16167 emitTypeOfObject(obj, output, ool->rejoin());
16168 masm.jump(ool->rejoin());
16171 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
16172 Register obj = ToRegister(lir->object());
16173 Register output = ToRegister(lir->output());
16175 Label done;
16176 emitTypeOfObject(obj, output, &done);
16177 masm.bind(&done);
// Map a JSType index (in |input|) to its interned name string by indexing the
// runtime's names table. Relies on the typeof names being laid out
// contiguously starting at |undefined| (JSTYPE_UNDEFINED == 0).
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Guard that the index is a valid JSType in debug builds.
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
// Out-of-line path for LTypeOfIsNonPrimitiveV (boxed input): the operand is
// an object that needs the slow js::TypeOfObject call.
class OutOfLineTypeOfIsNonPrimitiveV : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveV* ins_;  // Owning LIR instruction.

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveV(this);
  }
  auto* ins() const { return ins_; }
};
// Out-of-line path for LTypeOfIsNonPrimitiveO (unboxed object input): the
// object needs the slow js::TypeOfObject call.
class OutOfLineTypeOfIsNonPrimitiveO : public OutOfLineCodeBase<CodeGenerator> {
  LTypeOfIsNonPrimitiveO* ins_;  // Owning LIR instruction.

 public:
  explicit OutOfLineTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineTypeOfIsNonPrimitiveO(this);
  }
  auto* ins() const { return ins_; }
};
// Slow path for `typeof obj <op> "<type>"`: call js::TypeOfObject through the
// ABI and compare its result against the expected JSType, leaving the boolean
// in |output|.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // Equality comparisons on JSType values are unsigned.
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
16237 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveV(
16238 OutOfLineTypeOfIsNonPrimitiveV* ool) {
16239 auto* ins = ool->ins();
16240 ValueOperand input = ToValue(ins, LTypeOfIsNonPrimitiveV::InputIndex);
16241 Register output = ToRegister(ins->output());
16242 Register temp = ToTempUnboxRegister(ins->temp0());
16244 Register obj = masm.extractObject(input, temp);
16246 emitTypeOfIsObjectOOL(ins->mir(), obj, output);
16248 masm.jump(ool->rejoin());
16251 void CodeGenerator::visitOutOfLineTypeOfIsNonPrimitiveO(
16252 OutOfLineTypeOfIsNonPrimitiveO* ool) {
16253 auto* ins = ool->ins();
16254 Register input = ToRegister(ins->input());
16255 Register output = ToRegister(ins->output());
16257 emitTypeOfIsObjectOOL(ins->mir(), input, output);
16259 masm.jump(ool->rejoin());
// Emit `typeof obj <op> "<jstype>"` for a known-object input. The three
// inline typeof outcomes (object / function / undefined-emulating) are routed
// to |success| or |fail| depending on which JSType is compared against; the
// boolean result is then materialized in |output|. |slowCheck| is taken when
// typeOfObject can't decide inline.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every outcome fails; the matching one is redirected below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Materialize the boolean: an (Strict)Ne comparison inverts the result.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
// `typeof value <op> "<jstype>"` for a boxed input where the compared type is
// non-primitive. Primitive tags are resolved inline with tag tests; objects
// fall through to emitTypeOfIsObject with an OOL slow path.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir, LTypeOfIsNonPrimitiveV::InputIndex);
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveV(lir);
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      // |undefined| matches directly; only objects (which may emulate
      // undefined) need the object path below.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      // typeof null is "object", so null matches directly.
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
#ifdef ENABLE_RECORD_TUPLE
    case JSTYPE_RECORD:
    case JSTYPE_TUPLE:
#endif
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
16364 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
16365 Register input = ToRegister(lir->input());
16366 Register output = ToRegister(lir->output());
16368 auto* mir = lir->mir();
16370 auto* ool = new (alloc()) OutOfLineTypeOfIsNonPrimitiveO(lir);
16371 addOutOfLineCode(ool, mir);
16373 Label success, fail;
16374 emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
16376 masm.bind(ool->rejoin());
16379 void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
16380 ValueOperand input = ToValue(lir, LTypeOfIsPrimitive::InputIndex);
16381 Register output = ToRegister(lir->output());
16383 auto* mir = lir->mir();
16384 auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
16386 switch (mir->jstype()) {
16387 case JSTYPE_STRING:
16388 masm.testStringSet(cond, input, output);
16389 break;
16390 case JSTYPE_NUMBER:
16391 masm.testNumberSet(cond, input, output);
16392 break;
16393 case JSTYPE_BOOLEAN:
16394 masm.testBooleanSet(cond, input, output);
16395 break;
16396 case JSTYPE_SYMBOL:
16397 masm.testSymbolSet(cond, input, output);
16398 break;
16399 case JSTYPE_BIGINT:
16400 masm.testBigIntSet(cond, input, output);
16401 break;
16403 case JSTYPE_UNDEFINED:
16404 case JSTYPE_OBJECT:
16405 case JSTYPE_FUNCTION:
16406 #ifdef ENABLE_RECORD_TUPLE
16407 case JSTYPE_RECORD:
16408 case JSTYPE_TUPLE:
16409 #endif
16410 case JSTYPE_LIMIT:
16411 MOZ_CRASH("Non-primitive type");
16415 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
16416 pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
16417 pushArg(ToRegister(lir->iterator()));
16419 using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
16420 callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
16423 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
16424 LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
16425 ValueOperand input = ToValue(lir, LToPropertyKeyCache::InputIndex);
16426 ValueOperand output = ToOutValue(lir);
16428 IonToPropertyKeyIC ic(liveRegs, input, output);
16429 addIC(lir, allocateIC(ic));
// Load a boxed Value from dense elements. Bails out if the slot holds the
// magic hole value.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  if (load->index()->isConstant()) {
    // Constant index: fold the offset into the address.
    NativeObject::elementsSizeMustNotOverflow();
    int32_t offset = ToInt32(load->index()) * sizeof(Value);
    masm.loadValue(Address(elements, offset), out);
  } else {
    masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
                   out);
  }

  // A magic value means a hole: bail out to handle it in the interpreter.
  Label testMagic;
  masm.branchTestMagic(Assembler::Equal, out, &testMagic);
  bailoutFrom(&testMagic, load->snapshot());
}
// Load a dense element where out-of-bounds indices and holes produce
// |undefined| instead of bailing (unless a negative index check is needed).
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    // Negative indices may shadow real properties: bail out for those.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Load an unboxed scalar from typed-array elements. The |fail| label is used
// by loadFromTypedArray when the value can't be represented in the output
// register (bails out in that case).
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  Label fail;
  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.loadFromTypedArray(storageType, source, out, temp, &fail);
  }

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
// Load a 64-bit element from a BigInt64/BigUint64 typed array and box it as
// a BigInt object.
void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.load64(source, temp64);
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.load64(source, temp64);
  }

  // Allocate a BigInt from the raw 64-bit value.
  emitCreateBigInt(lir, storageType, temp64, out, temp);
}
// Load an element from a DataView buffer. DataView accesses may be unaligned
// and may request either endianness, so the general path loads into a GPR,
// optionally byte-swaps, then moves into the typed output register.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
  AnyRegister out = ToAnyRegister(lir->output());

  const MLoadDataViewElement* mir = lir->mir();
  Scalar::Type storageType = mir->storageType();

  // DataView offsets are byte offsets, so the index is unscaled.
  BaseIndex source(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access. (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(storageType)) {
      Label fail;
      masm.loadFromTypedArray(storageType, source, out, temp, &fail);

      if (fail.used()) {
        bailoutFrom(&fail, lir->snapshot());
      }
    } else {
      masm.load64(source, temp64);

      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (storageType) {
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, out.gpr());
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, out.gpr());
      break;
    case Scalar::Int32:
      masm.load32Unaligned(source, out.gpr());
      break;
    case Scalar::Uint32:
      masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
      break;
    case Scalar::Float32:
      masm.load32Unaligned(source, temp);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it matches the host.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp : out.gpr());
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Load a typed-array element where out-of-bounds indices yield |undefined|
// rather than bailing.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // Load the length.
  Register scratch = out.scratchReg();
  Register scratch2 = ToRegister(lir->temp0());
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  Label fail;
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  // Uint32 values that don't fit int32 either become doubles or bail out.
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
                          &fail);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
// BigInt64/BigUint64 variant of LoadTypedArrayElementHole: out-of-bounds
// yields |undefined|, otherwise the 64-bit element is boxed as a BigInt.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register object = ToRegister(lir->object());
  const ValueOperand out = ToOutValue(lir);

  // On x86 there are not enough registers. In that case reuse the output's
  // type register as temporary.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp()->isBogusTemp());
  Register temp = out.typeReg();
#else
  Register temp = ToRegister(lir->temp());
#endif
  Register64 temp64 = ToRegister64(lir->temp64());

  // Load the length.
  Register scratch = out.scratchReg();
  Register index = ToRegister(lir->index());
  masm.loadArrayBufferViewLengthIntPtr(object, scratch);

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);

  // Load the elements vector.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

  Register bigInt = out.scratchReg();
  emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
// Out-of-line jump table for switch-like dispatch. The table is either
// embedded inline in the instruction stream (SwitchTableType::Inline, ARM
// only) or emitted out-of-line after the main code. Table entries are
// CodeLabels that get patched to absolute addresses after codegen.
template <SwitchTableType tableType>
class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
  using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
  using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
  LabelsVector labels_;          // Jump targets, one per switch case.
  CodeLabelsVector codeLabels_;  // Reserved table slots awaiting patching.
  CodeLabel start_;              // Address of the table itself.
  bool isOutOfLine_;             // Set once we are emitting the OOL part.

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineSwitch(this);
  }

 public:
  explicit OutOfLineSwitch(TempAllocator& alloc)
      : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}

  CodeLabel* start() { return &start_; }

  CodeLabelsVector& codeLabels() { return codeLabels_; }
  LabelsVector& labels() { return labels_; }

  // Emit an indirect jump through table entry |index|.
  void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
    Register base;
    if (tableType == SwitchTableType::Inline) {
#if defined(JS_CODEGEN_ARM)
      base = ::js::jit::pc;
#else
      MOZ_CRASH("NYI: SwitchTableType::Inline");
#endif
    } else {
#if defined(JS_CODEGEN_ARM)
      MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#else
      masm.mov(start(), temp);
      base = temp;
#endif
    }
    BaseIndex jumpTarget(base, index, ScalePointer);
    masm.branchToComputedAddress(jumpTarget);
  }

  // Register an entry in the switch table.
  void addTableEntry(MacroAssembler& masm) {
    if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
        (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
      CodeLabel cl;
      masm.writeCodePointer(&cl);
      masm.propagateOOM(codeLabels_.append(std::move(cl)));
    }
  }
  // Register the code, to which the table will jump to.
  void addCodeEntry(MacroAssembler& masm) {
    Label entry;
    masm.bind(&entry);
    masm.propagateOOM(labels_.append(std::move(entry)));
  }

  void setOutOfLine() { isOutOfLine_ = true; }
};
// Emit the out-of-line part of a switch table: reserve the table storage (for
// the OutOfLine flavor) and patch every table slot with its case label.
template <SwitchTableType tableType>
void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<tableType>* jumpTable) {
  jumpTable->setOutOfLine();
  auto& labels = jumpTable->labels();

  if (tableType == SwitchTableType::OutOfLine) {
#if defined(JS_CODEGEN_ARM)
    MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else

# if defined(JS_CODEGEN_ARM64)
    // Keep the table contiguous: no constant pools or nops inside it.
    AutoForbidPoolsAndNops afp(
        &masm,
        (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
# endif

    masm.haltingAlign(sizeof(void*));

    // Bind the address of the jump table and reserve the space for code
    // pointers to jump in the newly generated code.
    masm.bind(jumpTable->start());
    masm.addCodeLabel(*jumpTable->start());
    for (size_t i = 0, e = labels.length(); i < e; i++) {
      jumpTable->addTableEntry(masm);
    }
#endif
  }

  // Register all reserved pointers of the jump table to target labels. The
  // entries of the jump table need to be absolute addresses and thus must be
  // patched after codegen is finished.
  auto& codeLabels = jumpTable->codeLabels();
  for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
    auto& cl = codeLabels[i];
    cl.target()->bind(labels[i].offset());
    masm.addCodeLabel(cl);
  }
}
// Explicit instantiations for both switch-table flavors.
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
template void CodeGenerator::visitOutOfLineSwitch(
    OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
16863 template <typename T>
16864 static inline void StoreToTypedArray(MacroAssembler& masm,
16865 Scalar::Type writeType,
16866 const LAllocation* value, const T& dest) {
16867 if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
16868 masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
16869 } else {
16870 if (value->isConstant()) {
16871 masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
16872 } else {
16873 masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
16878 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
16879 Register elements = ToRegister(lir->elements());
16880 const LAllocation* value = lir->value();
16882 const MStoreUnboxedScalar* mir = lir->mir();
16884 Scalar::Type writeType = mir->writeType();
16886 if (lir->index()->isConstant()) {
16887 Address dest = ToAddress(elements, lir->index(), writeType);
16888 StoreToTypedArray(masm, writeType, value, dest);
16889 } else {
16890 BaseIndex dest(elements, ToRegister(lir->index()),
16891 ScaleFromScalarType(writeType));
16892 StoreToTypedArray(masm, writeType, value, dest);
16896 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
16897 Register elements = ToRegister(lir->elements());
16898 Register value = ToRegister(lir->value());
16899 Register64 temp = ToRegister64(lir->temp());
16901 Scalar::Type writeType = lir->mir()->writeType();
16903 masm.loadBigInt64(value, temp);
16905 if (lir->index()->isConstant()) {
16906 Address dest = ToAddress(elements, lir->index(), writeType);
16907 masm.storeToTypedBigIntArray(writeType, temp, dest);
16908 } else {
16909 BaseIndex dest(elements, ToRegister(lir->index()),
16910 ScaleFromScalarType(writeType));
16911 masm.storeToTypedBigIntArray(writeType, temp, dest);
// Store an element into a DataView buffer. DataView stores may be unaligned
// and may request either endianness, so the general path moves the value into
// a GPR, optionally byte-swaps, then performs an unaligned store.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());

  const MStoreDataViewElement* mir = lir->mir();
  Scalar::Type writeType = mir->writeType();

  // DataView offsets are byte offsets, so the index is unscaled.
  BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);

  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access. (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    if (!Scalar::isBigIntType(writeType)) {
      StoreToTypedArray(masm, writeType, value, dest);
    } else {
      masm.loadBigInt64(ToRegister(value), temp64);
      masm.storeToTypedBigIntArray(writeType, temp64, dest);
    }
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeFloatIfDeterministic(fvalue);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.canonicalizeDoubleIfDeterministic(fvalue);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(ToRegister(value), temp64);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Runtime endianness flag: skip the swap when it matches the host.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(temp, dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(temp, dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(temp64, dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Store a value into a typed-array element after a bounds check; an
// out-of-bounds index skips the store entirely ("hole" semantics) instead
// of trapping or bailing out.
17041 void CodeGenerator::visitStoreTypedArrayElementHole(
17042 LStoreTypedArrayElementHole* lir) {
17043 Register elements = ToRegister(lir->elements());
17044 const LAllocation* value = lir->value();
17046 Scalar::Type arrayType = lir->mir()->arrayType();
17048 Register index = ToRegister(lir->index());
17049 const LAllocation* length = lir->length();
17050 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
// Spectre-hardened bounds check: jump to |skip| (no store emitted) when
// |index| is not below the length, which may live in a register or memory.
17052 Label skip;
17053 if (length->isRegister()) {
17054 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17055 } else {
17056 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
// In bounds: address the element with the scale implied by the scalar type
// and perform the (possibly constant-operand) store.
17059 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17060 StoreToTypedArray(masm, arrayType, value, dest);
17062 masm.bind(&skip);
// BigInt variant of the hole-store above: the BigInt's 64-bit digit is
// unboxed into |temp| before being stored; out-of-bounds stores are skipped.
17065 void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
17066 LStoreTypedArrayElementHoleBigInt* lir) {
17067 Register elements = ToRegister(lir->elements());
17068 Register value = ToRegister(lir->value());
17069 Register64 temp = ToRegister64(lir->temp());
17071 Scalar::Type arrayType = lir->mir()->arrayType();
17073 Register index = ToRegister(lir->index());
17074 const LAllocation* length = lir->length();
// Reuse the 64-bit temp's scratch half for the Spectre bounds-check temp.
17075 Register spectreTemp = temp.scratchReg();
17077 Label skip;
17078 if (length->isRegister()) {
17079 masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
17080 } else {
17081 masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
// Extract the BigInt's value as an int64 and store it into the array.
17084 masm.loadBigInt64(value, temp);
17086 BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
17087 masm.storeToTypedBigIntArray(arrayType, temp, dest);
17089 masm.bind(&skip);
17092 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
17093 Register value = ToRegister(lir->value());
17094 Register output = ToRegister(lir->output());
17096 masm.atomicIsLockFreeJS(value, output);
// Clamp an int32 into the uint8 range [0, 255] in place; lowering uses a
// reuse-input allocation, so input and output must be the same register.
17099 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
17100 Register output = ToRegister(lir->output());
17101 MOZ_ASSERT(output == ToRegister(lir->input()));
17102 masm.clampIntToUint8(output);
17105 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
17106 FloatRegister input = ToFloatRegister(lir->input());
17107 Register output = ToRegister(lir->output());
17108 masm.clampDoubleToUint8(input, output);
// Clamp an arbitrary boxed Value into the uint8 range. Strings take an
// out-of-line VM call (StringToNumber); unclampable values bail out.
17111 void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
17112 ValueOperand operand = ToValue(lir, LClampVToUint8::InputIndex);
17113 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
17114 Register output = ToRegister(lir->output());
// OOL path: |output| holds the unboxed string at the call site, and the
// numeric result is written into |tempFloat| for the rejoin.
17116 using Fn = bool (*)(JSContext*, JSString*, double*);
17117 OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
17118 lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
17119 Label* stringEntry = oolString->entry();
17120 Label* stringRejoin = oolString->rejoin();
17122 Label fails;
17123 masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
17124 output, &fails);
// Values that cannot be clamped (e.g. symbols) deoptimize.
17126 bailoutFrom(&fails, lir->snapshot());
// Emit an inline cache for the JS |in| operator (key in object).
17129 void CodeGenerator::visitInCache(LInCache* ins) {
17130 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
// The key may be a constant or a typed/value register, per the MIR type.
17132 ConstantOrRegister key =
17133 toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
17134 Register object = ToRegister(ins->rhs());
17135 Register output = ToRegister(ins->output());
17136 Register temp = ToRegister(ins->temp0());
17138 IonInIC cache(liveRegs, key, object, output, temp);
17139 addIC(ins, allocateIC(cache));
// Specialized |in| for dense arrays: true iff the index is below the
// initialized length and the element slot is not a hole (magic value).
// Negative indices either bail out immediately (constant case) or bail via
// an explicit check (register case), since they need the generic path.
17142 void CodeGenerator::visitInArray(LInArray* lir) {
17143 const MInArray* mir = lir->mir();
17144 Register elements = ToRegister(lir->elements());
17145 Register initLength = ToRegister(lir->initLength());
17146 Register output = ToRegister(lir->output());
17148 Label falseBranch, done, trueBranch;
17150 if (lir->index()->isConstant()) {
17151 int32_t index = ToInt32(lir->index());
17153 if (index < 0) {
// A known-negative constant index always deoptimizes.
17154 MOZ_ASSERT(mir->needsNegativeIntCheck());
17155 bailout(lir->snapshot());
17156 return;
17159 masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
17160 &falseBranch);
// Safe to compute the byte offset directly: element offsets cannot
// overflow (asserted by elementsSizeMustNotOverflow).
17162 NativeObject::elementsSizeMustNotOverflow();
17163 Address address = Address(elements, index * sizeof(Value));
17164 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17165 } else {
17166 Register index = ToRegister(lir->index());
// When a negative index is possible, a failed (unsigned) length check
// must first distinguish "negative" (bailout) from "too large" (false).
17168 Label negativeIntCheck;
17169 Label* failedInitLength = &falseBranch;
17170 if (mir->needsNegativeIntCheck()) {
17171 failedInitLength = &negativeIntCheck;
17174 masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
17176 BaseIndex address(elements, index);
17177 masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
17179 if (mir->needsNegativeIntCheck()) {
17180 masm.jump(&trueBranch);
17181 masm.bind(&negativeIntCheck);
17183 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
17185 masm.jump(&falseBranch);
17189 masm.bind(&trueBranch);
17190 masm.move32(Imm32(1), output);
17191 masm.jump(&done);
17193 masm.bind(&falseBranch);
17194 masm.move32(Imm32(0), output);
17195 masm.bind(&done);
// Guard that a dense element is not a hole; finding the magic hole value
// deoptimizes.
17198 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
17199 Register elements = ToRegister(lir->elements());
17200 const LAllocation* index = lir->index();
17202 Label testMagic;
17203 if (index->isConstant()) {
17204 Address address(elements, ToInt32(index) * sizeof(js::Value));
17205 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17206 } else {
17207 BaseObjectElementIndex address(elements, ToRegister(index));
17208 masm.branchTestMagic(Assembler::Equal, address, &testMagic);
17210 bailoutFrom(&testMagic, lir->snapshot());
17213 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
17214 Register protoReg = ToRegister(ins->rhs());
17215 emitInstanceOf(ins, protoReg);
17218 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
17219 Register protoReg = ToRegister(ins->rhs());
17220 emitInstanceOf(ins, protoReg);
// Shared instanceof emitter for LInstanceOfO/LInstanceOfV. Walks the LHS's
// prototype chain inline, falling back to a VM call (IsPrototypeOf) only
// when a lazy-proto object (cross-compartment wrapper) is encountered.
17223 void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
17224 // This path implements fun_hasInstance when the function's prototype is
17225 // known to be the object in protoReg
17227 Label done;
17228 Register output = ToRegister(ins->getDef(0));
17230 // If the lhs is a primitive, the result is false.
17231 Register objReg;
17232 if (ins->isInstanceOfV()) {
17233 Label isObject;
17234 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17235 masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
17236 masm.mov(ImmWord(0), output);
17237 masm.jump(&done);
17238 masm.bind(&isObject);
// Note: unboxing into |output| means objReg may alias output below.
17239 objReg = masm.extractObject(lhsValue, output);
17240 } else {
17241 objReg = ToRegister(ins->toInstanceOfO()->lhs());
17244 // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
17245 // This follows the main loop of js::IsPrototypeOf, though additionally breaks
17246 // out of the loop on Proxy::LazyProto.
17248 // Load the lhs's prototype.
17249 masm.loadObjProto(objReg, output);
17251 Label testLazy;
17253 Label loopPrototypeChain;
17254 masm.bind(&loopPrototypeChain);
17256 // Test for the target prototype object.
17257 Label notPrototypeObject;
17258 masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
17259 masm.mov(ImmWord(1), output);
17260 masm.jump(&done);
17261 masm.bind(&notPrototypeObject);
// Relied on below: LazyProto is the tagged pointer value 1, so a single
// unsigned compare against 1 catches both nullptr (0) and LazyProto (1).
17263 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
17265 // Test for nullptr or Proxy::LazyProto
17266 masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
17268 // Load the current object's prototype.
17269 masm.loadObjProto(output, output);
17271 masm.jump(&loopPrototypeChain);
17274 // Make a VM call if an object with a lazy proto was found on the prototype
17275 // chain. This currently occurs only for cross compartment wrappers, which
17276 // we do not expect to be compared with non-wrapper functions from this
17277 // compartment. Otherwise, we stopped on a nullptr prototype and the output
17278 // register is already correct.
17280 using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
17281 auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
17282 StoreRegisterTo(output));
17284 // Regenerate the original lhs object for the VM call.
17285 Label regenerate, *lazyEntry;
17286 if (objReg != output) {
17287 lazyEntry = ool->entry();
17288 } else {
// objReg was clobbered by the proto walk (it aliased output): re-extract
// the original lhs before entering the OOL call.
17289 masm.bind(&regenerate);
17290 lazyEntry = &regenerate;
17291 if (ins->isInstanceOfV()) {
17292 ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LhsIndex);
17293 objReg = masm.extractObject(lhsValue, output);
17294 } else {
17295 objReg = ToRegister(ins->toInstanceOfO()->lhs());
17297 MOZ_ASSERT(objReg == output);
17298 masm.jump(ool->entry());
// Distinguish LazyProto (value 1 -> VM call) from nullptr (output already
// holds false, i.e. 0, so fall through to done).
17301 masm.bind(&testLazy);
17302 masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
17304 masm.bind(&done);
17305 masm.bind(ool->rejoin());
// Generic instanceof via an inline cache, used when the RHS prototype is
// not statically known.
17308 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
17309 // The Lowering ensures that RHS is an object, and that LHS is a value.
17310 LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
17311 TypedOrValueRegister lhs =
17312 TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
17313 Register rhs = ToRegister(ins->rhs());
17314 Register output = ToRegister(ins->output());
17316 IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
17317 addIC(ins, allocateIC(ic));
// Call a DOM getter through its JSJitGetterOp ABI. Optionally fast-paths a
// slot read when the value may be cached in a reserved slot; otherwise
// builds a fake exit frame, calls the getter, and unboxes the outparam.
17320 void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
17321 const Register JSContextReg = ToRegister(ins->getJSContextReg());
17322 const Register ObjectReg = ToRegister(ins->getObjectReg());
17323 const Register PrivateReg = ToRegister(ins->getPrivReg());
17324 const Register ValueReg = ToRegister(ins->getValueReg());
17326 Label haveValue;
17327 if (ins->mir()->valueMayBeInSlot()) {
17328 size_t slot = ins->mir()->domMemberSlotIndex();
17329 // It's a bit annoying to redo these slot calculations, which duplicate
17330 // LSlots and a few other things like that, but I'm not sure there's a
17331 // way to reuse those here.
17333 // If this ever gets fixed to work with proxies (by not assuming that
17334 // reserved slot indices, which is what domMemberSlotIndex() returns,
17335 // match fixed slot indices), we can reenable MGetDOMProperty for
17336 // proxies in IonBuilder.
17337 if (slot < NativeObject::MAX_FIXED_SLOTS) {
17338 masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
17339 JSReturnOperand);
17340 } else {
17341 // It's a dynamic slot.
17342 slot -= NativeObject::MAX_FIXED_SLOTS;
17343 // Use PrivateReg as a scratch register for the slots pointer.
17344 masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
17345 PrivateReg);
17346 masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
17347 JSReturnOperand);
// A cached (non-undefined) slot value skips the getter call entirely.
17349 masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
17352 DebugOnly<uint32_t> initialStack = masm.framePushed();
17354 masm.checkStackAlignment();
17356 // Make space for the outparam. Pre-initialize it to UndefinedValue so we
17357 // can trace it at GC time.
17358 masm.Push(UndefinedValue());
17359 // We pass the pointer to our out param as an instance of
17360 // JSJitGetterCallArgs, since on the binary level it's the same thing.
17361 static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
17362 masm.moveStackPtrTo(ValueReg);
17364 masm.Push(ObjectReg);
17366 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17368 // Rooting will happen at GC time.
17369 masm.moveStackPtrTo(ObjectReg);
// Cross-realm getters require switching realms around the call.
17371 Realm* getterRealm = ins->mir()->getterRealm();
17372 if (gen->realm->realmPtr() != getterRealm) {
17373 // We use JSContextReg as scratch register here.
17374 masm.switchToRealm(getterRealm, JSContextReg);
17377 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17378 masm.loadJSContext(JSContextReg);
17379 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17380 ExitFrameType::IonDOMGetter);
17382 markSafepointAt(safepointOffset, ins);
17384 masm.setupAlignedABICall();
17385 masm.loadJSContext(JSContextReg);
17386 masm.passABIArg(JSContextReg);
17387 masm.passABIArg(ObjectReg);
17388 masm.passABIArg(PrivateReg);
17389 masm.passABIArg(ValueReg);
17390 ensureOsiSpace();
17391 masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
17392 ABIType::General,
17393 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
17395 if (ins->mir()->isInfallible()) {
17396 masm.loadValue(Address(masm.getStackPointer(),
17397 IonDOMExitFrameLayout::offsetOfResult()),
17398 JSReturnOperand);
17399 } else {
// Fallible getter: a false return means an exception is pending.
17400 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17402 masm.loadValue(Address(masm.getStackPointer(),
17403 IonDOMExitFrameLayout::offsetOfResult()),
17404 JSReturnOperand);
17407 // Switch back to the current realm if needed. Note: if the getter threw an
17408 // exception, the exception handler will do this.
17409 if (gen->realm->realmPtr() != getterRealm) {
17410 static_assert(!JSReturnOperand.aliases(ReturnReg),
17411 "Clobbering ReturnReg should not affect the return value");
17412 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17415 // Until C++ code is instrumented against Spectre, prevent speculative
17416 // execution from returning any private data.
17417 if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
17418 masm.speculationBarrier();
17421 masm.adjustStack(IonDOMExitFrameLayout::Size());
17423 masm.bind(&haveValue);
17425 MOZ_ASSERT(masm.framePushed() == initialStack);
// Read a DOM member cached in a fixed slot, producing a boxed Value.
17428 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
17429 // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
17430 // use an LLoadFixedSlotV or some subclass of it for this case: that would
17431 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17432 // we'd have to duplicate a bunch of stuff we now get for free from
17433 // MGetDOMProperty.
17435 // If this ever gets fixed to work with proxies (by not assuming that
17436 // reserved slot indices, which is what domMemberSlotIndex() returns,
17437 // match fixed slot indices), we can reenable MGetDOMMember for
17438 // proxies in IonBuilder.
17439 Register object = ToRegister(ins->object());
17440 size_t slot = ins->mir()->domMemberSlotIndex();
17441 ValueOperand result = ToOutValue(ins);
17443 masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17444 result);
// Read a DOM member cached in a fixed slot, unboxing it to a typed register.
17447 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
17448 // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
17449 // use an LLoadFixedSlotT or some subclass of it for this case: that would
17450 // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
17451 // we'd have to duplicate a bunch of stuff we now get for free from
17452 // MGetDOMProperty.
17454 // If this ever gets fixed to work with proxies (by not assuming that
17455 // reserved slot indices, which is what domMemberSlotIndex() returns,
17456 // match fixed slot indices), we can reenable MGetDOMMember for
17457 // proxies in IonBuilder.
17458 Register object = ToRegister(ins->object());
17459 size_t slot = ins->mir()->domMemberSlotIndex();
17460 AnyRegister result = ToAnyRegister(ins->getDef(0));
17461 MIRType type = ins->mir()->type();
17463 masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
17464 type, result);
// Call a DOM setter through its JSJitSetterOp ABI: push the argument and
// object, build a fake exit frame, switch realms if needed, and call.
17467 void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
17468 const Register JSContextReg = ToRegister(ins->getJSContextReg());
17469 const Register ObjectReg = ToRegister(ins->getObjectReg());
17470 const Register PrivateReg = ToRegister(ins->getPrivReg());
17471 const Register ValueReg = ToRegister(ins->getValueReg());
17473 DebugOnly<uint32_t> initialStack = masm.framePushed();
17475 masm.checkStackAlignment();
17477 // Push the argument. Rooting will happen at GC time.
17478 ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
17479 masm.Push(argVal);
17480 // We pass the pointer to our out param as an instance of
17481 // JSJitSetterCallArgs, since on the binary level it's the same thing.
17482 static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
17483 masm.moveStackPtrTo(ValueReg);
17485 masm.Push(ObjectReg);
17487 LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
17489 // Rooting will happen at GC time.
17490 masm.moveStackPtrTo(ObjectReg);
// Cross-realm setters require switching realms around the call.
17492 Realm* setterRealm = ins->mir()->setterRealm();
17493 if (gen->realm->realmPtr() != setterRealm) {
17494 // We use JSContextReg as scratch register here.
17495 masm.switchToRealm(setterRealm, JSContextReg);
17498 uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
17499 masm.loadJSContext(JSContextReg);
17500 masm.enterFakeExitFrame(JSContextReg, JSContextReg,
17501 ExitFrameType::IonDOMSetter);
17503 markSafepointAt(safepointOffset, ins);
17505 masm.setupAlignedABICall();
17506 masm.loadJSContext(JSContextReg);
17507 masm.passABIArg(JSContextReg);
17508 masm.passABIArg(ObjectReg);
17509 masm.passABIArg(PrivateReg);
17510 masm.passABIArg(ValueReg);
17511 ensureOsiSpace();
17512 masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
17513 ABIType::General,
17514 CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// A false return means an exception is pending.
17516 masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
17518 // Switch back to the current realm if needed. Note: if the setter threw an
17519 // exception, the exception handler will do this.
17520 if (gen->realm->realmPtr() != setterRealm) {
17521 masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
17524 masm.adjustStack(IonDOMExitFrameLayout::Size());
17526 MOZ_ASSERT(masm.framePushed() == initialStack);
// Load a DOM proxy's expando Value from its reserved-slots private slot.
17529 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
17530 Register proxy = ToRegister(ins->proxy());
17531 ValueOperand out = ToOutValue(ins);
// First load the reserved-slots pointer, then the private slot Value.
17533 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17534 out.scratchReg());
17535 masm.loadValue(Address(out.scratchReg(),
17536 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17537 out);
// Load a DOM proxy's expando, bailing out if the ExpandoAndGeneration's
// generation counter no longer matches the one recorded at compile time.
17540 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
17541 LLoadDOMExpandoValueGuardGeneration* ins) {
17542 Register proxy = ToRegister(ins->proxy());
17543 ValueOperand out = ToOutValue(ins);
17545 Label bail;
17546 masm.loadDOMExpandoValueGuardGeneration(proxy, out,
17547 ins->mir()->expandoAndGeneration(),
17548 ins->mir()->generation(), &bail);
17549 bailoutFrom(&bail, ins->snapshot());
// Load a DOM proxy's expando through its ExpandoAndGeneration without
// checking the generation counter.
17552 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
17553 LLoadDOMExpandoValueIgnoreGeneration* ins) {
17554 Register proxy = ToRegister(ins->proxy());
17555 ValueOperand out = ToOutValue(ins);
17557 masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
17558 out.scratchReg());
17560 // Load the ExpandoAndGeneration* from the PrivateValue.
17561 masm.loadPrivate(
17562 Address(out.scratchReg(),
17563 js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
17564 out.scratchReg());
17566 // Load expandoAndGeneration->expando into the output Value register.
17567 masm.loadValue(
17568 Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
// Guard that a DOM expando Value is either undefined (no expando) or an
// object with the expected shape; any other shape deoptimizes.
17571 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
17572 LGuardDOMExpandoMissingOrGuardShape* ins) {
17573 Register temp = ToRegister(ins->temp0());
17574 ValueOperand input =
17575 ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::InputIndex);
17577 Label done;
17578 masm.branchTestUndefined(Assembler::Equal, input, &done);
17580 masm.debugAssertIsObject(input);
17581 masm.unboxObject(input, temp);
17582 // The expando object is not used in this case, so we don't need Spectre
17583 // mitigations.
17584 Label bail;
17585 masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
17586 ins->mir()->shape(), &bail);
17587 bailoutFrom(&bail, ins->snapshot());
17589 masm.bind(&done);
// Out-of-line path for IsCallable: holds the object/output registers so the
// slow path (an ABI call to ObjectIsCallable) can be emitted later.
17592 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
17593 Register object_;
17594 Register output_;
17596 public:
17597 OutOfLineIsCallable(Register object, Register output)
17598 : object_(object), output_(output) {}
17600 void accept(CodeGenerator* codegen) override {
17601 codegen->visitOutOfLineIsCallable(this);
17603 Register object() const { return object_; }
17604 Register output() const { return output_; }
// IsCallable on a known object: inline class check, OOL call for proxies
// and other cases the inline check cannot decide.
17607 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
17608 Register object = ToRegister(ins->object());
17609 Register output = ToRegister(ins->output());
17611 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
17612 addOutOfLineCode(ool, ins->mir());
17614 masm.isCallable(object, output, ool->entry());
17616 masm.bind(ool->rejoin());
// IsCallable on a boxed Value: non-objects are trivially false; objects
// take the same inline/OOL path as visitIsCallableO.
17619 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
17620 ValueOperand val = ToValue(ins, LIsCallableV::ObjectIndex);
17621 Register output = ToRegister(ins->output());
17622 Register temp = ToRegister(ins->temp0());
17624 Label notObject;
17625 masm.fallibleUnboxObject(val, temp, &notObject);
17627 OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
17628 addOutOfLineCode(ool, ins->mir());
17630 masm.isCallable(temp, output, ool->entry());
17631 masm.jump(ool->rejoin());
17633 masm.bind(&notObject);
17634 masm.move32(Imm32(0), output);
17636 masm.bind(ool->rejoin());
// Slow path for IsCallable: call ObjectIsCallable via the C++ ABI,
// preserving volatile registers around the call.
17639 void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
17640 Register object = ool->object();
17641 Register output = ool->output();
17643 saveVolatile(output);
17644 using Fn = bool (*)(JSObject* obj);
17645 masm.setupAlignedABICall();
17646 masm.passABIArg(object);
17647 masm.callWithABI<Fn, ObjectIsCallable>();
17648 masm.storeCallBoolResult(output);
17649 restoreVolatile(output);
17650 masm.jump(ool->rejoin());
// Out-of-line path for IsConstructor; keeps the LIR node so the slow path
// can re-derive its registers.
17653 class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
17654 LIsConstructor* ins_;
17656 public:
17657 explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}
17659 void accept(CodeGenerator* codegen) override {
17660 codegen->visitOutOfLineIsConstructor(this);
17662 LIsConstructor* ins() const { return ins_; }
// IsConstructor: inline check with an OOL ABI-call fallback.
17665 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
17666 Register object = ToRegister(ins->object());
17667 Register output = ToRegister(ins->output());
17669 OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
17670 addOutOfLineCode(ool, ins->mir());
17672 masm.isConstructor(object, output, ool->entry());
17674 masm.bind(ool->rejoin());
// Slow path for IsConstructor: call ObjectIsConstructor via the C++ ABI,
// preserving volatile registers around the call.
17677 void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
17678 LIsConstructor* ins = ool->ins();
17679 Register object = ToRegister(ins->object());
17680 Register output = ToRegister(ins->output());
17682 saveVolatile(output);
17683 using Fn = bool (*)(JSObject* obj);
17684 masm.setupAlignedABICall();
17685 masm.passABIArg(object);
17686 masm.callWithABI<Fn, ObjectIsConstructor>();
17687 masm.storeCallBoolResult(output);
17688 restoreVolatile(output);
17689 masm.jump(ool->rejoin());
17692 void CodeGenerator::visitIsCrossRealmArrayConstructor(
17693 LIsCrossRealmArrayConstructor* ins) {
17694 Register object = ToRegister(ins->object());
17695 Register output = ToRegister(ins->output());
17697 masm.setIsCrossRealmArrayConstructor(object, output);
// Shared IsArray emitter: ArrayObject class => true, proxy class => OOL VM
// call (which handles wrappers), anything else => false. |notArray| lets a
// caller route its own "not an object" case to the false result.
17700 static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
17701 Register obj, Register output,
17702 Label* notArray = nullptr) {
17703 masm.loadObjClassUnsafe(obj, output);
17705 Label isArray;
17706 masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
17707 &isArray);
17709 // Branch to OOL path if it's a proxy.
17710 masm.branchTestClassIsProxy(true, output, ool->entry());
17712 if (notArray) {
17713 masm.bind(notArray);
17715 masm.move32(Imm32(0), output);
17716 masm.jump(ool->rejoin());
17718 masm.bind(&isArray);
17719 masm.move32(Imm32(1), output);
17721 masm.bind(ool->rejoin());
// Array.isArray on a known object.
17724 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
17725 Register object = ToRegister(lir->object());
17726 Register output = ToRegister(lir->output());
17728 using Fn = bool (*)(JSContext*, HandleObject, bool*);
17729 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17730 lir, ArgList(object), StoreRegisterTo(output));
17731 EmitObjectIsArray(masm, ool, object, output);
// Array.isArray on a boxed Value: non-objects go straight to the false
// result via the |notArray| label.
17734 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
17735 ValueOperand val = ToValue(lir, LIsArrayV::ValueIndex);
17736 Register output = ToRegister(lir->output());
17737 Register temp = ToRegister(lir->temp0());
17739 Label notArray;
17740 masm.fallibleUnboxObject(val, temp, &notArray);
17742 using Fn = bool (*)(JSContext*, HandleObject, bool*);
17743 OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
17744 lir, ArgList(temp), StoreRegisterTo(output));
17745 EmitObjectIsArray(masm, ool, temp, output, &notArray);
// Test whether an object is a typed array. When wrappers are possible, a
// proxy class falls back to a VM call that can unwrap.
17748 void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
17749 Register object = ToRegister(lir->object());
17750 Register output = ToRegister(lir->output());
17752 OutOfLineCode* ool = nullptr;
17753 if (lir->mir()->isPossiblyWrapped()) {
17754 using Fn = bool (*)(JSContext*, JSObject*, bool*);
17755 ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
17756 lir, ArgList(object), StoreRegisterTo(output));
17759 Label notTypedArray;
17760 Label done;
// |output| temporarily holds the object's class pointer for the tests.
17762 masm.loadObjClassUnsafe(object, output);
17763 masm.branchIfClassIsNotTypedArray(output, &notTypedArray);
17765 masm.move32(Imm32(1), output);
17766 masm.jump(&done);
17767 masm.bind(&notTypedArray);
17768 if (ool) {
17769 masm.branchTestClassIsProxy(true, output, ool->entry());
17771 masm.move32(Imm32(0), output);
17772 masm.bind(&done);
17773 if (ool) {
17774 masm.bind(ool->rejoin());
// IsObject: set the output to whether the Value's tag is Object.
17778 void CodeGenerator::visitIsObject(LIsObject* ins) {
17779 Register output = ToRegister(ins->output());
17780 ValueOperand value = ToValue(ins, LIsObject::ObjectIndex);
17781 masm.testObjectSet(Assembler::Equal, value, output);
// Branching form of the same test, fused with the control flow.
17784 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
17785 ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
17786 testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
// Set the output to whether the Value is null or undefined.
17789 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
17790 Register output = ToRegister(ins->output());
17791 ValueOperand value = ToValue(ins, LIsNullOrUndefined::InputIndex);
17793 Label isNotNull, done;
17794 masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
17796 masm.move32(Imm32(1), output);
17797 masm.jump(&done);
17799 masm.bind(&isNotNull);
// Not null: the answer reduces to an undefined-tag test.
17800 masm.testUndefinedSet(Assembler::Equal, value, output);
17802 masm.bind(&done);
// Branching form: split the tag once, then test it against both null and
// undefined; fall through to ifFalse when it is the next block.
17805 void CodeGenerator::visitIsNullOrUndefinedAndBranch(
17806 LIsNullOrUndefinedAndBranch* ins) {
17807 Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
17808 Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
17809 ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);
17811 ScratchTagScope tag(masm, value);
17812 masm.splitTagForTest(value, tag);
17814 masm.branchTestNull(Assembler::Equal, tag, ifTrue);
17815 masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);
17817 if (!isNextBlock(ins->ifFalse()->lir())) {
17818 masm.jump(ifFalse);
// Load a JSScript* pointer into |reg| as an immediate GC pointer.
17822 void CodeGenerator::loadOutermostJSScript(Register reg) {
17823 // The "outermost" JSScript means the script that we are compiling
17824 // basically; this is not always the script associated with the
17825 // current basic block, which might be an inlined script.
17827 MIRGraph& graph = current->mir()->graph();
17828 MBasicBlock* entryBlock = graph.entryBlock();
17829 masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
17832 void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
17833 // The current JSScript means the script for the current
17834 // basic block. This may be an inlined script.
17836 JSScript* script = block->info().script();
17837 masm.movePtr(ImmGCPtr(script), reg);
// Set the output to whether the object's class equals the expected JSClass.
// The class pointer is loaded into |output| and then compared in place.
17840 void CodeGenerator::visitHasClass(LHasClass* ins) {
17841 Register lhs = ToRegister(ins->lhs());
17842 Register output = ToRegister(ins->output());
17844 masm.loadObjClassUnsafe(lhs, output);
17845 masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
17846 output);
// Guard that the object has the expected class; a mismatch deoptimizes.
17849 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
17850 Register lhs = ToRegister(ins->lhs());
17851 Register temp = ToRegister(ins->temp0());
17853 // branchTestObjClass may zero the object register on speculative paths
17854 // (we should have a defineReuseInput allocation in this case).
17855 Register spectreRegToZero = lhs;
17857 Label notEqual;
17859 masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
17860 temp, spectreRegToZero, &notEqual);
17862 // Can't return null-return here, so bail.
17863 bailoutFrom(&notEqual, ins->snapshot());
// Guard that the object is a JSFunction; a mismatch deoptimizes.
17866 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
17867 Register lhs = ToRegister(ins->lhs());
17868 Register temp = ToRegister(ins->temp0());
17870 // branchTestObjClass may zero the object register on speculative paths
17871 // (we should have a defineReuseInput allocation in this case).
17872 Register spectreRegToZero = lhs;
17874 Label notEqual;
17876 masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
17877 &notEqual);
17879 // Can't return null-return here, so bail.
17880 bailoutFrom(&notEqual, ins->snapshot());
// Call js::ObjectClassToString through the C++ ABI; a null return (OOM)
// deoptimizes rather than propagating an exception.
17883 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
17884 Register obj = ToRegister(lir->lhs());
17885 Register temp = ToRegister(lir->temp0());
17887 using Fn = JSString* (*)(JSContext*, JSObject*);
17888 masm.setupAlignedABICall();
17889 masm.loadJSContext(temp);
17890 masm.passABIArg(temp);
17891 masm.passABIArg(obj);
17892 masm.callWithABI<Fn, js::ObjectClassToString>();
17894 bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
// Wasm parameters need no code: the register allocator places them.
17897 void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
17899 void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
// Wasm returns jump to the shared epilogue label unless this block is the
// last one emitted (post-order begin), in which case they fall through.
17901 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
17902 // Don't emit a jump to the return label if this is the last block.
17903 if (current->mir() != *gen->graph().poBegin()) {
17904 masm.jump(&returnLabel_);
17908 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
17909 // Don't emit a jump to the return label if this is the last block.
17910 if (current->mir() != *gen->graph().poBegin()) {
17911 masm.jump(&returnLabel_);
17915 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
17916 // Don't emit a jump to the return label if this is the last block.
17917 if (current->mir() != *gen->graph().poBegin()) {
17918 masm.jump(&returnLabel_);
// Debug-only: assert at runtime that an integer value lies within the range
// computed by range analysis; out-of-range values hit assumeUnreachable.
17922 void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
17923 Register input) {
17924 // Check the lower bound.
17925 if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
17926 Label success;
17927 if (type == MIRType::Int32 || type == MIRType::Boolean) {
17928 masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
17929 &success);
17930 } else {
17931 MOZ_ASSERT(type == MIRType::IntPtr);
17932 masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
17933 &success);
17935 masm.assumeUnreachable(
17936 "Integer input should be equal or higher than Lowerbound.");
17937 masm.bind(&success);
17940 // Check the upper bound.
17941 if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
17942 Label success;
17943 if (type == MIRType::Int32 || type == MIRType::Boolean) {
17944 masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
17945 &success);
17946 } else {
17947 MOZ_ASSERT(type == MIRType::IntPtr);
17948 masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
17949 &success);
17951 masm.assumeUnreachable(
17952 "Integer input should be lower or equal than Upperbound.");
17953 masm.bind(&success);
17956 // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
17957 // r->exponent(), there's nothing to check, because if we ended up in the
17958 // integer range checking code, the value is already in an integer register
17959 // in the integer range.
// Debug-only helper: emit code asserting that the double in |input| lies
// within the Range |r|.  Checks bounds, negative zero, the maximum-exponent
// bound, and NaN/infinity exclusions, as far as the Range claims them.
17962 void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
17963 FloatRegister temp) {
17964 // Check the lower bound.
17965 if (r->hasInt32LowerBound()) {
17966 Label success;
17967 masm.loadConstantDouble(r->lower(), temp);
// NaN compares unordered to everything, so let it pass when permitted.
17968 if (r->canBeNaN()) {
17969 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
17971 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
17972 &success);
17973 masm.assumeUnreachable(
17974 "Double input should be equal or higher than Lowerbound.");
17975 masm.bind(&success);
17977 // Check the upper bound.
17978 if (r->hasInt32UpperBound()) {
17979 Label success;
17980 masm.loadConstantDouble(r->upper(), temp);
17981 if (r->canBeNaN()) {
17982 masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
17984 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
17985 masm.assumeUnreachable(
17986 "Double input should be lower or equal than Upperbound.");
17987 masm.bind(&success);
17990 // This code does not yet check r->canHaveFractionalPart(). This would require
17991 // new assembler interfaces to make rounding instructions available.
17993 if (!r->canBeNegativeZero()) {
17994 Label success;
17996 // First, test for being equal to 0.0, which also includes -0.0.
17997 masm.loadConstantDouble(0.0, temp);
17998 masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
17999 &success);
18001 // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
18002 // -Infinity instead of Infinity.
18003 masm.loadConstantDouble(1.0, temp);
18004 masm.divDouble(input, temp);
18005 masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);
18007 masm.assumeUnreachable("Input shouldn't be negative zero.");
18009 masm.bind(&success);
// If the range has no int32 bounds but bounds the exponent, assert
// |input| <= 2^(exponent+1) in magnitude (NaN is excluded above via the
// unordered branches).
18012 if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
18013 r->exponent() < FloatingPoint<double>::kExponentBias) {
18014 // Check the bounds implied by the maximum exponent.
18015 Label exponentLoOk;
18016 masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
18017 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
18018 masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
18019 &exponentLoOk);
18020 masm.assumeUnreachable("Check for exponent failed.");
18021 masm.bind(&exponentLoOk);
18023 Label exponentHiOk;
18024 masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
18025 masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
18026 masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
18027 &exponentHiOk);
18028 masm.assumeUnreachable("Check for exponent failed.");
18029 masm.bind(&exponentHiOk);
18030 } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
18031 // If we think the value can't be NaN, check that it isn't.
18032 Label notnan;
18033 masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
18034 masm.assumeUnreachable("Input shouldn't be NaN.");
18035 masm.bind(&notnan);
18037 // If we think the value also can't be an infinity, check that it isn't.
18038 if (!r->canBeInfiniteOrNaN()) {
18039 Label notposinf;
18040 masm.loadConstantDouble(PositiveInfinity<double>(), temp);
18041 masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
18042 masm.assumeUnreachable("Input shouldn't be +Inf.");
18043 masm.bind(&notposinf);
18045 Label notneginf;
18046 masm.loadConstantDouble(NegativeInfinity<double>(), temp);
18047 masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
18048 masm.assumeUnreachable("Input shouldn't be -Inf.");
18049 masm.bind(&notneginf);
// Debug assertion: verify the object's class matches the class recorded on
// the MIR node.  Function objects need a special check because two concrete
// function classes exist.
18054 void CodeGenerator::visitAssertClass(LAssertClass* ins) {
18055 Register obj = ToRegister(ins->input());
18056 Register temp = ToRegister(ins->getTemp(0));
18058 Label success;
18059 if (ins->mir()->getClass() == &FunctionClass) {
18060 // Allow both possible function classes here.
18061 masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
18062 temp, &success);
18063 } else {
18064 masm.branchTestObjClassNoSpectreMitigations(
18065 Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
18067 masm.assumeUnreachable("Wrong KnownClass during run-time");
18068 masm.bind(&success);
// Debug assertion: verify the object's shape matches the shape recorded on
// the MIR node; crashes via assumeUnreachable on mismatch.
18071 void CodeGenerator::visitAssertShape(LAssertShape* ins) {
18072 Register obj = ToRegister(ins->input());
18074 Label success;
18075 masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
18076 ins->mir()->shape(), &success);
18077 masm.assumeUnreachable("Wrong Shape during run-time");
18078 masm.bind(&success);
// LIR entry point for integer range assertions; defers to emitAssertRangeI
// with the MIR input's type.
18081 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
18082 Register input = ToRegister(ins->input());
18083 const Range* r = ins->range();
18085 emitAssertRangeI(ins->mir()->input()->type(), r, input);
// LIR entry point for double range assertions; defers to emitAssertRangeD.
18088 void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
18089 FloatRegister input = ToFloatRegister(ins->input());
18090 FloatRegister temp = ToFloatRegister(ins->temp());
18091 const Range* r = ins->range();
18093 emitAssertRangeD(r, input, temp);
// LIR entry point for float32 range assertions: widen the input to double
// into |temp| and reuse the double range-assertion emitter.
18096 void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
18097 FloatRegister input = ToFloatRegister(ins->input());
18098 FloatRegister temp = ToFloatRegister(ins->temp());
18099 FloatRegister temp2 = ToFloatRegister(ins->temp2());
18101 const Range* r = ins->range();
18103 masm.convertFloat32ToDouble(input, temp);
18104 emitAssertRangeD(r, temp, temp2);
// LIR entry point for range assertions on a boxed Value: dispatch on the
// value's tag, unbox, and run the int32 or double checker.  Any other tag
// means the range was attached to a non-numeric value -- unreachable.
18107 void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
18108 const Range* r = ins->range();
18109 const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
18110 Label done;
18113 ScratchTagScope tag(masm, value);
18114 masm.splitTagForTest(value, tag);
18117 Label isNotInt32;
18118 masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
// The scratch tag register must be released before it can be reused for
// the unbox below; ScratchTagScopeRelease does that for this sub-scope.
18120 ScratchTagScopeRelease _(&tag);
18121 Register unboxInt32 = ToTempUnboxRegister(ins->temp());
18122 Register input = masm.extractInt32(value, unboxInt32);
18123 emitAssertRangeI(MIRType::Int32, r, input);
18124 masm.jump(&done);
18126 masm.bind(&isNotInt32);
18130 Label isNotDouble;
18131 masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
18133 ScratchTagScopeRelease _(&tag);
18134 FloatRegister input = ToFloatRegister(ins->floatTemp1());
18135 FloatRegister temp = ToFloatRegister(ins->floatTemp2());
18136 masm.unboxDouble(value, input);
18137 emitAssertRangeD(r, input, temp);
18138 masm.jump(&done);
18140 masm.bind(&isNotDouble);
18144 masm.assumeUnreachable("Incorrect range for Value.");
18145 masm.bind(&done);
// Poll the runtime's interrupt bits; when nonzero, call InterruptCheck in
// the VM via an out-of-line path, then rejoin.
18148 void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
18149 using Fn = bool (*)(JSContext*);
18150 OutOfLineCode* ool =
18151 oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());
18153 const void* interruptAddr = gen->runtime->addressOfInterruptBits();
18154 masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
18155 ool->entry());
18156 masm.bind(ool->rejoin());
// Out-of-line path for a wasm trap that execution can resume from: emit the
// trap, record a safepoint for stack-map generation, and jump back.
18159 void CodeGenerator::visitOutOfLineResumableWasmTrap(
18160 OutOfLineResumableWasmTrap* ool) {
18161 LInstruction* lir = ool->lir();
18162 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
18164 markSafepointAt(masm.currentOffset(), lir);
18166 // Note that masm.framePushed() doesn't include the register dump area.
18167 // That will be taken into account when the StackMap is created from the
18168 // LSafepoint.
18169 lir->safepoint()->setFramePushedAtStackMapBase(ool->framePushed());
18170 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);
18172 masm.jump(ool->rejoin());
// Out-of-line path for a wasm trap that aborts execution: just emit the
// trap; no safepoint and no rejoin jump are needed.
18175 void CodeGenerator::visitOutOfLineAbortingWasmTrap(
18176 OutOfLineAbortingWasmTrap* ool) {
18177 masm.wasmTrap(ool->trap(), ool->bytecodeOffset());
// Wasm interrupt check: test the per-instance interrupt flag and take a
// resumable CheckInterrupt trap out-of-line when it is set.
18180 void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
18181 MOZ_ASSERT(gen->compilingWasm());
18183 OutOfLineResumableWasmTrap* ool = new (alloc()) OutOfLineResumableWasmTrap(
18184 lir, masm.framePushed(), lir->mir()->bytecodeOffset(),
18185 wasm::Trap::CheckInterrupt);
18186 addOutOfLineCode(ool, lir->mir());
18187 masm.branch32(
18188 Assembler::NotEqual,
18189 Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
18190 Imm32(0), ool->entry());
18191 masm.bind(ool->rejoin());
// Unconditional wasm trap at the MIR node's trap kind and bytecode offset.
18194 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
18195 MOZ_ASSERT(gen->compilingWasm());
18196 const MWasmTrap* mir = lir->mir();
18198 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
// Trap when the wasm anyref in |ref| is null; otherwise fall through.
18201 void CodeGenerator::visitWasmTrapIfNull(LWasmTrapIfNull* lir) {
18202 MOZ_ASSERT(gen->compilingWasm());
18203 const MWasmTrapIfNull* mir = lir->mir();
18204 Label nonNull;
18205 Register ref = ToRegister(lir->ref());
// branchWasmAnyRefIsNull with onSuccess=false branches when ref is NOT null.
18207 masm.branchWasmAnyRefIsNull(false, ref, &nonNull);
18208 masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
18209 masm.bind(&nonNull);
// Shared helper: branch to |label| when |ref| (statically of |sourceType|)
// is a subtype of |destType|.  Dispatches to the per-hierarchy subtype
// check (any / func / extern); superSTV and the scratches are only used by
// the hierarchies that need them and may be Invalid otherwise.
18212 static void BranchWasmRefIsSubtype(MacroAssembler& masm, Register ref,
18213 const wasm::RefType& sourceType,
18214 const wasm::RefType& destType, Label* label,
18215 Register superSTV, Register scratch1,
18216 Register scratch2) {
18217 if (destType.isAnyHierarchy()) {
18218 masm.branchWasmRefIsSubtypeAny(ref, sourceType, destType, label,
18219 /*onSuccess=*/true, superSTV, scratch1,
18220 scratch2);
18221 } else if (destType.isFuncHierarchy()) {
18222 masm.branchWasmRefIsSubtypeFunc(ref, sourceType, destType, label,
18223 /*onSuccess=*/true, superSTV, scratch1,
18224 scratch2);
18225 } else if (destType.isExternHierarchy()) {
18226 masm.branchWasmRefIsSubtypeExtern(ref, sourceType, destType, label,
18227 /*onSuccess=*/true);
18228 } else {
18229 MOZ_CRASH("could not generate casting code for unknown type hierarchy");
// Materialize the boolean result of a ref.test against an abstract (non
// type-indexed) destination type: 1 on subtype success, 0 otherwise.
18233 void CodeGenerator::visitWasmRefIsSubtypeOfAbstract(
18234 LWasmRefIsSubtypeOfAbstract* ins) {
18235 MOZ_ASSERT(gen->compilingWasm());
18237 const MWasmRefIsSubtypeOfAbstract* mir = ins->mir();
18238 MOZ_ASSERT(!mir->destType().isTypeRef());
18240 Register ref = ToRegister(ins->ref());
// Abstract casts need no supertype vector and at most one scratch.
18241 Register superSTV = Register::Invalid();
18242 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
18243 Register scratch2 = Register::Invalid();
18244 Register result = ToRegister(ins->output());
18245 Label onSuccess;
18246 Label onFail;
18247 Label join;
18248 BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
18249 &onSuccess, superSTV, scratch1, scratch2);
18250 masm.bind(&onFail);
18251 masm.xor32(result, result);
18252 masm.jump(&join);
18253 masm.bind(&onSuccess);
18254 masm.move32(Imm32(1), result);
18255 masm.bind(&join);
// Materialize the boolean result of a ref.test against a concrete
// (type-indexed) destination type, using the supertype vector in superSTV.
18258 void CodeGenerator::visitWasmRefIsSubtypeOfConcrete(
18259 LWasmRefIsSubtypeOfConcrete* ins) {
18260 MOZ_ASSERT(gen->compilingWasm());
18262 const MWasmRefIsSubtypeOfConcrete* mir = ins->mir();
18263 MOZ_ASSERT(mir->destType().isTypeRef());
18265 Register ref = ToRegister(ins->ref());
18266 Register superSTV = ToRegister(ins->superSTV());
18267 Register scratch1 = ToRegister(ins->temp0());
18268 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
18269 Register result = ToRegister(ins->output());
18270 Label onSuccess;
18271 Label join;
18272 BranchWasmRefIsSubtype(masm, ref, mir->sourceType(), mir->destType(),
18273 &onSuccess, superSTV, scratch1, scratch2);
18274 masm.move32(Imm32(0), result);
18275 masm.jump(&join);
18276 masm.bind(&onSuccess);
18277 masm.move32(Imm32(1), result);
18278 masm.bind(&join);
// Fused ref.test + branch for an abstract destination type: branch directly
// to the true/false successors instead of materializing a boolean.
18281 void CodeGenerator::visitWasmRefIsSubtypeOfAbstractAndBranch(
18282 LWasmRefIsSubtypeOfAbstractAndBranch* ins) {
18283 MOZ_ASSERT(gen->compilingWasm());
18284 Register ref = ToRegister(ins->ref());
18285 Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
18286 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
18287 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
18288 BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
18289 onSuccess, Register::Invalid(), scratch1,
18290 Register::Invalid());
18291 masm.jump(onFail);
// Fused ref.test + branch for a concrete destination type.
18294 void CodeGenerator::visitWasmRefIsSubtypeOfConcreteAndBranch(
18295 LWasmRefIsSubtypeOfConcreteAndBranch* ins) {
18296 MOZ_ASSERT(gen->compilingWasm());
18297 Register ref = ToRegister(ins->ref());
18298 Register superSTV = ToRegister(ins->superSTV());
18299 Register scratch1 = ToRegister(ins->temp0());
18300 Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
18301 Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
18302 Label* onFail = getJumpLabelForBranch(ins->ifFalse());
18303 BranchWasmRefIsSubtype(masm, ref, ins->sourceType(), ins->destType(),
18304 onSuccess, superSTV, scratch1, scratch2);
18305 masm.jump(onFail);
// Call a wasm struct-allocation builtin (|fun|) with the current instance
// and |typeDefData|, preserving live registers and recording a safepoint so
// the GC can walk the frame during the call.  The result pointer lands in
// |output|.
18308 void CodeGenerator::callWasmStructAllocFun(LInstruction* lir,
18309 wasm::SymbolicAddress fun,
18310 Register typeDefData,
18311 Register output) {
18312 masm.Push(InstanceReg);
18313 int32_t framePushedAfterInstance = masm.framePushed();
18314 saveLive(lir);
18316 masm.setupWasmABICall();
18317 masm.passABIArg(InstanceReg);
18318 masm.passABIArg(typeDefData);
// instanceOffset tells callWithABI where the saved InstanceReg lives
// relative to the current frame so it can be restored for the callee.
18319 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
18320 CodeOffset offset =
18321 masm.callWithABI(wasm::BytecodeOffset(0), fun,
18322 mozilla::Some(instanceOffset), ABIType::General);
18323 masm.storeCallPointerResult(output);
18325 markSafepointAt(offset.offset(), lir);
18326 lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
18327 lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);
18329 restoreLive(lir);
18330 masm.Pop(InstanceReg);
// ARM64 keeps a pseudo stack pointer; resync the real SP after the pop.
18331 #if JS_CODEGEN_ARM64
18332 masm.syncStackPtr();
18333 #endif
18336 // Out-of-line path to allocate wasm GC structs
// Captures the LIR node, the allocation builtin to call, and the registers
// holding the type-def data and the result, for use by
// visitOutOfLineWasmNewStruct.
18337 class OutOfLineWasmNewStruct : public OutOfLineCodeBase<CodeGenerator> {
18338 LInstruction* lir_;
18339 wasm::SymbolicAddress fun_;
18340 Register typeDefData_;
18341 Register output_;
18343 public:
18344 OutOfLineWasmNewStruct(LInstruction* lir, wasm::SymbolicAddress fun,
18345 Register typeDefData, Register output)
18346 : lir_(lir), fun_(fun), typeDefData_(typeDefData), output_(output) {}
18348 void accept(CodeGenerator* codegen) override {
18349 codegen->visitOutOfLineWasmNewStruct(this);
// Accessors for the captured state.
18352 LInstruction* lir() const { return lir_; }
18353 wasm::SymbolicAddress fun() const { return fun_; }
18354 Register typeDefData() const { return typeDefData_; }
18355 Register output() const { return output_; }
// Slow path for struct allocation: call the VM builtin, then rejoin.
18358 void CodeGenerator::visitOutOfLineWasmNewStruct(OutOfLineWasmNewStruct* ool) {
18359 callWasmStructAllocFun(ool->lir(), ool->fun(), ool->typeDefData(),
18360 ool->output());
18361 masm.jump(ool->rejoin());
// Allocate a wasm GC struct.  Outline (large) structs always call the VM;
// inline structs try an inline nursery allocation first and fall back to
// the out-of-line VM call.  The _true/_false builtin variants encode
// whether fields must be zeroed.
18364 void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
18365 MOZ_ASSERT(gen->compilingWasm());
18367 MWasmNewStructObject* mir = lir->mir();
18369 Register typeDefData = ToRegister(lir->typeDefData());
18370 Register output = ToRegister(lir->output());
18372 if (mir->isOutline()) {
18373 wasm::SymbolicAddress fun = mir->zeroFields()
18374 ? wasm::SymbolicAddress::StructNewOOL_true
18375 : wasm::SymbolicAddress::StructNewOOL_false;
18376 callWasmStructAllocFun(lir, fun, typeDefData, output);
18377 } else {
18378 wasm::SymbolicAddress fun = mir->zeroFields()
18379 ? wasm::SymbolicAddress::StructNewIL_true
18380 : wasm::SymbolicAddress::StructNewIL_false;
18382 Register instance = ToRegister(lir->instance());
18383 MOZ_ASSERT(instance == InstanceReg);
18385 auto ool =
18386 new (alloc()) OutOfLineWasmNewStruct(lir, fun, typeDefData, output);
18387 addOutOfLineCode(ool, lir->mir());
18389 Register temp1 = ToRegister(lir->temp0());
18390 Register temp2 = ToRegister(lir->temp1());
// Inline allocation path; jumps to ool->entry() when it cannot allocate.
18391 masm.wasmNewStructObject(instance, output, typeDefData, temp1, temp2,
18392 ool->entry(), mir->allocKind(), mir->zeroFields());
18394 masm.bind(ool->rejoin());
// Expose the dedicated wasm heap base register as a value; only valid on
// targets that reserve one (WASM_HAS_HEAPREG).
18398 void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
18399 #ifdef WASM_HAS_HEAPREG
18400 masm.movePtr(HeapReg, ToRegister(ins->output()));
18401 #else
18402 MOZ_CRASH();
18403 #endif
// 32-bit wasm linear-memory bounds check: trap with OutOfBounds when
// ptr >= boundsCheckLimit.  The trap is emitted inline under Spectre index
// masking and out-of-line otherwise (see the comment below).
18406 void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
18407 const MWasmBoundsCheck* mir = ins->mir();
18408 Register ptr = ToRegister(ins->ptr());
18409 Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
18410 // When there are no spectre mitigations in place, branching out-of-line to
18411 // the trap is a big performance win, but with mitigations it's trickier. See
18412 // bug 1680243.
18413 if (JitOptions.spectreIndexMasking) {
18414 Label ok;
18415 masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
18416 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
18417 masm.bind(&ok);
18418 } else {
18419 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
18420 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
18421 addOutOfLineCode(ool, mir);
18422 masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
18423 ool->entry());
// 64-bit variant of visitWasmBoundsCheck; same inline/out-of-line split.
18427 void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
18428 const MWasmBoundsCheck* mir = ins->mir();
18429 Register64 ptr = ToRegister64(ins->ptr());
18430 Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
18431 // See above.
18432 if (JitOptions.spectreIndexMasking) {
18433 Label ok;
18434 masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
18435 masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
18436 masm.bind(&ok);
18437 } else {
18438 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
18439 mir->bytecodeOffset(), wasm::Trap::OutOfBounds);
18440 addOutOfLineCode(ool, mir);
18441 masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
18442 ool->entry());
// Bounds-check an [index, index+length) range against |limit|; the
// MacroAssembler helper emits the trap itself.
18446 void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
18447 const MWasmBoundsCheckRange32* mir = ins->mir();
18448 Register index = ToRegister(ins->index());
18449 Register length = ToRegister(ins->length());
18450 Register limit = ToRegister(ins->limit());
18451 Register tmp = ToRegister(ins->temp0());
18453 masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->bytecodeOffset());
// Trap with UnalignedAccess when any low bit of |ptr| below the access
// size is set (byteSize is assumed to be a power of two for the mask).
18456 void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
18457 const MWasmAlignmentCheck* mir = ins->mir();
18458 Register ptr = ToRegister(ins->ptr());
18459 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
18460 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
18461 addOutOfLineCode(ool, mir);
18462 masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
18463 ool->entry());
// 64-bit variant of visitWasmAlignmentCheck.  Only the low word matters
// for the alignment mask, so on 32-bit targets test ptr.low.
18466 void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
18467 const MWasmAlignmentCheck* mir = ins->mir();
18468 Register64 ptr = ToRegister64(ins->ptr());
18469 #ifdef JS_64BIT
18470 Register r = ptr.reg;
18471 #else
18472 Register r = ptr.low;
18473 #endif
18474 OutOfLineAbortingWasmTrap* ool = new (alloc()) OutOfLineAbortingWasmTrap(
18475 mir->bytecodeOffset(), wasm::Trap::UnalignedAccess);
18476 addOutOfLineCode(ool, mir);
18477 masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
18478 ool->entry());
// Load a field of the wasm Instance at the MIR-specified offset, choosing
// the load width from the MIR result type.
18481 void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
18482 switch (ins->mir()->type()) {
18483 case MIRType::WasmAnyRef:
18484 case MIRType::Pointer:
18485 masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
18486 ToRegister(ins->output()));
18487 break;
18488 case MIRType::Int32:
18489 masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
18490 ToRegister(ins->output()));
18491 break;
18492 default:
18493 MOZ_CRASH("MIRType not supported in WasmLoadInstance");
// Int64 variant of visitWasmLoadInstance.
18497 void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
18498 MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
18499 masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
18500 ToOutRegister64(ins));
// Emit a load/add/store that bumps |script|'s warm-up counter at the given
// absolute address.  In debug builds, first verify the embedded JitScript*
// still matches the script's, since the address is baked into jitcode.
18503 void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
18504 JSScript* script, Register tmp) {
18505 // The code depends on the JitScript* not being discarded without also
18506 // invalidating Ion code. Assert this.
18507 #ifdef DEBUG
18508 Label ok;
18509 masm.movePtr(ImmGCPtr(script), tmp);
18510 masm.loadJitScript(tmp, tmp);
18511 masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
18512 masm.assumeUnreachable("Didn't find JitScript?");
18513 masm.bind(&ok);
18514 #endif
18516 masm.load32(warmUpCount, tmp);
18517 masm.add32(Imm32(1), tmp);
18518 masm.store32(tmp, warmUpCount);
// LIR entry point: compute the absolute address of the script's warm-up
// counter inside its JitScript and bump it.
18521 void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
18522 Register tmp = ToRegister(ins->temp0());
18524 AbsoluteAddress warmUpCount =
18525 AbsoluteAddress(ins->mir()->script()->jitScript())
18526 .offset(JitScript::offsetOfWarmUpCount());
18527 incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
// Bail out when the value is the JS_UNINITIALIZED_LEXICAL magic sentinel
// (access to a let/const binding in its temporal dead zone).
18530 void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
18531 ValueOperand inputValue = ToValue(ins, LLexicalCheck::InputIndex);
18532 Label bail;
18533 masm.branchTestMagicValue(Assembler::Equal, inputValue,
18534 JS_UNINITIALIZED_LEXICAL, &bail);
18535 bailoutFrom(&bail, ins->snapshot());
// Call into the VM to throw a lexical error with the MIR-provided errno.
18538 void CodeGenerator::visitThrowRuntimeLexicalError(
18539 LThrowRuntimeLexicalError* ins) {
18540 pushArg(Imm32(ins->mir()->errorNumber()));
18542 using Fn = bool (*)(JSContext*, unsigned);
18543 callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
// Call into the VM to throw the message identified by the MIR's
// ThrowMsgKind.
18546 void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
18547 pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));
18549 using Fn = bool (*)(JSContext*, unsigned);
18550 callVM<Fn, js::ThrowMsgOperation>(ins);
// VM call instantiating global declarations for the current script/pc.
18553 void CodeGenerator::visitGlobalDeclInstantiation(
18554 LGlobalDeclInstantiation* ins) {
// Arguments are pushed in reverse of the Fn signature's order.
18555 pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
18556 pushArg(ImmGCPtr(ins->mir()->block()->info().script()));
18558 using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
18559 callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
// Implement the |debugger| statement: call
// GlobalHasLiveOnDebuggerStatement via the ABI and bail out of Ion when it
// returns true so the debugger can take over in baseline.
18562 void CodeGenerator::visitDebugger(LDebugger* ins) {
18563 Register cx = ToRegister(ins->temp0());
18565 masm.loadJSContext(cx);
18566 using Fn = bool (*)(JSContext* cx);
18567 masm.setupAlignedABICall();
18568 masm.passABIArg(cx);
18569 masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();
18571 Label bail;
18572 masm.branchIfTrueBool(ReturnReg, &bail);
18573 bailoutFrom(&bail, ins->snapshot());
// Compute |new.target|: when the frame is constructing, it is the argv slot
// just past max(numActualArgs, numFormalArgs); otherwise it is undefined.
18576 void CodeGenerator::visitNewTarget(LNewTarget* ins) {
18577 ValueOperand output = ToOutValue(ins);
18579 // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
18580 Label notConstructing, done;
18581 Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
18582 masm.branchTestPtr(Assembler::Zero, calleeToken,
18583 Imm32(CalleeToken_FunctionConstructing), &notConstructing);
18585 Register argvLen = output.scratchReg();
18586 masm.loadNumActualArgs(FramePointer, argvLen);
18588 Label useNFormals;
18590 size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
18591 masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);
18593 size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
// More actuals than formals: new.target sits right after the actuals.
18595 BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
18596 masm.loadValue(newTarget, output);
18597 masm.jump(&done);
18600 masm.bind(&useNFormals);
// Fewer actuals than formals: the slot index is the (static) formal count.
18603 Address newTarget(FramePointer,
18604 argsOffset + (numFormalArgs * sizeof(Value)));
18605 masm.loadValue(newTarget, output);
18606 masm.jump(&done);
18609 // else output = undefined
18610 masm.bind(&notConstructing);
18611 masm.moveValue(UndefinedValue(), output);
18612 masm.bind(&done);
// Derived-class constructor return check: an object return value is used
// as-is; a non-undefined primitive, or undefined with an uninitialized
// |this| (magic), throws via the out-of-line VM call; otherwise |this| is
// the result.
18615 void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
18616 ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValueIndex);
18617 ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValueIndex);
18618 ValueOperand output = ToOutValue(ins);
18620 using Fn = bool (*)(JSContext*, HandleValue);
18621 OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
18622 ins, ArgList(returnValue), StoreNothing());
18624 Label noChecks;
18625 masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
18626 masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
18627 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
18628 masm.moveValue(thisValue, output);
18629 masm.jump(ool->rejoin());
18630 masm.bind(&noChecks);
18631 masm.moveValue(returnValue, output);
18632 masm.bind(ool->rejoin());
// Unbox the value as an object into |output|, or throw a CheckIsObject
// error via the out-of-line VM call when it is not an object.
18635 void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
18636 ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
18637 Register output = ToRegister(ins->output());
18639 using Fn = bool (*)(JSContext*, CheckIsObjectKind);
18640 OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
18641 ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());
18643 masm.fallibleUnboxObject(value, output, ool->entry());
18644 masm.bind(ool->rejoin());
// Throw a TypeError (via ThrowObjectCoercible) when the value is null or
// undefined; any other value falls through untouched.
18647 void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
18648 ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::ValueIndex);
18650 using Fn = bool (*)(JSContext*, HandleValue);
18651 OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
18652 ins, ArgList(checkValue), StoreNothing());
18653 masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
18654 masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
18655 masm.bind(ool->rejoin());
// Validate a class |extends| clause: null is allowed; otherwise the value
// must be an object that is a constructor.  All failure paths route to the
// VM call, which throws the appropriate error.
18658 void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
18659 ValueOperand heritage = ToValue(ins, LCheckClassHeritage::HeritageIndex);
18660 Register temp0 = ToRegister(ins->temp0());
18661 Register temp1 = ToRegister(ins->temp1());
18663 using Fn = bool (*)(JSContext*, HandleValue);
18664 OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
18665 ins, ArgList(heritage), StoreNothing());
18667 masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
18668 masm.fallibleUnboxObject(heritage, temp0, ool->entry());
// isConstructor leaves a boolean in temp1 (it may itself take the OOL
// path); a zero result means "not a constructor".
18670 masm.isConstructor(temp0, temp1, ool->entry());
18671 masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());
18673 masm.bind(ool->rejoin());
// Throw ThrowUninitializedThis when |this| is still the TDZ magic value.
18676 void CodeGenerator::visitCheckThis(LCheckThis* ins) {
18677 ValueOperand thisValue = ToValue(ins, LCheckThis::ValueIndex);
18679 using Fn = bool (*)(JSContext*);
18680 OutOfLineCode* ool =
18681 oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
18682 masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
18683 masm.bind(ool->rejoin());
// Inverse of visitCheckThis: throw ThrowInitializedThis when |this| is NOT
// the magic value, i.e. a derived constructor would initialize it twice.
18686 void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
18687 ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValueIndex);
18689 using Fn = bool (*)(JSContext*);
18690 OutOfLineCode* ool =
18691 oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
18692 masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
18693 masm.bind(ool->rejoin());
// VM call creating a generator object for the current frame's callee,
// script, environment chain, and arguments object.
18696 void CodeGenerator::visitGenerator(LGenerator* lir) {
18697 Register callee = ToRegister(lir->callee());
18698 Register environmentChain = ToRegister(lir->environmentChain());
18699 Register argsObject = ToRegister(lir->argsObject());
// Pushed in reverse of the Fn parameter order.
18701 pushArg(argsObject);
18702 pushArg(environmentChain);
18703 pushArg(ImmGCPtr(current->mir()->info().script()));
18704 pushArg(callee);
18706 using Fn = JSObject* (*)(JSContext* cx, HandleFunction, HandleScript,
18707 HandleObject, HandleObject);
18708 callVM<Fn, CreateGenerator>(lir);
// VM call resolving or rejecting an async function's promise; the resolve
// kind from the MIR distinguishes fulfill vs. reject.
18711 void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
18712 Register generator = ToRegister(lir->generator());
18713 ValueOperand valueOrReason = ToValue(lir, LAsyncResolve::ValueOrReasonIndex);
18714 AsyncFunctionResolveKind resolveKind = lir->mir()->resolveKind();
18716 pushArg(Imm32(static_cast<int32_t>(resolveKind)));
18717 pushArg(valueOrReason);
18718 pushArg(generator);
18720 using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
18721 HandleValue, AsyncFunctionResolveKind);
18722 callVM<Fn, js::AsyncFunctionResolve>(lir);
// VM call implementing |await|: suspend the async function's generator on
// the awaited value.
18725 void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
18726 ValueOperand value = ToValue(lir, LAsyncAwait::ValueIndex);
18727 Register generator = ToRegister(lir->generator());
18729 pushArg(value);
18730 pushArg(generator);
18732 using Fn =
18733 JSObject* (*)(JSContext* cx, Handle<AsyncFunctionGeneratorObject*> genObj,
18734 HandleValue value);
18735 callVM<Fn, js::AsyncFunctionAwait>(lir);
// VM call asking whether the await on |value| can be skipped (the result
// boolean is delivered through the VM function's out-param).
18738 void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
18739 ValueOperand value = ToValue(lir, LCanSkipAwait::ValueIndex);
18741 pushArg(value);
18743 using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
18744 callVM<Fn, js::CanSkipAwait>(lir);
// When |canSkip| is true, extract the awaited value via the VM; otherwise
// pass the original value through unchanged.
18747 void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
18748 ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueIndex);
18749 ValueOperand output = ToOutValue(lir);
18750 Register canSkip = ToRegister(lir->canSkip());
18752 Label cantExtract, finished;
18753 masm.branchIfFalseBool(canSkip, &cantExtract);
18755 pushArg(value);
18757 using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
18758 callVM<Fn, js::ExtractAwaitValue>(lir);
18759 masm.jump(&finished);
18760 masm.bind(&cantExtract);
18762 masm.moveValue(value, output);
18764 masm.bind(&finished);
// Debug-build VM call validating a value flowing through self-hosted code.
18767 void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
18768 ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::ValueIndex);
18769 pushArg(checkValue);
18770 using Fn = bool (*)(JSContext*, HandleValue);
18771 callVM<Fn, js::Debug_CheckSelfHosted>(ins);
// Math.random(): draw a double from the realm's XorShift128+ RNG inline.
// Under differential testing the result is forced to 0.0 for determinism.
18774 void CodeGenerator::visitRandom(LRandom* ins) {
18775 using mozilla::non_crypto::XorShift128PlusRNG;
18777 FloatRegister output = ToFloatRegister(ins->output());
18778 Register rngReg = ToRegister(ins->temp0());
18780 Register64 temp1 = ToRegister64(ins->temp1());
18781 Register64 temp2 = ToRegister64(ins->temp2());
// The RNG state address is baked into the jitcode; the realm owns it.
18783 const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
18784 masm.movePtr(ImmPtr(rng), rngReg);
18786 masm.randomDouble(rngReg, output, temp1, temp2);
18787 if (js::SupportDifferentialTesting()) {
18788 masm.loadConstantDouble(0.0, output);
// Sign-extend the low 8 or 16 bits of |input| into a full int32, per the
// MIR node's mode.
18792 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
18793 Register input = ToRegister(ins->input());
18794 Register output = ToRegister(ins->output());
18796 switch (ins->mode()) {
18797 case MSignExtendInt32::Byte:
18798 masm.move8SignExtend(input, output);
18799 break;
18800 case MSignExtendInt32::Half:
18801 masm.move16SignExtend(input, output);
18802 break;
// 32-bit rotate left/right.  A constant count is masked to [0,31]
// (& 0x1F) to match the instruction semantics; a register count is passed
// through to the assembler.
18806 void CodeGenerator::visitRotate(LRotate* ins) {
18807 MRotate* mir = ins->mir();
18808 Register input = ToRegister(ins->input());
18809 Register dest = ToRegister(ins->output());
18811 const LAllocation* count = ins->count();
18812 if (count->isConstant()) {
18813 int32_t c = ToInt32(count) & 0x1F;
18814 if (mir->isLeftRotate()) {
18815 masm.rotateLeft(Imm32(c), input, dest);
18816 } else {
18817 masm.rotateRight(Imm32(c), input, dest);
18819 } else {
18820 Register creg = ToRegister(count);
18821 if (mir->isLeftRotate()) {
18822 masm.rotateLeft(creg, input, dest);
18823 } else {
18824 masm.rotateRight(creg, input, dest);
// Out-of-line path for LNaNToZero: taken when the input is NaN (or
// possibly -0.0); loads 0.0 into the output and rejoins.
18829 class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
18830 LNaNToZero* lir_;
18832 public:
18833 explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}
18835 void accept(CodeGenerator* codegen) override {
18836 codegen->visitOutOfLineNaNToZero(this);
18838 LNaNToZero* lir() const { return lir_; }
// Slow path body: overwrite the output with +0.0 and rejoin.
18841 void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
18842 FloatRegister output = ToFloatRegister(ool->lir()->output());
18843 masm.loadConstantDouble(0.0, output);
18844 masm.jump(ool->rejoin());
// Replace NaN (and, unless provably absent, -0.0) with +0.0.  The unordered
// compare catches NaN; when -0.0 is possible, DoubleEqualOrUnordered against
// 0.0 also catches both zeros, sending them to the OOL path that writes +0.0.
18847 void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
18848 FloatRegister input = ToFloatRegister(lir->input());
18850 OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
18851 addOutOfLineCode(ool, lir->mir());
18853 if (lir->mir()->operandIsNeverNegativeZero()) {
18854 masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
18855 } else {
18856 FloatRegister scratch = ToFloatRegister(lir->temp0());
18857 masm.loadConstantDouble(0.0, scratch);
18858 masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
18859 ool->entry());
18861 masm.bind(ool->rejoin());
18864 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
18865 Register obj = ToRegister(lir->object());
18866 Register output = ToRegister(lir->output());
18867 Register temp = ToRegister(lir->temp0());
18869 masm.setIsPackedArray(obj, output, temp);
18872 void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
18873 Register array = ToRegister(lir->array());
18874 Register temp0 = ToRegister(lir->temp0());
18875 Register temp1 = ToRegister(lir->temp1());
18877 Label bail;
18878 masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
18879 bailoutFrom(&bail, lir->snapshot());
18882 void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
18883 Register target = ToRegister(lir->target());
18884 ValueOperand out = ToOutValue(lir);
18885 Register scratch = out.scratchReg();
18887 using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
18888 OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
18889 StoreValueTo(out));
18891 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
18893 masm.loadObjProto(target, scratch);
18895 Label hasProto;
18896 masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
18898 // Call into the VM for lazy prototypes.
18899 masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());
18901 masm.moveValue(NullValue(), out);
18902 masm.jump(ool->rejoin());
18904 masm.bind(&hasProto);
18905 masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);
18907 masm.bind(ool->rejoin());
18910 void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
18911 pushArg(ToValue(lir, LObjectWithProto::PrototypeIndex));
18913 using Fn = PlainObject* (*)(JSContext*, HandleValue);
18914 callVM<Fn, js::ObjectWithProtoOperation>(lir);
18917 void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
18918 Register obj = ToRegister(lir->input());
18919 Register output = ToRegister(lir->output());
18921 masm.loadObjProto(obj, output);
18923 #ifdef DEBUG
18924 // We shouldn't encounter a null or lazy proto.
18925 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
18927 Label done;
18928 masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
18929 masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
18930 masm.bind(&done);
18931 #endif
18934 void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
18935 pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));
18937 using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
18938 callVM<Fn, js::BuiltinObjectOperation>(lir);
18941 void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
18942 Register callee = ToRegister(lir->callee());
18943 ValueOperand out = ToOutValue(lir);
18944 Register temp = ToRegister(lir->temp0());
18946 #ifdef DEBUG
18947 Label classCheckDone;
18948 masm.branchTestObjIsFunction(Assembler::Equal, callee, temp, callee,
18949 &classCheckDone);
18950 masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
18951 masm.bind(&classCheckDone);
18952 #endif
18954 // Load prototype of callee
18955 masm.loadObjProto(callee, temp);
18957 #ifdef DEBUG
18958 // We won't encounter a lazy proto, because |callee| is guaranteed to be a
18959 // JSFunction and only proxy objects can have a lazy proto.
18960 MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
18962 Label proxyCheckDone;
18963 masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
18964 masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
18965 masm.bind(&proxyCheckDone);
18966 #endif
18968 Label nullProto, done;
18969 masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
18971 // Box prototype and return
18972 masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
18973 masm.jump(&done);
18975 masm.bind(&nullProto);
18976 masm.moveValue(NullValue(), out);
18978 masm.bind(&done);
18981 void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
18982 Register func = ToRegister(lir->function());
18983 ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectIndex);
18985 masm.assertFunctionIsExtended(func);
18987 Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
18989 emitPreBarrier(addr);
18990 masm.storeValue(homeObject, addr);
18993 void CodeGenerator::visitIsTypedArrayConstructor(
18994 LIsTypedArrayConstructor* lir) {
18995 Register object = ToRegister(lir->object());
18996 Register output = ToRegister(lir->output());
18998 masm.setIsDefinitelyTypedArrayConstructor(object, output);
19001 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
19002 ValueOperand value = ToValue(lir, LLoadValueTag::ValueIndex);
19003 Register output = ToRegister(lir->output());
19005 Register tag = masm.extractTag(value, output);
19006 if (tag != output) {
19007 masm.mov(tag, output);
19011 void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
19012 Register lhs = ToRegister(lir->lhs());
19013 Register rhs = ToRegister(lir->rhs());
19015 bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());
19017 // If both lhs and rhs are numbers, can't use tag comparison to do inequality
19018 // comparison
19019 Label done;
19020 masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
19021 masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
19022 bailout(lir->snapshot());
19024 masm.bind(&done);
19027 void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
19028 Register object = ToRegister(lir->object());
19029 Register output = ToRegister(lir->output());
19031 masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
19032 masm.unboxObject(
19033 Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
19034 output);
19037 void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
19038 Register object = ToRegister(lir->object());
19039 Register temp0 = ToRegister(lir->temp0());
19040 Register temp1 = ToRegister(lir->temp1());
19041 Register temp2 = ToRegister(lir->temp2());
19043 masm.movePropertyKey(lir->mir()->propId(), temp1);
19044 masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp2);
19046 using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
19047 GetterSetter* getterSetter);
19048 masm.setupAlignedABICall();
19049 masm.loadJSContext(temp0);
19050 masm.passABIArg(temp0);
19051 masm.passABIArg(object);
19052 masm.passABIArg(temp1);
19053 masm.passABIArg(temp2);
19054 masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
19056 bailoutIfFalseBool(ReturnReg, lir->snapshot());
19059 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
19060 Register object = ToRegister(lir->object());
19061 Register temp = ToRegister(lir->temp0());
19063 Label bail;
19064 masm.branchIfObjectNotExtensible(object, temp, &bail);
19065 bailoutFrom(&bail, lir->snapshot());
19068 void CodeGenerator::visitGuardInt32IsNonNegative(
19069 LGuardInt32IsNonNegative* lir) {
19070 Register index = ToRegister(lir->index());
19072 bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
19075 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
19076 Register input = ToRegister(lir->input());
19078 bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
19079 lir->snapshot());
19080 bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
19081 lir->snapshot());
19084 void CodeGenerator::visitGuardIndexIsNotDenseElement(
19085 LGuardIndexIsNotDenseElement* lir) {
19086 Register object = ToRegister(lir->object());
19087 Register index = ToRegister(lir->index());
19088 Register temp = ToRegister(lir->temp0());
19089 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19091 // Load obj->elements.
19092 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19094 // Ensure index >= initLength or the element is a hole.
19095 Label notDense;
19096 Address capacity(temp, ObjectElements::offsetOfInitializedLength());
19097 masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);
19099 BaseValueIndex element(temp, index);
19100 masm.branchTestMagic(Assembler::Equal, element, &notDense);
19102 bailout(lir->snapshot());
19104 masm.bind(&notDense);
19107 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
19108 LGuardIndexIsValidUpdateOrAdd* lir) {
19109 Register object = ToRegister(lir->object());
19110 Register index = ToRegister(lir->index());
19111 Register temp = ToRegister(lir->temp0());
19112 Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());
19114 // Load obj->elements.
19115 masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
19117 Label success;
19119 // If length is writable, branch to &success. All indices are writable.
19120 Address flags(temp, ObjectElements::offsetOfFlags());
19121 masm.branchTest32(Assembler::Zero, flags,
19122 Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
19123 &success);
19125 // Otherwise, ensure index is in bounds.
19126 Label bail;
19127 Address length(temp, ObjectElements::offsetOfLength());
19128 masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
19129 masm.bind(&success);
19131 bailoutFrom(&bail, lir->snapshot());
19134 void CodeGenerator::visitCallAddOrUpdateSparseElement(
19135 LCallAddOrUpdateSparseElement* lir) {
19136 Register object = ToRegister(lir->object());
19137 Register index = ToRegister(lir->index());
19138 ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
19140 pushArg(Imm32(lir->mir()->strict()));
19141 pushArg(value);
19142 pushArg(index);
19143 pushArg(object);
19145 using Fn =
19146 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
19147 callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
19150 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
19151 Register object = ToRegister(lir->object());
19152 Register index = ToRegister(lir->index());
19154 pushArg(index);
19155 pushArg(object);
19157 using Fn =
19158 bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
19159 callVM<Fn, js::GetSparseElementHelper>(lir);
19162 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
19163 Register object = ToRegister(lir->object());
19164 Register index = ToRegister(lir->index());
19166 pushArg(index);
19167 pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
19168 pushArg(object);
19170 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19171 MutableHandleValue);
19172 callVM<Fn, js::NativeGetElement>(lir);
19175 void CodeGenerator::visitCallNativeGetElementSuper(
19176 LCallNativeGetElementSuper* lir) {
19177 Register object = ToRegister(lir->object());
19178 Register index = ToRegister(lir->index());
19179 ValueOperand receiver =
19180 ToValue(lir, LCallNativeGetElementSuper::ReceiverIndex);
19182 pushArg(index);
19183 pushArg(receiver);
19184 pushArg(object);
19186 using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
19187 MutableHandleValue);
19188 callVM<Fn, js::NativeGetElement>(lir);
19191 void CodeGenerator::visitCallObjectHasSparseElement(
19192 LCallObjectHasSparseElement* lir) {
19193 Register object = ToRegister(lir->object());
19194 Register index = ToRegister(lir->index());
19195 Register temp0 = ToRegister(lir->temp0());
19196 Register temp1 = ToRegister(lir->temp1());
19197 Register output = ToRegister(lir->output());
19199 masm.reserveStack(sizeof(Value));
19200 masm.moveStackPtrTo(temp1);
19202 using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
19203 masm.setupAlignedABICall();
19204 masm.loadJSContext(temp0);
19205 masm.passABIArg(temp0);
19206 masm.passABIArg(object);
19207 masm.passABIArg(index);
19208 masm.passABIArg(temp1);
19209 masm.callWithABI<Fn, HasNativeElementPure>();
19210 masm.storeCallPointerResult(temp0);
19212 Label bail, ok;
19213 uint32_t framePushed = masm.framePushed();
19214 masm.branchIfTrueBool(temp0, &ok);
19215 masm.adjustStack(sizeof(Value));
19216 masm.jump(&bail);
19218 masm.bind(&ok);
19219 masm.setFramePushed(framePushed);
19220 masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
19221 masm.adjustStack(sizeof(Value));
19223 bailoutFrom(&bail, lir->snapshot());
19226 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
19227 Register bits = ToRegister(ins->bits());
19228 Register input = ToRegister(ins->input());
19230 pushArg(bits);
19231 pushArg(input);
19233 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19234 callVM<Fn, jit::BigIntAsIntN>(ins);
19237 void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
19238 Register input = ToRegister(ins->input());
19239 Register temp = ToRegister(ins->temp());
19240 Register64 temp64 = ToRegister64(ins->temp64());
19241 Register output = ToRegister(ins->output());
19243 Label done, create;
19245 masm.movePtr(input, output);
19247 // Load the BigInt value as an int64.
19248 masm.loadBigInt64(input, temp64);
19250 // Create a new BigInt when the input exceeds the int64 range.
19251 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19252 Imm32(64 / BigInt::DigitBits), &create);
19254 // And create a new BigInt when the value and the BigInt have different signs.
19255 Label nonNegative;
19256 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19257 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
19258 masm.jump(&done);
19260 masm.bind(&nonNegative);
19261 masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);
19263 masm.bind(&create);
19264 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19266 masm.bind(&done);
19269 void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
19270 Register input = ToRegister(ins->input());
19271 Register temp = ToRegister(ins->temp());
19272 Register64 temp64 = ToRegister64(ins->temp64());
19273 Register output = ToRegister(ins->output());
19275 Label done, create;
19277 masm.movePtr(input, output);
19279 // Load the absolute value of the first digit.
19280 masm.loadFirstBigIntDigitOrZero(input, temp);
19282 // If the absolute value exceeds the int32 range, create a new BigInt.
19283 masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);
19285 // Also create a new BigInt if we have more than one digit.
19286 masm.branch32(Assembler::BelowOrEqual,
19287 Address(input, BigInt::offsetOfLength()), Imm32(1), &done);
19289 masm.bind(&create);
19291 // |temp| stores the absolute value, negate it when the sign flag is set.
19292 Label nonNegative;
19293 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19294 masm.negPtr(temp);
19295 masm.bind(&nonNegative);
19297 masm.move32To64SignExtend(temp, temp64);
19298 emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);
19300 masm.bind(&done);
19303 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
19304 Register bits = ToRegister(ins->bits());
19305 Register input = ToRegister(ins->input());
19307 pushArg(bits);
19308 pushArg(input);
19310 using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
19311 callVM<Fn, jit::BigIntAsUintN>(ins);
19314 void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
19315 Register input = ToRegister(ins->input());
19316 Register temp = ToRegister(ins->temp());
19317 Register64 temp64 = ToRegister64(ins->temp64());
19318 Register output = ToRegister(ins->output());
19320 Label done, create;
19322 masm.movePtr(input, output);
19324 // Load the BigInt value as an uint64.
19325 masm.loadBigInt64(input, temp64);
19327 // Create a new BigInt when the input exceeds the uint64 range.
19328 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19329 Imm32(64 / BigInt::DigitBits), &create);
19331 // And create a new BigInt when the input has the sign flag set.
19332 masm.branchIfBigIntIsNonNegative(input, &done);
19334 masm.bind(&create);
19335 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19337 masm.bind(&done);
19340 void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
19341 Register input = ToRegister(ins->input());
19342 Register temp = ToRegister(ins->temp());
19343 Register64 temp64 = ToRegister64(ins->temp64());
19344 Register output = ToRegister(ins->output());
19346 Label done, create;
19348 masm.movePtr(input, output);
19350 // Load the absolute value of the first digit.
19351 masm.loadFirstBigIntDigitOrZero(input, temp);
19353 // If the absolute value exceeds the uint32 range, create a new BigInt.
19354 #if JS_PUNBOX64
19355 masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
19356 #endif
19358 // Also create a new BigInt if we have more than one digit.
19359 masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
19360 Imm32(1), &create);
19362 // And create a new BigInt when the input has the sign flag set.
19363 masm.branchIfBigIntIsNonNegative(input, &done);
19365 masm.bind(&create);
19367 // |temp| stores the absolute value, negate it when the sign flag is set.
19368 Label nonNegative;
19369 masm.branchIfBigIntIsNonNegative(input, &nonNegative);
19370 masm.negPtr(temp);
19371 masm.bind(&nonNegative);
19373 masm.move32To64ZeroExtend(temp, temp64);
19374 emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);
19376 masm.bind(&done);
19379 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
19380 ValueOperand input = ToValue(ins, LGuardNonGCThing::InputIndex);
19382 Label bail;
19383 masm.branchTestGCThing(Assembler::Equal, input, &bail);
19384 bailoutFrom(&bail, ins->snapshot());
19387 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
19388 ValueOperand input = ToValue(ins, LToHashableNonGCThing::InputIndex);
19389 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19390 ValueOperand output = ToOutValue(ins);
19392 masm.toHashableNonGCThing(input, output, tempFloat);
19395 void CodeGenerator::visitToHashableString(LToHashableString* ins) {
19396 Register input = ToRegister(ins->input());
19397 Register output = ToRegister(ins->output());
19399 using Fn = JSAtom* (*)(JSContext*, JSString*);
19400 auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
19401 StoreRegisterTo(output));
19403 masm.branchTest32(Assembler::Zero, Address(input, JSString::offsetOfFlags()),
19404 Imm32(JSString::ATOM_BIT), ool->entry());
19405 masm.movePtr(input, output);
19406 masm.bind(ool->rejoin());
19409 void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
19410 ValueOperand input = ToValue(ins, LToHashableValue::InputIndex);
19411 FloatRegister tempFloat = ToFloatRegister(ins->temp0());
19412 ValueOperand output = ToOutValue(ins);
19414 Register str = output.scratchReg();
19416 using Fn = JSAtom* (*)(JSContext*, JSString*);
19417 auto* ool =
19418 oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));
19420 masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
19423 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
19424 ValueOperand input = ToValue(ins, LHashNonGCThing::InputIndex);
19425 Register temp = ToRegister(ins->temp0());
19426 Register output = ToRegister(ins->output());
19428 masm.prepareHashNonGCThing(input, output, temp);
19431 void CodeGenerator::visitHashString(LHashString* ins) {
19432 Register input = ToRegister(ins->input());
19433 Register temp = ToRegister(ins->temp0());
19434 Register output = ToRegister(ins->output());
19436 masm.prepareHashString(input, output, temp);
19439 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
19440 Register input = ToRegister(ins->input());
19441 Register output = ToRegister(ins->output());
19443 masm.prepareHashSymbol(input, output);
19446 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
19447 Register input = ToRegister(ins->input());
19448 Register temp0 = ToRegister(ins->temp0());
19449 Register temp1 = ToRegister(ins->temp1());
19450 Register temp2 = ToRegister(ins->temp2());
19451 Register output = ToRegister(ins->output());
19453 masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
19456 void CodeGenerator::visitHashObject(LHashObject* ins) {
19457 Register setObj = ToRegister(ins->setObject());
19458 ValueOperand input = ToValue(ins, LHashObject::InputIndex);
19459 Register temp0 = ToRegister(ins->temp0());
19460 Register temp1 = ToRegister(ins->temp1());
19461 Register temp2 = ToRegister(ins->temp2());
19462 Register temp3 = ToRegister(ins->temp3());
19463 Register output = ToRegister(ins->output());
19465 masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
19468 void CodeGenerator::visitHashValue(LHashValue* ins) {
19469 Register setObj = ToRegister(ins->setObject());
19470 ValueOperand input = ToValue(ins, LHashValue::InputIndex);
19471 Register temp0 = ToRegister(ins->temp0());
19472 Register temp1 = ToRegister(ins->temp1());
19473 Register temp2 = ToRegister(ins->temp2());
19474 Register temp3 = ToRegister(ins->temp3());
19475 Register output = ToRegister(ins->output());
19477 masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
19480 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
19481 Register setObj = ToRegister(ins->setObject());
19482 ValueOperand input = ToValue(ins, LSetObjectHasNonBigInt::InputIndex);
19483 Register hash = ToRegister(ins->hash());
19484 Register temp0 = ToRegister(ins->temp0());
19485 Register temp1 = ToRegister(ins->temp1());
19486 Register output = ToRegister(ins->output());
19488 masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
19491 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
19492 Register setObj = ToRegister(ins->setObject());
19493 ValueOperand input = ToValue(ins, LSetObjectHasBigInt::InputIndex);
19494 Register hash = ToRegister(ins->hash());
19495 Register temp0 = ToRegister(ins->temp0());
19496 Register temp1 = ToRegister(ins->temp1());
19497 Register temp2 = ToRegister(ins->temp2());
19498 Register temp3 = ToRegister(ins->temp3());
19499 Register output = ToRegister(ins->output());
19501 masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
19502 temp3);
19505 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
19506 Register setObj = ToRegister(ins->setObject());
19507 ValueOperand input = ToValue(ins, LSetObjectHasValue::InputIndex);
19508 Register hash = ToRegister(ins->hash());
19509 Register temp0 = ToRegister(ins->temp0());
19510 Register temp1 = ToRegister(ins->temp1());
19511 Register temp2 = ToRegister(ins->temp2());
19512 Register temp3 = ToRegister(ins->temp3());
19513 Register output = ToRegister(ins->output());
19515 masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
19516 temp3);
19519 void CodeGenerator::visitSetObjectHasValueVMCall(
19520 LSetObjectHasValueVMCall* ins) {
19521 pushArg(ToValue(ins, LSetObjectHasValueVMCall::InputIndex));
19522 pushArg(ToRegister(ins->setObject()));
19524 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19525 callVM<Fn, jit::SetObjectHas>(ins);
19528 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
19529 Register setObj = ToRegister(ins->setObject());
19530 Register output = ToRegister(ins->output());
19532 masm.loadSetObjectSize(setObj, output);
19535 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
19536 Register mapObj = ToRegister(ins->mapObject());
19537 ValueOperand input = ToValue(ins, LMapObjectHasNonBigInt::InputIndex);
19538 Register hash = ToRegister(ins->hash());
19539 Register temp0 = ToRegister(ins->temp0());
19540 Register temp1 = ToRegister(ins->temp1());
19541 Register output = ToRegister(ins->output());
19543 masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
19546 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
19547 Register mapObj = ToRegister(ins->mapObject());
19548 ValueOperand input = ToValue(ins, LMapObjectHasBigInt::InputIndex);
19549 Register hash = ToRegister(ins->hash());
19550 Register temp0 = ToRegister(ins->temp0());
19551 Register temp1 = ToRegister(ins->temp1());
19552 Register temp2 = ToRegister(ins->temp2());
19553 Register temp3 = ToRegister(ins->temp3());
19554 Register output = ToRegister(ins->output());
19556 masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19557 temp3);
19560 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
19561 Register mapObj = ToRegister(ins->mapObject());
19562 ValueOperand input = ToValue(ins, LMapObjectHasValue::InputIndex);
19563 Register hash = ToRegister(ins->hash());
19564 Register temp0 = ToRegister(ins->temp0());
19565 Register temp1 = ToRegister(ins->temp1());
19566 Register temp2 = ToRegister(ins->temp2());
19567 Register temp3 = ToRegister(ins->temp3());
19568 Register output = ToRegister(ins->output());
19570 masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
19571 temp3);
19574 void CodeGenerator::visitMapObjectHasValueVMCall(
19575 LMapObjectHasValueVMCall* ins) {
19576 pushArg(ToValue(ins, LMapObjectHasValueVMCall::InputIndex));
19577 pushArg(ToRegister(ins->mapObject()));
19579 using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
19580 callVM<Fn, jit::MapObjectHas>(ins);
19583 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
19584 Register mapObj = ToRegister(ins->mapObject());
19585 ValueOperand input = ToValue(ins, LMapObjectGetNonBigInt::InputIndex);
19586 Register hash = ToRegister(ins->hash());
19587 Register temp0 = ToRegister(ins->temp0());
19588 Register temp1 = ToRegister(ins->temp1());
19589 ValueOperand output = ToOutValue(ins);
19591 masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
19592 output.scratchReg());
19595 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
19596 Register mapObj = ToRegister(ins->mapObject());
19597 ValueOperand input = ToValue(ins, LMapObjectGetBigInt::InputIndex);
19598 Register hash = ToRegister(ins->hash());
19599 Register temp0 = ToRegister(ins->temp0());
19600 Register temp1 = ToRegister(ins->temp1());
19601 Register temp2 = ToRegister(ins->temp2());
19602 Register temp3 = ToRegister(ins->temp3());
19603 ValueOperand output = ToOutValue(ins);
19605 masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
19606 temp3, output.scratchReg());
19609 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
19610 Register mapObj = ToRegister(ins->mapObject());
19611 ValueOperand input = ToValue(ins, LMapObjectGetValue::InputIndex);
19612 Register hash = ToRegister(ins->hash());
19613 Register temp0 = ToRegister(ins->temp0());
19614 Register temp1 = ToRegister(ins->temp1());
19615 Register temp2 = ToRegister(ins->temp2());
19616 Register temp3 = ToRegister(ins->temp3());
19617 ValueOperand output = ToOutValue(ins);
19619 masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
19620 temp3, output.scratchReg());
19623 void CodeGenerator::visitMapObjectGetValueVMCall(
19624 LMapObjectGetValueVMCall* ins) {
19625 pushArg(ToValue(ins, LMapObjectGetValueVMCall::InputIndex));
19626 pushArg(ToRegister(ins->mapObject()));
19628 using Fn =
19629 bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
19630 callVM<Fn, jit::MapObjectGet>(ins);
19633 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
19634 Register mapObj = ToRegister(ins->mapObject());
19635 Register output = ToRegister(ins->output());
19637 masm.loadMapObjectSize(mapObj, output);
19640 template <size_t NumDefs>
19641 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
19642 wasm::JitCallStackArgVector stackArgs;
19643 masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
19644 if (masm.oom()) {
19645 return;
19648 MIonToWasmCall* mir = lir->mir();
19649 const wasm::FuncExport& funcExport = mir->funcExport();
19650 const wasm::FuncType& sig =
19651 mir->instance()->metadata().getFuncExportType(funcExport);
19653 WasmABIArgGenerator abi;
19654 for (size_t i = 0; i < lir->numOperands(); i++) {
19655 MIRType argMir;
19656 switch (sig.args()[i].kind()) {
19657 case wasm::ValType::I32:
19658 case wasm::ValType::I64:
19659 case wasm::ValType::F32:
19660 case wasm::ValType::F64:
19661 argMir = sig.args()[i].toMIRType();
19662 break;
19663 case wasm::ValType::V128:
19664 MOZ_CRASH("unexpected argument type when calling from ion to wasm");
19665 case wasm::ValType::Ref:
19666 // temporarilyUnsupportedReftypeForEntry() restricts args to externref
19667 MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
19668 // Argument is boxed on the JS side to an anyref, so passed as a
19669 // pointer here.
19670 argMir = sig.args()[i].toMIRType();
19671 break;
19674 ABIArg arg = abi.next(argMir);
19675 switch (arg.kind()) {
19676 case ABIArg::GPR:
19677 case ABIArg::FPU: {
19678 MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
19679 stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
19680 break;
19682 case ABIArg::Stack: {
19683 const LAllocation* larg = lir->getOperand(i);
19684 if (larg->isConstant()) {
19685 stackArgs.infallibleEmplaceBack(ToInt32(larg));
19686 } else if (larg->isGeneralReg()) {
19687 stackArgs.infallibleEmplaceBack(ToRegister(larg));
19688 } else if (larg->isFloatReg()) {
19689 stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
19690 } else {
19691 // Always use the stack pointer here because GenerateDirectCallFromJit
19692 // depends on this.
19693 Address addr = ToAddress<BaseRegForAddress::SP>(larg);
19694 stackArgs.infallibleEmplaceBack(addr);
19696 break;
19698 #ifdef JS_CODEGEN_REGISTER_PAIR
19699 case ABIArg::GPR_PAIR: {
19700 MOZ_CRASH(
19701 "no way to pass i64, and wasm uses hardfp for function calls");
19703 #endif
19704 case ABIArg::Uninitialized: {
19705 MOZ_CRASH("Uninitialized ABIArg kind");
19710 const wasm::ValTypeVector& results = sig.results();
19711 if (results.length() == 0) {
19712 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
19713 } else {
19714 MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
19715 switch (results[0].kind()) {
19716 case wasm::ValType::I32:
19717 MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
19718 MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
19719 break;
19720 case wasm::ValType::I64:
19721 MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
19722 MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
19723 break;
19724 case wasm::ValType::F32:
19725 MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
19726 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
19727 break;
19728 case wasm::ValType::F64:
19729 MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
19730 MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
19731 break;
19732 case wasm::ValType::V128:
19733 MOZ_CRASH("unexpected return type when calling from ion to wasm");
19734 case wasm::ValType::Ref:
19735 // The wasm stubs layer unboxes anything that needs to be unboxed
19736 // and leaves it in a Value. A FuncRef/EqRef we could in principle
19737 // leave it as a raw object pointer but for now it complicates the
19738 // API to do so.
19739 MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
19740 break;
19744 WasmInstanceObject* instObj = lir->mir()->instanceObject();
19746 Register scratch = ToRegister(lir->temp());
19748 uint32_t callOffset;
19749 ensureOsiSpace();
19750 GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
19751 scratch, &callOffset);
19753 // Add the instance object to the constant pool, so it is transferred to
19754 // the owning IonScript and so that it gets traced as long as the IonScript
19755 // lives.
19757 uint32_t unused;
19758 masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
19760 markSafepointAt(callOffset, lir);
// Lowering for a direct Ion -> wasm call. All flavors of this LIR node share
// a single emission path; see emitIonToWasmCallBase.
void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  emitIonToWasmCallBase(lir);
}
// Same as visitIonToWasmCall, for the variant whose result is a boxed Value
// (the "V" suffix); the shared helper asserts the expected result type.
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  emitIonToWasmCallBase(lir);
}
// Same as visitIonToWasmCall, for the variant returning a 64-bit integer
// result; the shared helper asserts the expected result register pair.
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  emitIonToWasmCallBase(lir);
}
19773 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
19774 masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
// Emit a full memory barrier for wasm. Only valid when compiling wasm code.
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  // MembarFull orders all prior loads/stores before all subsequent ones.
  masm.memoryBarrier(MembarFull);
}
19782 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
19783 ValueOperand input = ToValue(lir, LWasmAnyRefFromJSValue::InputIndex);
19784 Register output = ToRegister(lir->output());
19785 FloatRegister tempFloat = ToFloatRegister(lir->temp0());
19787 using Fn = JSObject* (*)(JSContext* cx, HandleValue value);
19788 OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
19789 lir, ArgList(input), StoreRegisterTo(output));
19790 masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
19791 masm.bind(oolBoxValue->rejoin());
19794 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
19795 Register input = ToRegister(lir->input());
19796 Register output = ToRegister(lir->output());
19797 masm.convertObjectToWasmAnyRef(input, output);
19800 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
19801 Register input = ToRegister(lir->input());
19802 Register output = ToRegister(lir->output());
19803 masm.convertStringToWasmAnyRef(input, output);
19806 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
19807 Register value = ToRegister(lir->value());
19808 Register output = ToRegister(lir->output());
19809 masm.truncate32ToWasmI31Ref(value, output);
19812 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
19813 Register value = ToRegister(lir->value());
19814 Register output = ToRegister(lir->output());
19815 if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
19816 masm.convertWasmI31RefTo32Signed(value, output);
19817 } else {
19818 masm.convertWasmI31RefTo32Unsigned(value, output);
19822 #ifdef FUZZING_JS_FUZZILLI
// Fold the raw 64-bit pattern of |floatDouble| into a 32-bit hash in
// |output|: output = low32(bits) + high32(bits). Clobbers |scratch|.
void CodeGenerator::emitFuzzilliHashDouble(FloatRegister floatDouble,
                                           Register scratch, Register output) {
#  ifdef JS_PUNBOX64
  Register64 reg64_1(scratch);
  Register64 reg64_2(output);
  // scratch <- all 64 bits of the double.
  masm.moveDoubleToGPR64(floatDouble, reg64_1);
  masm.move64(reg64_1, reg64_2);
  // output <- high 32 bits.
  masm.rshift64(Imm32(32), reg64_2);
  // output <- high 32 + low 32 (add32 reads only scratch's low word).
  masm.add32(scratch, output);
#  else
  // On 32-bit targets the pair is (high, low): scratch gets the high word,
  // output the low word, then output += scratch.
  Register64 reg64(scratch, output);
  masm.moveDoubleToGPR64(floatDouble, reg64);
  masm.add32(scratch, output);
#  endif
}
// Hash a JSObject* into |output| via a VM call to FuzzilliHashObjectInl.
// There is no inline fast path: we jump straight to the out-of-line stub
// and fall through at its rejoin point.
void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
                                           Register output) {
  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
      lir, ArgList(obj), StoreRegisterTo(output));

  // Unconditionally take the OOL path; oolCallVM only emits the stub.
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
// Hash a BigInt* into |output| through a C++ ABI call to
// js::FuzzilliHashBigInt. All volatile registers except |output| are
// saved and restored around the call.
void CodeGenerator::emitFuzzilliHashBigInt(Register bigInt, Register output) {
  LiveRegisterSet volatileRegs(GeneralRegisterSet::All(),
                               FloatRegisterSet::All());
  volatileRegs.takeUnchecked(output);
  masm.PushRegsInMask(volatileRegs);

  using Fn = uint32_t (*)(BigInt* bigInt);
  // |output| doubles as the ABI-call scratch register; its previous value
  // is dead since it receives the call result below.
  masm.setupUnalignedABICall(output);
  masm.passABIArg(bigInt);
  masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
  masm.storeCallInt32Result(output);

  masm.PopRegsInMask(volatileRegs);
}
// Compute the Fuzzilli hash of a boxed Value. Dispatches on the value's tag:
// BigInt and Object go through helper/VM calls; Int32, Null (1),
// Undefined (2), Boolean (payload + 3) and Double are all funneled through
// the double-bits hash; every other tag hashes to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Value);

  ValueOperand value = ToValue(ins, 0);

  Label isDouble, isObject, isBigInt, done;

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

#  ifdef JS_PUNBOX64
  // NOTE: tagReg aliases |scratch| (both are getTemp(0)). That is safe
  // because each unbox below writes |scratch| only on a taken branch that
  // immediately jumps away, so the tag survives the remaining tests.
  Register tagReg = ToRegister(ins->getTemp(0));
  masm.splitTag(value, tagReg);
#  else
  Register tagReg = value.typeReg();
#  endif

  // BigInt: unbox the pointer and hash it out of line.
  Label noBigInt;
  masm.branchTestBigInt(Assembler::NotEqual, tagReg, &noBigInt);
  masm.unboxBigInt(value, scratch);
  masm.jump(&isBigInt);
  masm.bind(&noBigInt);

  // Object: unbox the pointer and hash it via a VM call.
  Label noObject;
  masm.branchTestObject(Assembler::NotEqual, tagReg, &noObject);
  masm.unboxObject(value, scratch);
  masm.jump(&isObject);
  masm.bind(&noObject);

  // Int32: hash the value converted to double.
  Label noInt32;
  masm.branchTestInt32(Assembler::NotEqual, tagReg, &noInt32);
  masm.unboxInt32(value, scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noInt32);

  // Null hashes as the double 1.0.
  Label noNull;
  masm.branchTestNull(Assembler::NotEqual, tagReg, &noNull);
  masm.move32(Imm32(1), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noNull);

  // Undefined hashes as the double 2.0.
  Label noUndefined;
  masm.branchTestUndefined(Assembler::NotEqual, tagReg, &noUndefined);
  masm.move32(Imm32(2), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noUndefined);

  // Booleans hash as the double (payload + 3), i.e. 3.0 or 4.0.
  Label noBoolean;
  masm.branchTestBoolean(Assembler::NotEqual, tagReg, &noBoolean);
  masm.unboxBoolean(value, scratch);
  masm.add32(Imm32(3), scratch);
  masm.convertInt32ToDouble(scratch, scratchFloat);
  masm.jump(&isDouble);
  masm.bind(&noBoolean);

  // Double: canonicalize (for deterministic builds) before hashing the bits.
  Label noDouble;
  masm.branchTestDouble(Assembler::NotEqual, tagReg, &noDouble);
  masm.unboxDouble(value, scratchFloat);
  masm.canonicalizeDoubleIfDeterministic(scratchFloat);

  masm.jump(&isDouble);
  masm.bind(&noDouble);
  // Any other tag (string, symbol, ...) hashes to 0.
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  masm.bind(&isBigInt);
  emitFuzzilliHashBigInt(scratch, output);
  masm.jump(&done);

  masm.bind(&isObject);
  emitFuzzilliHashObject(ins, scratch, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  emitFuzzilliHashDouble(scratchFloat, scratch, output);

  masm.bind(&done);
}
// Compute the Fuzzilli hash of an unboxed (statically typed) operand.
// Mirrors the per-tag cases of visitFuzzilliHashV with the same constants:
// Null -> 1, Undefined -> 2, Boolean -> payload + 3; numeric types are
// hashed through their double bit pattern.
void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
  const LAllocation* value = ins->value();
  MIRType mirType = ins->mir()->getOperand(0)->type();

  FloatRegister scratchFloat = ToFloatRegister(ins->getTemp(1));
  Register scratch = ToRegister(ins->getTemp(0));
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  if (mirType == MIRType::Object) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashObject(ins, scratch, output);
  } else if (mirType == MIRType::BigInt) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    emitFuzzilliHashBigInt(scratch, output);
  } else if (mirType == MIRType::Double) {
    MOZ_ASSERT(value->isFloatReg());
    masm.moveDouble(value->toFloatReg()->reg(), scratchFloat);
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Float32) {
    MOZ_ASSERT(value->isFloatReg());
    // Widen to double so Float32 hashes like the equivalent double.
    masm.convertFloat32ToDouble(value->toFloatReg()->reg(), scratchFloat);
    masm.canonicalizeDoubleIfDeterministic(scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Int32) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Null) {
    // Null has no payload register; hash the constant 1.
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(1), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Undefined) {
    // Undefined has no payload register; hash the constant 2.
    MOZ_ASSERT(value->isBogus());
    masm.move32(Imm32(2), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else if (mirType == MIRType::Boolean) {
    MOZ_ASSERT(value->isGeneralReg());
    masm.mov(value->toGeneralReg()->reg(), scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    emitFuzzilliHashDouble(scratchFloat, scratch, output);
  } else {
    MOZ_CRASH("unexpected type");
  }
}
// Mix a previously computed 32-bit hash into the per-context execution hash:
// bump cx->executionHashInputs and update
// cx->executionHash = rotl32(cx->executionHash + value, 1).
void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
  const LAllocation* value = ins->value();
  MOZ_ASSERT(ins->mir()->getOperand(0)->type() == MIRType::Int32);
  MOZ_ASSERT(value->isGeneralReg());

  Register scratchJSContext = ToRegister(ins->getTemp(0));
  Register scratch = ToRegister(ins->getTemp(1));

  masm.loadJSContext(scratchJSContext);

  // stats
  Address addrExecHashInputs(scratchJSContext,
                             offsetof(JSContext, executionHashInputs));
  masm.load32(addrExecHashInputs, scratch);
  masm.add32(Imm32(1), scratch);
  masm.store32(scratch, addrExecHashInputs);

  // Fold the input into the running hash with an add and a 1-bit rotate.
  Address addrExecHash(scratchJSContext, offsetof(JSContext, executionHash));
  masm.load32(addrExecHash, scratch);
  masm.add32(value->toGeneralReg()->reg(), scratch);
  masm.rotateLeft(Imm32(1), scratch, scratch);
  masm.store32(scratch, addrExecHash);
}
#endif  // FUZZING_JS_FUZZILLI

// CodeGenerator is always used directly (never through a base-class
// pointer), so it must stay non-polymorphic: no vtable, no accidental
// virtual dispatch overhead.
static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");

}  // namespace jit
}  // namespace js