/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_x86_Assembler_x86_h
#define jit_x86_Assembler_x86_h

#include <iterator>

#include "jit/CompactBuffer.h"
#include "jit/JitCode.h"
#include "jit/shared/Assembler-shared.h"
#include "jit/x86-shared/Constants-x86-shared.h"

namespace js {
namespace jit {

static constexpr Register eax{X86Encoding::rax};
static constexpr Register ecx{X86Encoding::rcx};
static constexpr Register edx{X86Encoding::rdx};
static constexpr Register ebx{X86Encoding::rbx};
static constexpr Register esp{X86Encoding::rsp};
static constexpr Register ebp{X86Encoding::rbp};
static constexpr Register esi{X86Encoding::rsi};
static constexpr Register edi{X86Encoding::rdi};

static constexpr FloatRegister xmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister xmm1 =
    FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
static constexpr FloatRegister xmm2 =
    FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
static constexpr FloatRegister xmm3 =
    FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
static constexpr FloatRegister xmm4 =
    FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
static constexpr FloatRegister xmm5 =
    FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
static constexpr FloatRegister xmm6 =
    FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
static constexpr FloatRegister xmm7 =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);

// Vector registers fixed for use with some instructions, e.g. PBLENDVB.
static constexpr FloatRegister vmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);

static constexpr Register InvalidReg{X86Encoding::invalid_reg};
static constexpr FloatRegister InvalidFloatReg = FloatRegister();

static constexpr Register JSReturnReg_Type = ecx;
static constexpr Register JSReturnReg_Data = edx;
static constexpr Register StackPointer = esp;
static constexpr Register FramePointer = ebp;
static constexpr Register ReturnReg = eax;
static constexpr FloatRegister ReturnFloat32Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static constexpr FloatRegister ReturnDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister ReturnSimd128Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
static constexpr FloatRegister ScratchFloat32Reg_ =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
static constexpr FloatRegister ScratchDoubleReg_ =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static constexpr FloatRegister ScratchSimd128Reg =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);

// Note, EDX:EAX is the system ABI 64-bit return register, and it is to our
// advantage to keep the SpiderMonkey ABI in sync with the system ABI.
//
// However, using EDX here means that we have to use a register that does not
// have a word or byte part (e.g. DX/DH/DL) in some other places; notably,
// ABINonArgReturnReg1 is EDI. If this becomes a problem and ReturnReg64 has to
// be something other than EDX:EAX, then jitted code that calls directly to C++
// will need to shuffle the return value from EDX:EAX into ReturnReg64 directly
// after the call. See bug 1730161 for discussion and a patch that does that.
static constexpr Register64 ReturnReg64(edx, eax);

// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
static constexpr Register CallTempReg0 = edi;
static constexpr Register CallTempReg1 = eax;
static constexpr Register CallTempReg2 = ebx;
static constexpr Register CallTempReg3 = ecx;
static constexpr Register CallTempReg4 = esi;
static constexpr Register CallTempReg5 = edx;

// We have no arg regs, so our NonArgRegs are just our CallTempReg*
static constexpr Register CallTempNonArgRegs[] = {edi, eax, ebx, ecx, esi, edx};
static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);

class ABIArgGenerator {
  uint32_t stackOffset_;
  ABIArg current_;

 public:
  ABIArgGenerator();
  ABIArg next(MIRType argType);
  ABIArg& current() { return current_; }
  uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
};
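
// Illustrative sketch (not part of the original header): a caller typically
// walks the ABI with ABIArgGenerator, for example
//   ABIArgGenerator abi;
//   ABIArg a0 = abi.next(MIRType::Int32);   // on x86 all args go on the stack
//   ABIArg a1 = abi.next(MIRType::Double);
//   uint32_t stackBytes = abi.stackBytesConsumedSoFar();
// The MIRType values above are only an example.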

// These registers may be volatile or nonvolatile.
static constexpr Register ABINonArgReg0 = eax;
static constexpr Register ABINonArgReg1 = ebx;
static constexpr Register ABINonArgReg2 = ecx;
static constexpr Register ABINonArgReg3 = edx;

// This register may be volatile or nonvolatile. Avoid xmm7, which is the
// ScratchDoubleReg_.
static constexpr FloatRegister ABINonArgDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);

// These registers may be volatile or nonvolatile.
// Note: these three registers are all guaranteed to be different.
static constexpr Register ABINonArgReturnReg0 = ecx;
static constexpr Register ABINonArgReturnReg1 = edi;
static constexpr Register ABINonVolatileReg = ebx;

// This register is guaranteed to be clobberable during the prologue and
// epilogue of an ABI call which must preserve ABI argument, return, and
// non-volatile registers.
static constexpr Register ABINonArgReturnVolatileReg = ecx;

// Instance pointer argument register for WebAssembly functions. This must not
// alias any other register used for passing function arguments or return
// values. Preserved by WebAssembly functions.
static constexpr Register InstanceReg = esi;

// Registers used for asm.js/wasm table calls. These registers must be disjoint
// from the ABI argument registers, InstanceReg and each other.
static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;

// Registers used for ref calls.
static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
static constexpr Register WasmCallRefReg = ABINonArgReg3;

// Registers used for wasm tail call operations.
static constexpr Register WasmTailCallInstanceScratchReg = ABINonArgReg1;
static constexpr Register WasmTailCallRAScratchReg = ABINonArgReg2;
static constexpr Register WasmTailCallFPScratchReg = ABINonArgReg3;

// Register used as a scratch along the return path in the fast js -> wasm stub
// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
// It must be a volatile register.
static constexpr Register WasmJitEntryReturnScratch = ebx;

static constexpr Register OsrFrameReg = edx;
static constexpr Register PreBarrierReg = edx;

// Not enough registers for a PC register (R0-R2 use 2 registers each).
static constexpr Register InterpreterPCReg = InvalidReg;

// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
// JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
static constexpr Register RegExpMatcherStringReg = CallTempReg1;
static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;

// Registers used by RegExpExecTest stub (do not use ReturnReg).
static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
static constexpr Register RegExpExecTestStringReg = CallTempReg2;

// Registers used by RegExpSearcher stub (do not use ReturnReg).
static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
static constexpr Register RegExpSearcherStringReg = CallTempReg2;
static constexpr Register RegExpSearcherLastIndexReg = CallTempReg3;

// The GCC stack is aligned on 16-byte boundaries. Ion does not maintain this
// alignment for internal calls; wasm code does.
#if defined(__GNUC__) && !defined(__MINGW32__)
static constexpr uint32_t ABIStackAlignment = 16;
#else
static constexpr uint32_t ABIStackAlignment = 4;
#endif
static constexpr uint32_t CodeAlignment = 16;
static constexpr uint32_t JitStackAlignment = 16;

static constexpr uint32_t JitStackValueAlignment =
    JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 &&
                  JitStackValueAlignment >= 1,
              "Stack alignment should be a non-zero multiple of sizeof(Value)");

static constexpr uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
              "Code alignment should be larger than any of the alignments "
              "which are used for the constant sections of the code buffer. "
              "Thus it should be larger than the alignment for SIMD "
              "constants.");

static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
              "Stack alignment should be larger than any of the alignments "
              "which are used for spilled values. Thus it should be larger "
              "than the alignment for SIMD accesses.");

static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
static constexpr uint32_t WasmTrapInstructionLength = 2;
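// (WasmTrapInstructionLength == 2 presumably corresponds to the two-byte ud2
// instruction used as the wasm trap instruction on x86.)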

// See comments in wasm::GenerateFunctionPrologue. The difference between these
// is the size of the largest callable prologue on the platform. (We could make
// the tail offset 3, but I have opted for 4 as that results in a better-aligned
// branch target.)
static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;

struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
};

struct ImmType : public ImmTag {
  explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
};

static constexpr Scale ScalePointer = TimesFour;
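// (ScalePointer is TimesFour because pointers are 4 bytes on x86-32, so
// scaled-index addressing of pointer-sized elements uses a *4 scale.)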

}  // namespace jit
}  // namespace js

#include "jit/x86-shared/Assembler-x86-shared.h"

namespace js {
namespace jit {

static inline Operand LowWord(const Operand& op) {
  switch (op.kind()) {
    case Operand::MEM_REG_DISP:
      return Operand(LowWord(op.toAddress()));
    case Operand::MEM_SCALE:
      return Operand(LowWord(op.toBaseIndex()));
    default:
      MOZ_CRASH("Invalid operand type");
  }
}

static inline Operand HighWord(const Operand& op) {
  switch (op.kind()) {
    case Operand::MEM_REG_DISP:
      return Operand(HighWord(op.toAddress()));
    case Operand::MEM_SCALE:
      return Operand(HighWord(op.toBaseIndex()));
    default:
      MOZ_CRASH("Invalid operand type");
  }
}

// Return operand from a JS -> JS call.
static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
                                              JSReturnReg_Data};
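
// (On x86-32 a boxed Value does not fit in a single register, so a JS -> JS
// call returns the tag in ecx and the payload in edx, per the JSReturnReg_*
// definitions above.)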

class Assembler : public AssemblerX86Shared {
  Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;

  void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
    enoughMemory_ &=
        jumps_.append(RelativePatch(src.offset(), target.value, kind));
    if (kind == RelocationKind::JITCODE) {
      jumpRelocations_.writeUnsigned(src.offset());
    }
  }

 public:
  using AssemblerX86Shared::call;
  using AssemblerX86Shared::cmpl;
  using AssemblerX86Shared::j;
  using AssemblerX86Shared::jmp;
  using AssemblerX86Shared::movl;
  using AssemblerX86Shared::pop;
  using AssemblerX86Shared::push;
  using AssemblerX86Shared::retarget;
  using AssemblerX86Shared::vmovsd;
  using AssemblerX86Shared::vmovss;

  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer);

  void assertNoGCThings() const {
#ifdef DEBUG
    MOZ_ASSERT(dataRelocations_.length() == 0);
    for (auto& j : jumps_) {
      MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
    }
#endif
  }

  // Actual assembly emitting functions.

  void push(ImmGCPtr ptr) {
    masm.push_i32(int32_t(ptr.value));
    writeDataRelocation(ptr);
  }
  void push(const ImmWord imm) { push(Imm32(imm.value)); }
  void push(const ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
  void push(FloatRegister src) {
    subl(Imm32(sizeof(double)), StackPointer);
    vmovsd(src, Address(StackPointer, 0));
  }

  CodeOffset pushWithPatch(ImmWord word) {
    masm.push_i32(int32_t(word.value));
    return CodeOffset(masm.currentOffset());
  }

  void pop(FloatRegister src) {
    vmovsd(Address(StackPointer, 0), src);
    addl(Imm32(sizeof(double)), StackPointer);
  }

  CodeOffset movWithPatch(ImmWord word, Register dest) {
    movl(Imm32(word.value), dest);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
  }

  void movl(ImmGCPtr ptr, Register dest) {
    masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
    writeDataRelocation(ptr);
  }
  void movl(ImmGCPtr ptr, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
        writeDataRelocation(ptr);
        break;
      case Operand::MEM_REG_DISP:
        masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
        writeDataRelocation(ptr);
        break;
      case Operand::MEM_SCALE:
        masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(),
                       dest.index(), dest.scale());
        writeDataRelocation(ptr);
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void movl(ImmWord imm, Register dest) {
    masm.movl_i32r(imm.value, dest.encoding());
  }
  void movl(ImmPtr imm, Register dest) {
    movl(ImmWord(uintptr_t(imm.value)), dest);
  }
  void mov(ImmWord imm, Register dest) {
    // Use xor for setting registers to zero, as it is specially optimized
    // for this purpose on modern hardware. Note that it does clobber FLAGS
    // though.
    if (imm.value == 0) {
      xorl(dest, dest);
    } else {
      movl(imm, dest);
    }
  }
  void mov(ImmPtr imm, Register dest) {
    mov(ImmWord(uintptr_t(imm.value)), dest);
  }
  void mov(wasm::SymbolicAddress imm, Register dest) {
    masm.movl_i32r(-1, dest.encoding());
    append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
  }
  void mov(const Operand& src, Register dest) { movl(src, dest); }
  void mov(Register src, const Operand& dest) { movl(src, dest); }
  void mov(Imm32 imm, const Operand& dest) { movl(imm, dest); }
  void mov(CodeLabel* label, Register dest) {
    // Put a placeholder value in the instruction stream.
    masm.movl_i32r(0, dest.encoding());
    label->patchAt()->bind(masm.size());
  }
  void mov(Register src, Register dest) { movl(src, dest); }
  void xchg(Register src, Register dest) { xchgl(src, dest); }
  void lea(const Operand& src, Register dest) { return leal(src, dest); }
  void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
  void cmovzPtr(const Operand& src, Register dest) { return cmovzl(src, dest); }

  void fstp32(const Operand& src) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.fstp32_m(src.disp(), src.base());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void faddp() { masm.faddp(); }

  void cmpl(ImmWord rhs, Register lhs) {
    masm.cmpl_ir(rhs.value, lhs.encoding());
  }
  void cmpl(ImmPtr rhs, Register lhs) {
    cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
  }
  void cmpl(ImmGCPtr rhs, Register lhs) {
    masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
    writeDataRelocation(rhs);
  }
  void cmpl(Register rhs, Register lhs) {
    masm.cmpl_rr(rhs.encoding(), lhs.encoding());
  }
  void cmpl(ImmGCPtr rhs, const Operand& lhs) {
    switch (lhs.kind()) {
      case Operand::REG:
        masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
        writeDataRelocation(rhs);
        break;
      case Operand::MEM_REG_DISP:
        masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
        writeDataRelocation(rhs);
        break;
      case Operand::MEM_ADDRESS32:
        masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
        writeDataRelocation(rhs);
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
    masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
    append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
  }
  void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
    JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
    append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
  }

  void adcl(Imm32 imm, Register dest) {
    masm.adcl_ir(imm.value, dest.encoding());
  }
  void adcl(Register src, Register dest) {
    masm.adcl_rr(src.encoding(), dest.encoding());
  }
  void adcl(Operand src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.adcl_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.adcl_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void sbbl(Imm32 imm, Register dest) {
    masm.sbbl_ir(imm.value, dest.encoding());
  }
  void sbbl(Register src, Register dest) {
    masm.sbbl_rr(src.encoding(), dest.encoding());
  }
  void sbbl(Operand src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.sbbl_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.sbbl_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void mull(Register multiplier) { masm.mull_r(multiplier.encoding()); }

  void shldl(const Imm32 imm, Register src, Register dest) {
    masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
  }
  void shrdl(const Imm32 imm, Register src, Register dest) {
    masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
  }

  void vhaddpd(FloatRegister rhs, FloatRegister lhsDest) {
    MOZ_ASSERT(HasSSE3());
    MOZ_ASSERT(rhs.size() == 16);
    MOZ_ASSERT(lhsDest.size() == 16);
    masm.vhaddpd_rr(rhs.encoding(), lhsDest.encoding(), lhsDest.encoding());
  }

  void fild(const Operand& src) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.fild_m(src.disp(), src.base());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jmp();
    addPendingJump(src, target, reloc);
  }
  void j(Condition cond, ImmPtr target,
         RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
    addPendingJump(src, target, reloc);
  }

  void jmp(JitCode* target) {
    jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void j(Condition cond, JitCode* target) {
    j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void call(JitCode* target) {
    JmpSrc src = masm.call();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
  void call(ImmPtr target) {
    JmpSrc src = masm.call();
    addPendingJump(src, target, RelocationKind::HARDCODED);
  }

  // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
  // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    CodeOffset offset(size());
    JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
    MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
  }

  static size_t ToggledCallSize(uint8_t* code) {
    // Size of a call instruction.
    return 5;
  }
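
  // For context: on x86, CALL rel32 (opcode 0xE8 followed by a 32-bit
  // displacement) and CMP eax, imm32 (opcode 0x3D followed by a 32-bit
  // immediate) are both five bytes long, which is why toggling between the
  // two forms can be done by patching in place.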

  // Re-routes pending jumps to an external target, flushing the label in the
  // process.
  void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
    if (label->used()) {
      bool more;
      X86Encoding::JmpSrc jmp(label->offset());
      do {
        X86Encoding::JmpSrc next;
        more = masm.nextJump(jmp, &next);
        addPendingJump(jmp, target, reloc);
        jmp = next;
      } while (more);
    }
    label->reset();
  }

  // Move a 32-bit immediate into a register where the immediate can be
  // patched.
  CodeOffset movlWithPatch(Imm32 imm, Register dest) {
    masm.movl_i32r(imm.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Load from *(base + disp32) where disp32 can be patched.
  CodeOffset movsblWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movsbl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzblWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movzbl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movswlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movswl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movzwl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movl_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  void vmovss(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_mr(src.address(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.vmovss_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovd_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  void vmovsd(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_mr(src.address(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.vmovsd_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovups_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovdqu_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }

  // Store to *(base + disp32) where disp32 can be patched.
  CodeOffset movbWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movb_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movwWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movw_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movl_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP: {
        return movlWithPatch(regLow, LowWord(dest));
      }
      case Operand::MEM_ADDRESS32: {
        Operand low(
            PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
        return movlWithPatch(regLow, low);
      }
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP: {
        return movlWithPatch(regHigh, HighWord(dest));
      }
      case Operand::MEM_ADDRESS32: {
        Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) +
                                            INT64HIGH_OFFSET));
        return movlWithPatch(regHigh, high);
      }
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovd_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovq_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  void vmovss(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovss_rm(src.encoding(), dest.address());
        break;
      case Operand::MEM_SCALE:
        masm.vmovss_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  void vmovsd(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovsd_rm(src.encoding(), dest.address());
        break;
      case Operand::MEM_SCALE:
        masm.vmovsd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovups_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
    MOZ_ASSERT(HasSSE2());
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
        masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.vmovdqu_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
    return CodeOffset(masm.currentOffset());
  }

  // Load from *(addr + index*scale) where addr can be patched.
  CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index,
                           Scale scale, Register dest) {
    masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Load from *src where src can be patched.
  CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movsbl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movzbl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movswl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movzwl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
    masm.movl_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovss_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovd_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovq_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovsd_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqa_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqu_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovaps_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovups_mr(src.addr, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }

  // Store to *dest where dest can be patched.
  CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movb_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movw_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
    masm.movl_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovss_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovd_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovq_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovsd_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqa_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovaps_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovdqu_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
    MOZ_ASSERT(HasSSE2());
    masm.vmovups_rm(src.encoding(), dest.addr);
    return CodeOffset(masm.currentOffset());
  }
};

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
                                       uint32_t usedFloatArgs, Register* out) {
  if (usedIntArgs >= NumCallTempNonArgRegs) {
    return false;
  }
  *out = CallTempNonArgRegs[usedIntArgs];
  return true;
}
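
// Illustrative sketch (not part of the original header): since x86-32 passes
// all integer arguments on the stack, GetTempRegForIntArg simply hands out
// CallTempNonArgRegs in order, e.g.
//   Register tmp;
//   if (!GetTempRegForIntArg(/* usedIntArgs = */ 0, /* usedFloatArgs = */ 0,
//                            &tmp)) {
//     MOZ_CRASH("ran out of temp registers");
//   }
//   // tmp is now edi (CallTempNonArgRegs[0]).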

}  // namespace jit
}  // namespace js

#endif /* jit_x86_Assembler_x86_h */