hphp/vixl/a64/assembler-a64.cc
1 // Copyright 2013, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "hphp/vixl/a64/assembler-a64.h"
30 #include <cmath>
32 namespace vixl {
34 // CPURegList utilities.
35 CPURegister CPURegList::PopLowestIndex() {
36 if (IsEmpty()) {
37 return NoCPUReg;
39 int index = CountTrailingZeros(list_, kRegListSizeInBits);
40 assert((1 << index) & list_);
41 Remove(index);
42 return CPURegister(index, size_, type_);
46 CPURegister CPURegList::PopHighestIndex() {
47 assert(IsValid());
48 if (IsEmpty()) {
49 return NoCPUReg;
51 int index = CountLeadingZeros(list_, kRegListSizeInBits);
52 index = kRegListSizeInBits - 1 - index;
53 assert((1 << index) & list_);
54 Remove(index);
55 return CPURegister(index, size_, type_);
59 bool CPURegList::IsValid() const {
60 if ((type_ == CPURegister::kRegister) ||
61 (type_ == CPURegister::kFPRegister)) {
62 bool is_valid = true;
63 // Try to create a CPURegister for each element in the list.
64 for (int i = 0; i < kRegListSizeInBits; i++) {
65 if (((list_ >> i) & 1) != 0) {
66 is_valid &= CPURegister(i, size_, type_).IsValid();
69 return is_valid;
70 } else {
71 return false;
76 void CPURegList::RemoveCalleeSaved() {
77 if (type() == CPURegister::kRegister) {
78 Remove(GetCalleeSaved(RegisterSizeInBits()));
79 } else if (type() == CPURegister::kFPRegister) {
80 Remove(GetCalleeSavedFP(RegisterSizeInBits()));
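// Registers x19-x29 (including the frame pointer, x29) are callee-saved.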
85 CPURegList CPURegList::GetCalleeSaved(unsigned size) {
86 return CPURegList(CPURegister::kRegister, size, 19, 29);
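// Registers d8-d15 are callee-saved.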
90 CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
91 return CPURegList(CPURegister::kFPRegister, size, 8, 15);
95 CPURegList CPURegList::GetCallerSaved(unsigned size) {
96 // Registers x0-x18 and lr (x30) are caller-saved.
97 CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
98 list.Combine(lr);
99 return list;
103 CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
104 // Registers d0-d7 and d16-d31 are caller-saved.
105 CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
106 list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
107 return list;
111 const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
112 const CPURegList kCalleeSavedFP = CPURegList::GetCalleeSavedFP();
113 const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
114 const CPURegList kCallerSavedFP = CPURegList::GetCallerSavedFP();
117 // Registers.
118 #define WREG(n) w##n,
119 const Register Register::wregisters[] = {
120 REGISTER_CODE_LIST(WREG)
122 #undef WREG
124 #define XREG(n) x##n,
125 const Register Register::xregisters[] = {
126 REGISTER_CODE_LIST(XREG)
128 #undef XREG
130 #define SREG(n) s##n,
131 const FPRegister FPRegister::sregisters[] = {
132 REGISTER_CODE_LIST(SREG)
134 #undef SREG
136 #define DREG(n) d##n,
137 const FPRegister FPRegister::dregisters[] = {
138 REGISTER_CODE_LIST(DREG)
140 #undef DREG
143 MemOperand Register::operator[](const ptrdiff_t offset) const {
144 return MemOperand { *this, offset };
147 MemOperand Register::operator[](const Register& offset) const {
148 return MemOperand { *this, offset };
152 const Register& Register::WRegFromCode(unsigned code) {
153 // This function returns the zero register when code = 31. The stack pointer
154 // cannot be returned.
155 assert(code < kNumberOfRegisters);
156 return wregisters[code];
160 const Register& Register::XRegFromCode(unsigned code) {
161 // This function returns the zero register when code = 31. The stack pointer
162 // cannot be returned.
163 assert(code < kNumberOfRegisters);
164 return xregisters[code];
168 const FPRegister& FPRegister::SRegFromCode(unsigned code) {
169 assert(code < kNumberOfFPRegisters);
170 return sregisters[code];
174 const FPRegister& FPRegister::DRegFromCode(unsigned code) {
175 assert(code < kNumberOfFPRegisters);
176 return dregisters[code];
180 const Register& CPURegister::W() const {
181 assert(IsValidRegister());
182 assert(Is64Bits());
183 return Register::WRegFromCode(code_);
187 const Register& CPURegister::X() const {
188 assert(IsValidRegister());
189 assert(Is32Bits());
190 return Register::XRegFromCode(code_);
194 const FPRegister& CPURegister::S() const {
195 assert(IsValidFPRegister());
196 assert(Is64Bits());
197 return FPRegister::SRegFromCode(code_);
201 const FPRegister& CPURegister::D() const {
202 assert(IsValidFPRegister());
203 assert(Is32Bits());
204 return FPRegister::DRegFromCode(code_);
208 // Operand.
209 Operand::Operand(int64_t immediate)
210 : immediate_(immediate),
211 reg_(NoReg),
212 shift_(NO_SHIFT),
213 extend_(NO_EXTEND),
214 shift_amount_(0) {}
217 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
218 : reg_(reg),
219 shift_(shift),
220 extend_(NO_EXTEND),
221 shift_amount_(shift_amount) {
222 assert(reg.Is64Bits() || (shift_amount < kWRegSize));
223 assert(reg.Is32Bits() || (shift_amount < kXRegSize));
224 assert(!reg.IsSP());
228 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
229 : reg_(reg),
230 shift_(NO_SHIFT),
231 extend_(extend),
232 shift_amount_(shift_amount) {
233 assert(reg.IsValid());
234 assert(shift_amount <= 4);
235 assert(!reg.IsSP());
239 bool Operand::IsImmediate() const {
240 return reg_.Is(NoReg);
244 bool Operand::IsShiftedRegister() const {
245 return reg_.IsValid() && (shift_ != NO_SHIFT);
249 bool Operand::IsExtendedRegister() const {
250 return reg_.IsValid() && (extend_ != NO_EXTEND);
254 Operand Operand::ToExtendedRegister() const {
255 assert(IsShiftedRegister());
256 assert((shift_ == LSL) && (shift_amount_ <= 4));
257 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
261 // MemOperand
262 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
263 : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
264 assert(base.Is64Bits() && !base.IsZero());
268 MemOperand::MemOperand(Register base,
269 Register regoffset,
270 Extend extend,
271 unsigned shift_amount)
272 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
273 shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
274 assert(base.Is64Bits() && !base.IsZero());
275 assert(!regoffset.IsSP());
276 assert((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
280 MemOperand::MemOperand(Register base,
281 Register regoffset,
282 Shift shift,
283 unsigned shift_amount)
284 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
285 shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
286 assert(base.Is64Bits() && !base.IsZero());
287 assert(!regoffset.IsSP());
288 assert(shift == LSL);
292 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
293 : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
294 assert(base.Is64Bits() && !base.IsZero());
296 if (offset.IsImmediate()) {
297 offset_ = offset.immediate();
298 } else if (offset.IsShiftedRegister()) {
299 assert(addrmode == Offset);
301 regoffset_ = offset.reg();
302 shift_ = offset.shift();
303 shift_amount_ = offset.shift_amount();
305 extend_ = NO_EXTEND;
306 offset_ = 0;
308 // These assertions match those in the shifted-register constructor.
309 assert(!regoffset_.IsSP());
310 assert(shift_ == LSL);
311 } else {
312 assert(offset.IsExtendedRegister());
313 assert(addrmode == Offset);
315 regoffset_ = offset.reg();
316 extend_ = offset.extend();
317 shift_amount_ = offset.shift_amount();
319 shift_ = NO_SHIFT;
320 offset_ = 0;
322 // These assertions match those in the extended-register constructor.
323 assert(!regoffset_.IsSP());
324 assert((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
329 bool MemOperand::IsImmediateOffset() const {
330 return (addrmode_ == Offset) && regoffset_.Is(NoReg);
334 bool MemOperand::IsRegisterOffset() const {
335 return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
339 bool MemOperand::IsPreIndex() const {
340 return addrmode_ == PreIndex;
344 bool MemOperand::IsPostIndex() const {
345 return addrmode_ == PostIndex;
349 // Assembler
350 Assembler::Assembler(HPHP::CodeBlock& cb)
351 : cb_(cb), literal_pool_monitor_(0) {
352 // Assert that this is an LP64 system.
353 assert(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
354 assert(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
355 assert(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
356 assert(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
357 assert(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
361 Assembler::~Assembler() {
362 FinalizeCode();
363 assert(finalized_ || (cb_.used() == 0));
364 assert(literals_.empty());
368 void Assembler::Reset() {
369 #ifdef DEBUG
370 assert(literal_pool_monitor_ == 0);
371 cb_.zero();
372 finalized_ = false;
373 #endif
374 cb_.clear();
375 literals_.clear();
376 next_literal_pool_check_ = cb_.frontier() + kLiteralPoolCheckInterval;
380 void Assembler::FinalizeCode() {
381 if (!literals_.empty()) {
382 EmitLiteralPool();
384 #ifdef DEBUG
385 finalized_ = true;
386 #endif
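// Branches to an unbound label are threaded together into a singly linked
// list: each branch's immediate field holds the offset back to the previous
// branch in the chain (or kEndOfChain, i.e. offset 0, when it is the last
// one), and the label's link_ points at the most recent branch. bind() walks
// this chain and patches every branch to target the label's final position.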
390 void Assembler::bind(Label* label) {
391 label->is_bound_ = true;
392 label->target_ = cb_.frontier();
393 while (label->IsLinked()) {
394 // Get the address of the following instruction in the chain.
395 auto const link = Instruction::Cast(label->link_);
396 auto const actual_link = Instruction::Cast(cb_.toDestAddress(label->link_));
397 auto const next_link = actual_link->ImmPCOffsetTarget(link);
398 // Update the instruction target.
399 actual_link->SetImmPCOffsetTarget(Instruction::Cast(label->target_), link);
400 // Update the label's link.
401 // If the offset of the branch we just updated was 0 (kEndOfChain) we are
402 // done.
403 label->link_ = (link != next_link
404 ? reinterpret_cast<HPHP::CodeAddress>(next_link)
405 : nullptr);
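// Return the offset, in bytes, from the current position to the label. For an
// unbound label this is the offset to the previous branch in the link chain
// (or kEndOfChain if there is none); the current position then becomes the
// new head of the chain.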
410 int Assembler::UpdateAndGetByteOffsetTo(Label* label) {
411 int offset;
412 if (label->IsBound()) {
413 offset = label->target() - cb_.frontier();
414 } else if (label->IsLinked()) {
415 offset = label->link() - cb_.frontier();
416 } else {
417 offset = Label::kEndOfChain;
419 label->set_link(cb_.frontier());
420 return offset;
424 // Code generation.
425 void Assembler::br(const Register& xn) {
426 assert(xn.Is64Bits());
427 Emit(BR | Rn(xn));
431 void Assembler::blr(const Register& xn) {
432 assert(xn.Is64Bits());
433 Emit(BLR | Rn(xn));
437 void Assembler::ret(const Register& xn) {
438 assert(xn.Is64Bits());
439 Emit(RET | Rn(xn));
443 void Assembler::b(int imm26) {
444 Emit(B | ImmUncondBranch(imm26));
448 void Assembler::b(int imm19, Condition cond) {
449 Emit(B_cond | ImmCondBranch(imm19) | cond);
453 void Assembler::b(Label* label) {
454 b(UpdateAndGetInstructionOffsetTo(label));
458 void Assembler::b(Label* label, Condition cond) {
459 b(UpdateAndGetInstructionOffsetTo(label), cond);
463 void Assembler::bl(int imm26) {
464 Emit(BL | ImmUncondBranch(imm26));
468 void Assembler::bl(Label* label) {
469 bl(UpdateAndGetInstructionOffsetTo(label));
473 void Assembler::cbz(const Register& rt,
474 int imm19) {
475 Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
479 void Assembler::cbz(const Register& rt,
480 Label* label) {
481 cbz(rt, UpdateAndGetInstructionOffsetTo(label));
485 void Assembler::cbnz(const Register& rt,
486 int imm19) {
487 Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
491 void Assembler::cbnz(const Register& rt,
492 Label* label) {
493 cbnz(rt, UpdateAndGetInstructionOffsetTo(label));
497 void Assembler::tbz(const Register& rt,
498 unsigned bit_pos,
499 int imm14) {
500 assert(rt.Is64Bits());
501 Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
505 void Assembler::tbz(const Register& rt,
506 unsigned bit_pos,
507 Label* label) {
508 tbz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
512 void Assembler::tbnz(const Register& rt,
513 unsigned bit_pos,
514 int imm14) {
515 assert(rt.Is64Bits());
516 Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
520 void Assembler::tbnz(const Register& rt,
521 unsigned bit_pos,
522 Label* label) {
523 tbnz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
527 void Assembler::adr(const Register& rd, int imm21) {
528 assert(rd.Is64Bits());
529 Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
533 void Assembler::adr(const Register& rd, Label* label) {
534 adr(rd, UpdateAndGetByteOffsetTo(label));
538 void Assembler::adrp(const Register& rd, int imm21) {
539 assert(rd.Is64Bits());
540 Emit(ADRP | ImmPCRelAddress(imm21) | Rd(rd));
544 void Assembler::adrp(const Register& rd, Label* label) {
545 adrp(rd, UpdateAndGetByteOffsetTo(label));
549 void Assembler::add(const Register& rd,
550 const Register& rn,
551 const Operand& operand,
552 FlagsUpdate S) {
553 AddSub(rd, rn, operand, S, ADD);
557 void Assembler::cmn(const Register& rn,
558 const Operand& operand) {
559 Register zr = AppropriateZeroRegFor(rn);
560 add(zr, rn, operand, SetFlags);
564 void Assembler::sub(const Register& rd,
565 const Register& rn,
566 const Operand& operand,
567 FlagsUpdate S) {
568 AddSub(rd, rn, operand, S, SUB);
572 void Assembler::cmp(const Register& rn, const Operand& operand) {
573 Register zr = AppropriateZeroRegFor(rn);
574 sub(zr, rn, operand, SetFlags);
578 void Assembler::neg(const Register& rd, const Operand& operand, FlagsUpdate S) {
579 Register zr = AppropriateZeroRegFor(rd);
580 sub(rd, zr, operand, S);
584 void Assembler::adc(const Register& rd,
585 const Register& rn,
586 const Operand& operand,
587 FlagsUpdate S) {
588 AddSubWithCarry(rd, rn, operand, S, ADC);
592 void Assembler::sbc(const Register& rd,
593 const Register& rn,
594 const Operand& operand,
595 FlagsUpdate S) {
596 AddSubWithCarry(rd, rn, operand, S, SBC);
600 void Assembler::ngc(const Register& rd, const Operand& operand, FlagsUpdate S) {
601 Register zr = AppropriateZeroRegFor(rd);
602 sbc(rd, zr, operand, S);
606 // Logical instructions.
607 void Assembler::and_(const Register& rd,
608 const Register& rn,
609 const Operand& operand,
610 FlagsUpdate S) {
611 Logical(rd, rn, operand, (S == SetFlags) ? ANDS : AND);
615 void Assembler::tst(const Register& rn,
616 const Operand& operand) {
617 and_(AppropriateZeroRegFor(rn), rn, operand, SetFlags);
621 void Assembler::bic(const Register& rd,
622 const Register& rn,
623 const Operand& operand,
624 FlagsUpdate S) {
625 Logical(rd, rn, operand, (S == SetFlags) ? BICS : BIC);
629 void Assembler::orr(const Register& rd,
630 const Register& rn,
631 const Operand& operand) {
632 Logical(rd, rn, operand, ORR);
636 void Assembler::orn(const Register& rd,
637 const Register& rn,
638 const Operand& operand) {
639 Logical(rd, rn, operand, ORN);
643 void Assembler::eor(const Register& rd,
644 const Register& rn,
645 const Operand& operand) {
646 Logical(rd, rn, operand, EOR);
650 void Assembler::eon(const Register& rd,
651 const Register& rn,
652 const Operand& operand) {
653 Logical(rd, rn, operand, EON);
657 void Assembler::lslv(const Register& rd,
658 const Register& rn,
659 const Register& rm) {
660 assert(rd.size() == rn.size());
661 assert(rd.size() == rm.size());
662 Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
666 void Assembler::lsrv(const Register& rd,
667 const Register& rn,
668 const Register& rm) {
669 assert(rd.size() == rn.size());
670 assert(rd.size() == rm.size());
671 Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
675 void Assembler::asrv(const Register& rd,
676 const Register& rn,
677 const Register& rm) {
678 assert(rd.size() == rn.size());
679 assert(rd.size() == rm.size());
680 Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
684 void Assembler::rorv(const Register& rd,
685 const Register& rn,
686 const Register& rm) {
687 assert(rd.size() == rn.size());
688 assert(rd.size() == rm.size());
689 Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
693 // Bitfield operations.
694 void Assembler::bfm(const Register& rd,
695 const Register& rn,
696 unsigned immr,
697 unsigned imms) {
698 assert(rd.size() == rn.size());
699 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
700 Emit(SF(rd) | BFM | N |
701 ImmR(immr, rd.size()) | ImmS(imms, rd.size()) | Rn(rn) | Rd(rd));
705 void Assembler::sbfm(const Register& rd,
706 const Register& rn,
707 unsigned immr,
708 unsigned imms) {
709 assert(rd.size() == rn.size());
710 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
711 Emit(SF(rd) | SBFM | N |
712 ImmR(immr, rd.size()) | ImmS(imms, rd.size()) | Rn(rn) | Rd(rd));
716 void Assembler::ubfm(const Register& rd,
717 const Register& rn,
718 unsigned immr,
719 unsigned imms) {
720 assert(rd.size() == rn.size());
721 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
722 Emit(SF(rd) | UBFM | N |
723 ImmR(immr, rd.size()) | ImmS(imms, rd.size()) | Rn(rn) | Rd(rd));
727 void Assembler::extr(const Register& rd,
728 const Register& rn,
729 const Register& rm,
730 unsigned lsb) {
731 assert(rd.size() == rn.size());
732 assert(rd.size() == rm.size());
733 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
734 Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rd.size()) | Rn(rn) | Rd(rd));
738 void Assembler::csel(const Register& rd,
739 const Register& rn,
740 const Register& rm,
741 Condition cond) {
742 ConditionalSelect(rd, rn, rm, cond, CSEL);
746 void Assembler::csinc(const Register& rd,
747 const Register& rn,
748 const Register& rm,
749 Condition cond) {
750 ConditionalSelect(rd, rn, rm, cond, CSINC);
754 void Assembler::csinv(const Register& rd,
755 const Register& rn,
756 const Register& rm,
757 Condition cond) {
758 ConditionalSelect(rd, rn, rm, cond, CSINV);
762 void Assembler::csneg(const Register& rd,
763 const Register& rn,
764 const Register& rm,
765 Condition cond) {
766 ConditionalSelect(rd, rn, rm, cond, CSNEG);
770 void Assembler::cset(const Register &rd, Condition cond) {
771 assert((cond != al) && (cond != nv));
772 Register zr = AppropriateZeroRegFor(rd);
773 csinc(rd, zr, zr, InvertCondition(cond));
777 void Assembler::csetm(const Register &rd, Condition cond) {
778 assert((cond != al) && (cond != nv));
779 Register zr = AppropriateZeroRegFor(rd);
780 csinv(rd, zr, zr, InvertCondition(cond));
784 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
785 assert((cond != al) && (cond != nv));
786 csinc(rd, rn, rn, InvertCondition(cond));
790 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
791 assert((cond != al) && (cond != nv));
792 csinv(rd, rn, rn, InvertCondition(cond));
796 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
797 assert((cond != al) && (cond != nv));
798 csneg(rd, rn, rn, InvertCondition(cond));
802 void Assembler::ConditionalSelect(const Register& rd,
803 const Register& rn,
804 const Register& rm,
805 Condition cond,
806 ConditionalSelectOp op) {
807 assert(rd.size() == rn.size());
808 assert(rd.size() == rm.size());
809 Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
813 void Assembler::ccmn(const Register& rn,
814 const Operand& operand,
815 StatusFlags nzcv,
816 Condition cond) {
817 ConditionalCompare(rn, operand, nzcv, cond, CCMN);
821 void Assembler::ccmp(const Register& rn,
822 const Operand& operand,
823 StatusFlags nzcv,
824 Condition cond) {
825 ConditionalCompare(rn, operand, nzcv, cond, CCMP);
829 void Assembler::DataProcessing3Source(const Register& rd,
830 const Register& rn,
831 const Register& rm,
832 const Register& ra,
833 DataProcessing3SourceOp op) {
834 Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
838 void Assembler::mul(const Register& rd,
839 const Register& rn,
840 const Register& rm) {
841 assert(AreSameSizeAndType(rd, rn, rm));
842 DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
846 void Assembler::madd(const Register& rd,
847 const Register& rn,
848 const Register& rm,
849 const Register& ra) {
850 DataProcessing3Source(rd, rn, rm, ra, MADD);
854 void Assembler::mneg(const Register& rd,
855 const Register& rn,
856 const Register& rm) {
857 assert(AreSameSizeAndType(rd, rn, rm));
858 DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
862 void Assembler::msub(const Register& rd,
863 const Register& rn,
864 const Register& rm,
865 const Register& ra) {
866 DataProcessing3Source(rd, rn, rm, ra, MSUB);
870 void Assembler::umaddl(const Register& rd,
871 const Register& rn,
872 const Register& rm,
873 const Register& ra) {
874 assert(rd.Is64Bits() && ra.Is64Bits());
875 assert(rn.Is32Bits() && rm.Is32Bits());
876 DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
880 void Assembler::smaddl(const Register& rd,
881 const Register& rn,
882 const Register& rm,
883 const Register& ra) {
884 assert(rd.Is64Bits() && ra.Is64Bits());
885 assert(rn.Is32Bits() && rm.Is32Bits());
886 DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
890 void Assembler::umsubl(const Register& rd,
891 const Register& rn,
892 const Register& rm,
893 const Register& ra) {
894 assert(rd.Is64Bits() && ra.Is64Bits());
895 assert(rn.Is32Bits() && rm.Is32Bits());
896 DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
900 void Assembler::smsubl(const Register& rd,
901 const Register& rn,
902 const Register& rm,
903 const Register& ra) {
904 assert(rd.Is64Bits() && ra.Is64Bits());
905 assert(rn.Is32Bits() && rm.Is32Bits());
906 DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
910 void Assembler::smull(const Register& rd,
911 const Register& rn,
912 const Register& rm) {
913 assert(rd.Is64Bits());
914 assert(rn.Is32Bits() && rm.Is32Bits());
915 DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
919 void Assembler::sdiv(const Register& rd,
920 const Register& rn,
921 const Register& rm) {
922 assert(rd.size() == rn.size());
923 assert(rd.size() == rm.size());
924 Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
928 void Assembler::smulh(const Register& xd,
929 const Register& xn,
930 const Register& xm) {
931 assert(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
932 DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
935 void Assembler::udiv(const Register& rd,
936 const Register& rn,
937 const Register& rm) {
938 assert(rd.size() == rn.size());
939 assert(rd.size() == rm.size());
940 Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
944 void Assembler::rbit(const Register& rd,
945 const Register& rn) {
946 DataProcessing1Source(rd, rn, RBIT);
950 void Assembler::rev16(const Register& rd,
951 const Register& rn) {
952 DataProcessing1Source(rd, rn, REV16);
956 void Assembler::rev32(const Register& rd,
957 const Register& rn) {
958 assert(rd.Is64Bits());
959 DataProcessing1Source(rd, rn, REV);
963 void Assembler::rev(const Register& rd,
964 const Register& rn) {
965 DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
969 void Assembler::clz(const Register& rd,
970 const Register& rn) {
971 DataProcessing1Source(rd, rn, CLZ);
975 void Assembler::cls(const Register& rd,
976 const Register& rn) {
977 DataProcessing1Source(rd, rn, CLS);
981 void Assembler::ldp(const CPURegister& rt,
982 const CPURegister& rt2,
983 const MemOperand& src) {
984 LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
988 void Assembler::stp(const CPURegister& rt,
989 const CPURegister& rt2,
990 const MemOperand& dst) {
991 LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
995 void Assembler::ldpsw(const Register& rt,
996 const Register& rt2,
997 const MemOperand& src) {
998 assert(rt.Is64Bits());
999 LoadStorePair(rt, rt2, src, LDPSW_x);
1003 void Assembler::LoadStorePair(const CPURegister& rt,
1004 const CPURegister& rt2,
1005 const MemOperand& addr,
1006 LoadStorePairOp op) {
1007 // 'rt' and 'rt2' can only be aliased for stores.
1008 assert(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
1009 assert(AreSameSizeAndType(rt, rt2));
1011 Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1012 ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
1014 Instr addrmodeop;
1015 if (addr.IsImmediateOffset()) {
1016 addrmodeop = LoadStorePairOffsetFixed;
1017 } else {
1018 assert(addr.offset() != 0);
1019 if (addr.IsPreIndex()) {
1020 addrmodeop = LoadStorePairPreIndexFixed;
1021 } else {
1022 assert(addr.IsPostIndex());
1023 addrmodeop = LoadStorePairPostIndexFixed;
1026 Emit(addrmodeop | memop);
1030 void Assembler::ldnp(const CPURegister& rt,
1031 const CPURegister& rt2,
1032 const MemOperand& src) {
1033 LoadStorePairNonTemporal(rt, rt2, src,
1034 LoadPairNonTemporalOpFor(rt, rt2));
1038 void Assembler::stnp(const CPURegister& rt,
1039 const CPURegister& rt2,
1040 const MemOperand& dst) {
1041 LoadStorePairNonTemporal(rt, rt2, dst,
1042 StorePairNonTemporalOpFor(rt, rt2));
1046 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
1047 const CPURegister& rt2,
1048 const MemOperand& addr,
1049 LoadStorePairNonTemporalOp op) {
1050 assert(!rt.Is(rt2));
1051 assert(AreSameSizeAndType(rt, rt2));
1052 assert(addr.IsImmediateOffset());
1054 LSDataSize size = CalcLSPairDataSize(
1055 static_cast<LoadStorePairOp>(op & LoadStorePairMask));
1056 Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1057 ImmLSPair(addr.offset(), size));
1061 // Memory instructions.
1062 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
1063 LoadStore(rt, src, LDRB_w);
1067 void Assembler::strb(const Register& rt, const MemOperand& dst) {
1068 LoadStore(rt, dst, STRB_w);
1072 void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
1073 LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
1077 void Assembler::ldrh(const Register& rt, const MemOperand& src) {
1078 LoadStore(rt, src, LDRH_w);
1082 void Assembler::strh(const Register& rt, const MemOperand& dst) {
1083 LoadStore(rt, dst, STRH_w);
1087 void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
1088 LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
1092 void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
1093 LoadStore(rt, src, LoadOpFor(rt));
1097 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
1098 LoadStore(rt, src, StoreOpFor(rt));
1102 void Assembler::ldr(const Register& rt, Label* label) {
1103 if (rt.Is64Bits()) {
1104 Emit(LDR_x_lit
1105 | ImmLLiteral(UpdateAndGetInstructionOffsetTo(label))
1106 | Rt(rt));
1107 } else {
1108 Emit(LDR_w_lit
1109 | ImmLLiteral(UpdateAndGetInstructionOffsetTo(label))
1110 | Rt(rt));
1115 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
1116 assert(rt.Is64Bits());
1117 LoadStore(rt, src, LDRSW_x);
1121 void Assembler::ldr(const Register& rt, uint64_t imm) {
1122 LoadLiteral(rt, imm, rt.Is64Bits() ? LDR_x_lit : LDR_w_lit);
1126 void Assembler::ldr(const FPRegister& ft, double imm) {
1127 uint64_t rawbits = 0;
1128 LoadLiteralOp op;
1130 if (ft.Is64Bits()) {
1131 rawbits = double_to_rawbits(imm);
1132 op = LDR_d_lit;
1133 } else {
1134 assert(ft.Is32Bits());
1135 float float_imm = static_cast<float>(imm);
1136 rawbits = float_to_rawbits(float_imm);
1137 op = LDR_s_lit;
1140 LoadLiteral(ft, rawbits, op);
1144 void Assembler::ldaddal(const Register& rs, const Register& rt, const MemOperand& src) {
1145 assert(src.IsImmediateOffset() && (src.offset() == 0));
1146 // Acquire/release semantics.
1147 uint32_t op = rt.Is64Bits() ? LSELD_ADD_alx : LSELD_ADD_alw;
1148 Emit(op | Rs(rs) | Rt(rt) | RnSP(src.base()));
1152 void Assembler::ldxr(const Register& rt, const MemOperand& src) {
1153 assert(src.IsImmediateOffset() && (src.offset() == 0));
1154 LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
1155 Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
1159 void Assembler::stxr(const Register& rs,
1160 const Register& rt,
1161 const MemOperand& dst) {
1162 assert(dst.IsImmediateOffset() && (dst.offset() == 0));
1163 LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
1164 Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
1168 void Assembler::ld1(const VRegister& vt,
1169 const MemOperand& src) {
1170 LoadStoreStruct(vt, src, NEON_LD1_1v);
1174 void Assembler::st1(const VRegister& vt,
1175 const MemOperand& src) {
1176 LoadStoreStruct(vt, src, NEON_ST1_1v);
1180 void Assembler::mov(const Register& rd, const Register& rm) {
1181 // Moves involving the stack pointer are encoded as add immediate with
1182 // second operand of zero. Otherwise, orr with first operand zr is
1183 // used.
1184 if (rd.IsSP() || rm.IsSP()) {
1185 add(rd, rm, 0);
1186 } else {
1187 orr(rd, AppropriateZeroRegFor(rd), rm);
1192 void Assembler::mvn(const Register& rd, const Operand& operand) {
1193 orn(rd, AppropriateZeroRegFor(rd), operand);
1197 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
1198 assert(rt.Is64Bits());
1199 Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
1203 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
1204 assert(rt.Is64Bits());
1205 Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
1209 void Assembler::hint(SystemHint code) {
1210 Emit(HINT | ImmHint(code) | Rt(xzr));
1214 void Assembler::fmov(FPRegister fd, double imm) {
1215 if (fd.Is64Bits() && IsImmFP64(imm)) {
1216 Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
1217 } else if (fd.Is32Bits() && IsImmFP32(imm)) {
1218 Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
1219 } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
1220 Register zr = AppropriateZeroRegFor(fd);
1221 fmov(fd, zr);
1222 } else {
1223 ldr(fd, imm);
1228 void Assembler::fmov(Register rd, FPRegister fn) {
1229 assert(rd.size() == fn.size());
1230 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
1231 Emit(op | Rd(rd) | Rn(fn));
1235 void Assembler::fmov(FPRegister fd, Register rn) {
1236 assert(fd.size() == rn.size());
1237 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
1238 Emit(op | Rd(fd) | Rn(rn));
1242 void Assembler::fmov(FPRegister fd, FPRegister fn) {
1243 assert(fd.size() == fn.size());
1244 Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
1248 void Assembler::fmov(const FPRegister& fd, int index, const Register& rn) {
1249 assert(index == 1);
1250 USE(index);
1251 Emit(FMOV_d1_x | Rd(fd) | Rn(rn));
1255 void Assembler::fmov(const Register& rd, const FPRegister& fn, int index) {
1256 assert(index == 1);
1257 USE(index);
1258 Emit(FMOV_x_d1 | Rd(rd) | Rn(fn));
1262 void Assembler::fadd(const FPRegister& fd,
1263 const FPRegister& fn,
1264 const FPRegister& fm) {
1265 FPDataProcessing2Source(fd, fn, fm, FADD);
1269 void Assembler::fsub(const FPRegister& fd,
1270 const FPRegister& fn,
1271 const FPRegister& fm) {
1272 FPDataProcessing2Source(fd, fn, fm, FSUB);
1276 void Assembler::fmul(const FPRegister& fd,
1277 const FPRegister& fn,
1278 const FPRegister& fm) {
1279 FPDataProcessing2Source(fd, fn, fm, FMUL);
1283 void Assembler::fmsub(const FPRegister& fd,
1284 const FPRegister& fn,
1285 const FPRegister& fm,
1286 const FPRegister& fa) {
1287 FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
1291 void Assembler::fdiv(const FPRegister& fd,
1292 const FPRegister& fn,
1293 const FPRegister& fm) {
1294 FPDataProcessing2Source(fd, fn, fm, FDIV);
1298 void Assembler::fmax(const FPRegister& fd,
1299 const FPRegister& fn,
1300 const FPRegister& fm) {
1301 FPDataProcessing2Source(fd, fn, fm, FMAX);
1305 void Assembler::fmin(const FPRegister& fd,
1306 const FPRegister& fn,
1307 const FPRegister& fm) {
1308 FPDataProcessing2Source(fd, fn, fm, FMIN);
1312 void Assembler::fabs(const FPRegister& fd,
1313 const FPRegister& fn) {
1314 assert(fd.SizeInBits() == fn.SizeInBits());
1315 FPDataProcessing1Source(fd, fn, FABS);
1319 void Assembler::fneg(const FPRegister& fd,
1320 const FPRegister& fn) {
1321 assert(fd.SizeInBits() == fn.SizeInBits());
1322 FPDataProcessing1Source(fd, fn, FNEG);
1326 void Assembler::fsqrt(const FPRegister& fd,
1327 const FPRegister& fn) {
1328 assert(fd.SizeInBits() == fn.SizeInBits());
1329 FPDataProcessing1Source(fd, fn, FSQRT);
1333 void Assembler::frintn(const FPRegister& fd,
1334 const FPRegister& fn) {
1335 assert(fd.SizeInBits() == fn.SizeInBits());
1336 FPDataProcessing1Source(fd, fn, FRINTN);
1340 void Assembler::frintm(const FPRegister& fd,
1341 const FPRegister& fn) {
1342 assert(fd.SizeInBits() == fn.SizeInBits());
1343 FPDataProcessing1Source(fd, fn, FRINTM);
1347 void Assembler::frintp(const FPRegister& fd,
1348 const FPRegister& fn) {
1349 assert(fd.SizeInBits() == fn.SizeInBits());
1350 FPDataProcessing1Source(fd, fn, FRINTP);
1354 void Assembler::frintz(const FPRegister& fd,
1355 const FPRegister& fn) {
1356 assert(fd.SizeInBits() == fn.SizeInBits());
1357 FPDataProcessing1Source(fd, fn, FRINTZ);
1361 void Assembler::fcmp(const FPRegister& fn,
1362 const FPRegister& fm) {
1363 assert(fn.size() == fm.size());
1364 Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
1368 void Assembler::fcmp(const FPRegister& fn,
1369 double value) {
1370 USE(value);
1371 // Although the fcmp instruction can strictly only take an immediate value of
1372 // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
1373 // affect the result of the comparison.
1374 assert(value == 0.0);
1375 Emit(FPType(fn) | FCMP_zero | Rn(fn));
1379 void Assembler::fccmp(const FPRegister& fn,
1380 const FPRegister& fm,
1381 StatusFlags nzcv,
1382 Condition cond) {
1383 assert(fn.size() == fm.size());
1384 Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
1388 void Assembler::fcsel(const FPRegister& fd,
1389 const FPRegister& fn,
1390 const FPRegister& fm,
1391 Condition cond) {
1392 assert(fd.size() == fn.size());
1393 assert(fd.size() == fm.size());
1394 Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
1398 void Assembler::FPConvertToInt(const Register& rd,
1399 const FPRegister& fn,
1400 FPIntegerConvertOp op) {
1401 Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
1405 void Assembler::fcvt(const FPRegister& fd,
1406 const FPRegister& fn) {
1407 if (fd.Is64Bits()) {
1408 // Convert float to double.
1409 assert(fn.Is32Bits());
1410 FPDataProcessing1Source(fd, fn, FCVT_ds);
1411 } else {
1412 // Convert double to float.
1413 assert(fn.Is64Bits());
1414 FPDataProcessing1Source(fd, fn, FCVT_sd);
1419 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
1420 FPConvertToInt(rd, fn, FCVTMU);
1424 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
1425 FPConvertToInt(rd, fn, FCVTMS);
1429 void Assembler::fcvtnu(const Register& rd,
1430 const FPRegister& fn) {
1431 FPConvertToInt(rd, fn, FCVTNU);
1435 void Assembler::fcvtns(const Register& rd,
1436 const FPRegister& fn) {
1437 FPConvertToInt(rd, fn, FCVTNS);
1441 void Assembler::fcvtzu(const Register& rd,
1442 const FPRegister& fn) {
1443 FPConvertToInt(rd, fn, FCVTZU);
1447 void Assembler::fcvtzs(const Register& rd,
1448 const FPRegister& fn) {
1449 FPConvertToInt(rd, fn, FCVTZS);
1453 void Assembler::scvtf(const FPRegister& fd,
1454 const Register& rn,
1455 unsigned fbits) {
1456 if (fbits == 0) {
1457 Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
1458 } else {
1459 Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1460 Rd(fd));
1465 void Assembler::ucvtf(const FPRegister& fd,
1466 const Register& rn,
1467 unsigned fbits) {
1468 if (fbits == 0) {
1469 Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
1470 } else {
1471 Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1472 Rd(fd));
1477 // Note:
1478 // Below, a difference in case for the same letter indicates a
1479 // negated bit.
1480 // If b is 1, then B is 0.
1481 Instr Assembler::ImmFP32(float imm) {
1482 assert(IsImmFP32(imm));
1483 // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
1484 uint32_t bits = float_to_rawbits(imm);
1485 // bit7: a000.0000
1486 uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
1487 // bit6: 0b00.0000
1488 uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
1489 // bit5_to_0: 00cd.efgh
1490 uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
1492 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
1496 Instr Assembler::ImmFP64(double imm) {
1497 assert(IsImmFP64(imm));
1498 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
1499 // 0000.0000.0000.0000.0000.0000.0000.0000
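// For example, 1.0 has the raw bits 0x3ff0000000000000, which packs into the
// 8-bit immediate as 0x70.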
1500 uint64_t bits = double_to_rawbits(imm);
1501 // bit7: a000.0000
1502 uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
1503 // bit6: 0b00.0000
1504 uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
1505 // bit5_to_0: 00cd.efgh
1506 uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
1508 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
1512 // Code generation helpers.
1513 void Assembler::MoveWide(const Register& rd,
1514 uint64_t imm,
1515 int shift,
1516 MoveWideImmediateOp mov_op) {
1517 if (shift >= 0) {
1518 // Explicit shift specified.
1519 assert((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
1520 assert(rd.Is64Bits() || (shift == 0) || (shift == 16));
1521 shift /= 16;
1522 } else {
1523 // Calculate a new immediate and shift combination to encode the immediate
1524 // argument.
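// For example, 0xcafe00000000 becomes imm = 0xcafe with shift = 2 (LSL #32).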
1525 shift = 0;
1526 if ((imm & ~0xffffUL) == 0) {
1527 // Nothing to do.
1528 } else if ((imm & ~(0xffffUL << 16)) == 0) {
1529 imm >>= 16;
1530 shift = 1;
1531 } else if ((imm & ~(0xffffUL << 32)) == 0) {
1532 assert(rd.Is64Bits());
1533 imm >>= 32;
1534 shift = 2;
1535 } else if ((imm & ~(0xffffUL << 48)) == 0) {
1536 assert(rd.Is64Bits());
1537 imm >>= 48;
1538 shift = 3;
1542 assert(is_uint16(imm));
1544 Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
1545 Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
1549 void Assembler::AddSub(const Register& rd,
1550 const Register& rn,
1551 const Operand& operand,
1552 FlagsUpdate S,
1553 AddSubOp op) {
1554 assert(rd.size() == rn.size());
1555 if (operand.IsImmediate()) {
1556 int64_t immediate = operand.immediate();
1557 assert(IsImmAddSub(immediate));
1558 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
1559 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
1560 ImmAddSub(immediate) | dest_reg | RnSP(rn));
1561 } else if (operand.IsShiftedRegister()) {
1562 assert(operand.reg().size() == rd.size());
1563 assert(operand.shift() != ROR);
1565 // For instructions of the form:
1566 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
1567 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
1568 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
1569 // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
1570 // or their 64-bit register equivalents, convert the operand from shifted to
1571 // extended register mode, and emit an add/sub extended instruction.
1572 if (rn.IsSP() || rd.IsSP()) {
1573 assert(!(rd.IsSP() && (S == SetFlags)));
1574 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
1575 AddSubExtendedFixed | op);
1576 } else {
1577 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
1579 } else {
1580 assert(operand.IsExtendedRegister());
1581 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
1586 void Assembler::AddSubWithCarry(const Register& rd,
1587 const Register& rn,
1588 const Operand& operand,
1589 FlagsUpdate S,
1590 AddSubWithCarryOp op) {
1591 assert(rd.size() == rn.size());
1592 assert(rd.size() == operand.reg().size());
1593 assert(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
1594 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
1598 void Assembler::hlt(int code) {
1599 assert(is_uint16(code));
1600 Emit(HLT | ImmException(code));
1604 void Assembler::brk(int code) {
1605 assert(is_uint16(code));
1606 Emit(BRK | ImmException(code));
1610 void Assembler::Logical(const Register& rd,
1611 const Register& rn,
1612 const Operand& operand,
1613 LogicalOp op) {
1614 assert(rd.size() == rn.size());
1615 if (operand.IsImmediate()) {
1616 int64_t immediate = operand.immediate();
1617 unsigned reg_size = rd.size();
1619 assert(immediate != 0);
1620 assert(immediate != -1);
1621 assert(rd.Is64Bits() || is_uint32(immediate));
1623 // If the operation is NOT, invert the operation and immediate.
1624 if ((op & NOT) == NOT) {
1625 op = static_cast<LogicalOp>(op & ~NOT);
1626 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
1629 unsigned n, imm_s, imm_r;
1630 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
1631 // Immediate can be encoded in the instruction.
1632 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
1633 } else {
1634 // This case is handled in the macro assembler.
1635 not_reached();
1637 } else {
1638 assert(operand.IsShiftedRegister());
1639 assert(operand.reg().size() == rd.size());
1640 Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
1641 DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
1646 void Assembler::LogicalImmediate(const Register& rd,
1647 const Register& rn,
1648 unsigned n,
1649 unsigned imm_s,
1650 unsigned imm_r,
1651 LogicalOp op) {
1652 unsigned reg_size = rd.size();
1653 Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
1654 Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
1655 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
1656 Rn(rn));
1660 void Assembler::ConditionalCompare(const Register& rn,
1661 const Operand& operand,
1662 StatusFlags nzcv,
1663 Condition cond,
1664 ConditionalCompareOp op) {
1665 Instr ccmpop;
1666 if (operand.IsImmediate()) {
1667 int64_t immediate = operand.immediate();
1668 assert(IsImmConditionalCompare(immediate));
1669 ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
1670 } else {
1671 assert(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
1672 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
1674 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
1678 void Assembler::DataProcessing1Source(const Register& rd,
1679 const Register& rn,
1680 DataProcessing1SourceOp op) {
1681 assert(rd.size() == rn.size());
1682 Emit(SF(rn) | op | Rn(rn) | Rd(rd));
1686 void Assembler::FPDataProcessing1Source(const FPRegister& fd,
1687 const FPRegister& fn,
1688 FPDataProcessing1SourceOp op) {
1689 Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
1693 void Assembler::FPDataProcessing2Source(const FPRegister& fd,
1694 const FPRegister& fn,
1695 const FPRegister& fm,
1696 FPDataProcessing2SourceOp op) {
1697 assert(fd.size() == fn.size());
1698 assert(fd.size() == fm.size());
1699 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
1703 void Assembler::FPDataProcessing3Source(const FPRegister& fd,
1704 const FPRegister& fn,
1705 const FPRegister& fm,
1706 const FPRegister& fa,
1707 FPDataProcessing3SourceOp op) {
1708 assert(AreSameSizeAndType(fd, fn, fm, fa));
1709 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
1713 void Assembler::EmitShift(const Register& rd,
1714 const Register& rn,
1715 Shift shift,
1716 unsigned shift_amount) {
1717 switch (shift) {
1718 case LSL:
1719 lsl(rd, rn, shift_amount);
1720 break;
1721 case LSR:
1722 lsr(rd, rn, shift_amount);
1723 break;
1724 case ASR:
1725 asr(rd, rn, shift_amount);
1726 break;
1727 case ROR:
1728 ror(rd, rn, shift_amount);
1729 break;
1730 default:
1731 not_reached();
1736 void Assembler::EmitExtendShift(const Register& rd,
1737 const Register& rn,
1738 Extend extend,
1739 unsigned left_shift) {
1740 assert(rd.size() >= rn.size());
1741 unsigned reg_size = rd.size();
1742 // Use the correct size of register.
1743 Register rn_ = Register(rn.code(), rd.size());
1744 // Bits extracted are high_bit:0.
1745 unsigned high_bit = (8 << (extend & 0x3)) - 1;
1746 // Number of bits left in the result that are not introduced by the shift.
1747 unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
1749 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
1750 switch (extend) {
1751 case UXTB:
1752 case UXTH:
1753 case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
1754 case SXTB:
1755 case SXTH:
1756 case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
1757 case UXTX:
1758 case SXTX: {
1759 assert(rn.size() == kXRegSize);
1760 // Nothing to extend. Just shift.
1761 lsl(rd, rn_, left_shift);
1762 break;
1764 default: not_reached();
1766 } else {
1767 // No need to extend as the extended bits would be shifted away.
1768 lsl(rd, rn_, left_shift);
1773 void Assembler::DataProcShiftedRegister(const Register& rd,
1774 const Register& rn,
1775 const Operand& operand,
1776 FlagsUpdate S,
1777 Instr op) {
1778 assert(operand.IsShiftedRegister());
1779 assert(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
1780 Emit(SF(rd) | op | Flags(S) |
1781 ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
1782 Rm(operand.reg()) | Rn(rn) | Rd(rd));
1786 void Assembler::DataProcExtendedRegister(const Register& rd,
1787 const Register& rn,
1788 const Operand& operand,
1789 FlagsUpdate S,
1790 Instr op) {
1791 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
1792 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
1793 ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
1794 dest_reg | RnSP(rn));
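// Add/sub immediates are unsigned 12-bit values, optionally shifted left by
// 12 bits. For example, 0x123 and 0x123000 are encodable, but 0x123456 is not.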
1798 bool Assembler::IsImmAddSub(int64_t immediate) {
1799 return is_uint12(immediate) ||
1800 (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
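// Encode a single-register load or store. Immediate offsets use the scaled
// unsigned-offset form when the offset is a non-negative multiple of the
// access size (and fits in 12 bits after scaling), falling back to the
// unscaled signed 9-bit form otherwise. Register-offset and pre/post-index
// forms are handled further down.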
1803 void Assembler::LoadStore(const CPURegister& rt,
1804 const MemOperand& addr,
1805 LoadStoreOp op) {
1806 Instr memop = op | Rt(rt) | RnSP(addr.base());
1807 ptrdiff_t offset = addr.offset();
1809 if (addr.IsImmediateOffset()) {
1810 LSDataSize size = CalcLSDataSize(op);
1811 if (IsImmLSScaled(offset, size)) {
1812 // Use the scaled addressing mode.
1813 Emit(LoadStoreUnsignedOffsetFixed | memop |
1814 ImmLSUnsigned(offset >> size));
1815 } else if (IsImmLSUnscaled(offset)) {
1816 // Use the unscaled addressing mode.
1817 Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
1818 } else {
1819 // This case is handled in the macro assembler.
1820 not_reached();
1822 } else if (addr.IsRegisterOffset()) {
1823 Extend ext = addr.extend();
1824 Shift shift = addr.shift();
1825 unsigned shift_amount = addr.shift_amount();
1827 // LSL is encoded in the option field as UXTX.
1828 if (shift == LSL) {
1829 ext = UXTX;
1832 // Shifts are encoded in one bit, indicating a left shift by the memory
1833 // access size.
1834 assert((shift_amount == 0) ||
1835 (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
1836 Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
1837 ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
1838 } else {
1839 if (IsImmLSUnscaled(offset)) {
1840 if (addr.IsPreIndex()) {
1841 Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
1842 } else {
1843 assert(addr.IsPostIndex());
1844 Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
1846 } else {
1847 // This case is handled in the macro assembler.
1848 not_reached();
1854 void Assembler::LoadStoreStruct(const VRegister& vt,
1855 const MemOperand& addr,
1856 NEONLoadStoreMultiStructOp op) {
1857 USE(vt);
1858 Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
1862 // NEON structure loads and stores.
1863 Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
1864 Instr addr_field = RnSP(addr.base());
1866 if (addr.IsPostIndex()) {
1867 static_assert(NEONLoadStoreMultiStructPostIndex ==
1868 static_cast<NEONLoadStoreMultiStructPostIndexOp>(
1869 NEONLoadStoreSingleStructPostIndex), "");
1871 addr_field |= NEONLoadStoreMultiStructPostIndex;
1872 if (addr.offset() == 0) {
1873 addr_field |= RmNot31(addr.regoffset());
1874 } else {
1875 // The immediate post index addressing mode is indicated by rm = 31.
1876 // The immediate is implied by the number of vector registers used.
1877 addr_field |= (0x1f << Rm_offset);
1879 } else {
1880 assert(addr.IsImmediateOffset() && (addr.offset() == 0));
1882 return addr_field;
1886 bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
1887 return is_int9(offset);
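// A scaled offset must be a non-negative multiple of the access size that
// still fits in 12 bits once scaled; for a 64-bit access that allows offsets
// 0..32760 in steps of 8.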
1891 bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
1892 bool offset_is_size_multiple = (((offset >> size) << size) == offset);
1893 return offset_is_size_multiple && is_uint12(offset >> size);
1897 void Assembler::LoadLiteral(const CPURegister& rt,
1898 uint64_t imm,
1899 LoadLiteralOp op) {
1900 assert(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
1902 BlockLiteralPoolScope scope(this);
1903 RecordLiteral(imm, rt.SizeInBytes());
1904 Emit(op | ImmLLiteral(0) | Rt(rt));
1908 // Test if a given value can be encoded in the immediate field of a logical
1909 // instruction.
1910 // If it can be encoded, the function returns true, and values pointed to by n,
1911 // imm_s and imm_r are updated with immediates encoded in the format required
1912 // by the corresponding fields in the logical instruction.
1913 // If it cannot be encoded, the function returns false, and the values pointed
1914 // to by n, imm_s and imm_r are undefined.
1915 bool Assembler::IsImmLogical(uint64_t value,
1916 unsigned width,
1917 unsigned* n,
1918 unsigned* imm_s,
1919 unsigned* imm_r) {
1920 assert((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
1921 assert((width == kWRegSize) || (width == kXRegSize));
1923 // Logical immediates are encoded using parameters n, imm_s and imm_r using
1924 // the following table:
1926 // N imms immr size S R
1927 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
1928 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
1929 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
1930 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
1931 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
1932 // 0 11110s xxxxxr 2 UInt(s) UInt(r)
1933 // (s bits must not be all set)
1935 // A pattern is constructed of size bits, where the least significant S+1
1936 // bits are set. The pattern is rotated right by R, and repeated across a
1937 // 32 or 64-bit value, depending on destination register width.
1939 // To test if an arbitrary immediate can be encoded using this scheme, an
1940 // iterative algorithm is used.
1942 // TODO: This code does not consider using X/W register overlap to support
1943 // 64-bit immediates where the top 32-bits are zero, and the bottom 32-bits
1944 // are an encodable logical immediate.
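// For example, 0x00ff00ff00ff00ff repeats a 16-bit element whose low eight
// bits are set, so it encodes as N = 0, imm_s = 0b100111 (S = 7) and
// imm_r = 0.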
1946 // 1. If the value has all set or all clear bits, it can't be encoded.
1947 if ((value == 0) || (value == 0xffffffffffffffffUL) ||
1948 ((width == kWRegSize) && (value == 0xffffffff))) {
1949 return false;
1952 unsigned lead_zero = CountLeadingZeros(value, width);
1953 unsigned lead_one = CountLeadingZeros(~value, width);
1954 unsigned trail_zero = CountTrailingZeros(value, width);
1955 unsigned trail_one = CountTrailingZeros(~value, width);
1956 unsigned set_bits = CountSetBits(value, width);
1958 // The fixed bits in the immediate s field.
1959 // If width == 64 (X reg), start at 0xFFFFFF80.
1960 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
1961 // widths won't be executed.
1962 int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
1963 int imm_s_mask = 0x3F;
1965 for (;;) {
1966 // 2. If the value is two bits wide, it can be encoded.
1967 if (width == 2) {
1968 *n = 0;
1969 *imm_s = 0x3C;
1970 *imm_r = (value & 3) - 1;
1971 return true;
1974 *n = (width == 64) ? 1 : 0;
1975 *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
1976 if ((lead_zero + set_bits) == width) {
1977 *imm_r = 0;
1978 } else {
1979 *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
1982 // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
1983 // the bit width of the value, it can be encoded.
1984 if (lead_zero + trail_zero + set_bits == width) {
1985 return true;
1988 // 4. If the sum of leading ones, trailing ones and unset bits in the
1989 // value is equal to the bit width of the value, it can be encoded.
1990 if (lead_one + trail_one + (width - set_bits) == width) {
1991 return true;
1994 // 5. If the most-significant half of the bitwise value is equal to the
1995 // least-significant half, return to step 2 using the least-significant
1996 // half of the value.
1997 uint64_t mask = (1UL << (width >> 1)) - 1;
1998 if ((value & mask) == ((value >> (width >> 1)) & mask)) {
1999 width >>= 1;
2000 set_bits >>= 1;
2001 imm_s_fixed >>= 1;
2002 continue;
2005 // 6. Otherwise, the value can't be encoded.
2006 return false;
2010 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
2011 return is_uint5(immediate);
2015 bool Assembler::IsImmFP32(float imm) {
2016 // Valid values will have the form:
2017 // aBbb.bbbc.defg.h000.0000.0000.0000.0000
2018 uint32_t bits = float_to_rawbits(imm);
2019 // bits[19..0] are cleared.
2020 if ((bits & 0x7ffff) != 0) {
2021 return false;
2024 // bits[29..25] are all set or all cleared.
2025 uint32_t b_pattern = (bits >> 16) & 0x3e00;
2026 if (b_pattern != 0 && b_pattern != 0x3e00) {
2027 return false;
2030 // bit[30] and bit[29] are opposite.
2031 if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
2032 return false;
2035 return true;
2039 bool Assembler::IsImmFP64(double imm) {
2040 // Valid values will have the form:
2041 // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2042 // 0000.0000.0000.0000.0000.0000.0000.0000
2043 uint64_t bits = double_to_rawbits(imm);
2044 // bits[47..0] are cleared.
2045 if ((bits & 0xffffffffffffL) != 0) {
2046 return false;
2049 // bits[61..54] are all set or all cleared.
2050 uint32_t b_pattern = (bits >> 48) & 0x3fc0;
2051 if (b_pattern != 0 && b_pattern != 0x3fc0) {
2052 return false;
2055 // bit[62] and bit[61] are opposite.
2056 if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
2057 return false;
2060 return true;
2064 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
2065 assert(rt.IsValid());
2066 if (rt.IsRegister()) {
2067 return rt.Is64Bits() ? LDR_x : LDR_w;
2068 } else {
2069 assert(rt.IsFPRegister());
2070 return rt.Is64Bits() ? LDR_d : LDR_s;
2075 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
2076 const CPURegister& rt2) {
2077 assert(AreSameSizeAndType(rt, rt2));
2078 USE(rt2);
2079 if (rt.IsRegister()) {
2080 return rt.Is64Bits() ? LDP_x : LDP_w;
2081 } else {
2082 assert(rt.IsFPRegister());
2083 return rt.Is64Bits() ? LDP_d : LDP_s;
2088 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
2089 assert(rt.IsValid());
2090 if (rt.IsRegister()) {
2091 return rt.Is64Bits() ? STR_x : STR_w;
2092 } else {
2093 assert(rt.IsFPRegister());
2094 return rt.Is64Bits() ? STR_d : STR_s;
2099 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
2100 const CPURegister& rt2) {
2101 assert(AreSameSizeAndType(rt, rt2));
2102 USE(rt2);
2103 if (rt.IsRegister()) {
2104 return rt.Is64Bits() ? STP_x : STP_w;
2105 } else {
2106 assert(rt.IsFPRegister());
2107 return rt.Is64Bits() ? STP_d : STP_s;
2112 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
2113 const CPURegister& rt, const CPURegister& rt2) {
2114 assert(AreSameSizeAndType(rt, rt2));
2115 USE(rt2);
2116 if (rt.IsRegister()) {
2117 return rt.Is64Bits() ? LDNP_x : LDNP_w;
2118 } else {
2119 assert(rt.IsFPRegister());
2120 return rt.Is64Bits() ? LDNP_d : LDNP_s;
2125 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
2126 const CPURegister& rt, const CPURegister& rt2) {
2127 assert(AreSameSizeAndType(rt, rt2));
2128 USE(rt2);
2129 if (rt.IsRegister()) {
2130 return rt.Is64Bits() ? STNP_x : STNP_w;
2131 } else {
2132 assert(rt.IsFPRegister());
2133 return rt.Is64Bits() ? STNP_d : STNP_s;
2138 void Assembler::RecordLiteral(int64_t imm, unsigned size) {
2139 literals_.push_front(new Literal(cb_.frontier(), imm, size));
2143 // Check if a literal pool should be emitted. Currently a literal is emitted
2144 // when:
2145 // * the distance to the first literal load handled by this pool is greater
2146 // than the recommended distance and the literal pool can be emitted without
2147 // generating a jump over it.
2148 // * the distance to the first literal load handled by this pool is greater
2149 // than twice the recommended distance.
2150 // TODO: refine this heuristic using real world data.
2151 void Assembler::CheckLiteralPool(LiteralPoolEmitOption option) {
2152 if (IsLiteralPoolBlocked()) {
2153 // Literal pool emission is forbidden, no point in doing further checks.
2154 return;
2157 if (literals_.empty()) {
2158 // No literal pool to emit.
2159 next_literal_pool_check_ += kLiteralPoolCheckInterval;
2160 return;
2163 intptr_t distance = cb_.frontier() - literals_.back()->pc_;
2164 if ((distance < kRecommendedLiteralPoolRange) ||
2165 ((option == JumpRequired) &&
2166 (distance < (2 * kRecommendedLiteralPoolRange)))) {
2167 // We prefer not to have to jump over the literal pool.
2168 next_literal_pool_check_ += kLiteralPoolCheckInterval;
2169 return;
2172 EmitLiteralPool(option);
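// Emit all pending literals. The pool is laid out as an optional branch over
// the pool (when a jump is required), a one-instruction marker encoded as a
// literal load to xzr whose offset gives the pool size in words, and then the
// literal data itself.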
2176 void Assembler::EmitLiteralPool(LiteralPoolEmitOption option) {
2177 // Prevent recursive calls while emitting the literal pool.
2178 BlockLiteralPoolScope scope(this);
2180 Label marker;
2181 Label start_of_pool;
2182 Label end_of_pool;
2184 if (option == JumpRequired) {
2185 b(&end_of_pool);
2188 // Leave space for a literal pool marker. This is populated later, once the
2189 // size of the pool is known.
2190 bind(&marker);
2191 nop();
2193 // Now populate the literal pool.
2194 bind(&start_of_pool);
2195 std::list<Literal*>::iterator it;
2196 for (it = literals_.begin(); it != literals_.end(); it++) {
2197 // Update the load-literal instruction to point to this pool entry.
2198 auto load_literal = Instruction::Cast((*it)->pc_);
2199 load_literal->SetImmLLiteral(Instruction::Cast(cb_.frontier()));
2200 // Copy the data into the pool.
2201 uint64_t value = (*it)->value_;
2202 unsigned size = (*it)->size_;
2203 assert((size == kXRegSizeInBytes) || (size == kWRegSizeInBytes));
2204 assert(cb_.canEmit(size));
2205 cb_.bytes(size, reinterpret_cast<const uint8_t*>(&value));
2206 delete *it;
2208 literals_.clear();
2209 bind(&end_of_pool);
2211 // The pool size should always be a multiple of four bytes because that is the
2212 // scaling applied by the LDR(literal) instruction, even for X-register loads.
2213 assert((SizeOfCodeGeneratedSince(&start_of_pool) % 4) == 0);
2214 uint64_t pool_size = SizeOfCodeGeneratedSince(&start_of_pool) / 4;
2216 // Literal pool marker indicating the size in words of the literal pool.
2217 // We use a literal load to the zero register, the offset indicating the
2218 // size in words. This instruction can encode a large enough offset to span
2219 // the entire pool at its maximum size.
2220 Instr marker_instruction = LDR_x_lit | ImmLLiteral(pool_size) | Rt(xzr);
2221 memcpy(marker.target(), &marker_instruction, kInstructionSize);
2223 next_literal_pool_check_ = cb_.frontier() + kLiteralPoolCheckInterval;
2227 // Return the size in bytes, required by the literal pool entries. This does
2228 // not include any marker or branch over the literal pool itself.
2229 size_t Assembler::LiteralPoolSize() {
2230 size_t size = 0;
2232 std::list<Literal*>::iterator it;
2233 for (it = literals_.begin(); it != literals_.end(); it++) {
2234 size += (*it)->size_;
2237 return size;
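// Return true if any two of the given valid registers alias each other.
// Invalid (NoReg) arguments are ignored.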
2241 bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
2242 const CPURegister& reg3, const CPURegister& reg4,
2243 const CPURegister& reg5, const CPURegister& reg6,
2244 const CPURegister& reg7, const CPURegister& reg8) {
2245 int number_of_valid_regs = 0;
2246 int number_of_valid_fpregs = 0;
2248 RegList unique_regs = 0;
2249 RegList unique_fpregs = 0;
2251 const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
2253 for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
2254 if (regs[i].IsRegister()) {
2255 number_of_valid_regs++;
2256 unique_regs |= regs[i].Bit();
2257 } else if (regs[i].IsFPRegister()) {
2258 number_of_valid_fpregs++;
2259 unique_fpregs |= regs[i].Bit();
2260 } else {
2261 assert(!regs[i].IsValid());
2265 int number_of_unique_regs =
2266 CountSetBits(unique_regs, sizeof(unique_regs) * 8);
2267 int number_of_unique_fpregs =
2268 CountSetBits(unique_fpregs, sizeof(unique_fpregs) * 8);
2270 assert(number_of_valid_regs >= number_of_unique_regs);
2271 assert(number_of_valid_fpregs >= number_of_unique_fpregs);
2273 return (number_of_valid_regs != number_of_unique_regs) ||
2274 (number_of_valid_fpregs != number_of_unique_fpregs);
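// Return true if every valid register in the list has the same size and type
// as reg1; invalid registers are ignored, but reg1 itself must be valid.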
2278 bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
2279 const CPURegister& reg3, const CPURegister& reg4,
2280 const CPURegister& reg5, const CPURegister& reg6,
2281 const CPURegister& reg7, const CPURegister& reg8) {
2282 assert(reg1.IsValid());
2283 bool match = true;
2284 match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
2285 match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
2286 match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
2287 match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
2288 match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
2289 match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
2290 match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
2291 return match;
2295 } // namespace vixl