disas/libvixl/a64/instructions-a64.cc

// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/instructions-a64.h"
#include "a64/assembler-a64.h"

namespace vixl {

static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}
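
// For example, RotateRight(0x3, 1, 8) rotates 0b00000011 right by one bit
// within an 8-bit field: the low bit wraps around to bit 7, giving 0x81.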

static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
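
// For example, RepeatBitsAcrossReg(32, 0x5, 4) tiles the 4-bit pattern 0b0101
// across a W register by repeated doubling: 0x55, 0x5555, then 0x55555555.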

// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
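
// For example, with n = 0, imm_s = 0b111100 and imm_r = 0, the loop walks down
// from width 0x20 until it finds a width bit clear in imm_s (here width = 2),
// the pattern is the single low bit 0b01, and repeating it across a W register
// gives 0x55555555.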

float Instruction::ImmFP32() const {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}
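
// The (32 - bit6) term implements the B = b ^ 1 expansion: 32 - 1 = 0b011111
// and 32 - 0 = 0b100000, so b arrives inverted in bit 30 and replicated
// through bits 29-25. For example, ImmFP() == 0x70 yields
// ((32 - 1) << 25) | (0x30 << 19) = 0x3f800000, i.e. 1.0f.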

double Instruction::ImmFP64() const {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}
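
// The same trick widens to the double exponent: 256 - 1 = 0b011111111 and
// 256 - 0 = 0b100000000. For example, ImmFP() == 0x70 yields
// ((256 - 1) << 54) | (0x30 << 48) = 0x3ff0000000000000, i.e. 1.0.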

LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}
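
// X- and D-register pairs transfer two 64-bit elements; the remaining pair
// accesses handled here transfer 32-bit elements, hence LSWord.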

const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}
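
// Note that `base + offset` advances by bytes: Instruction carries no data
// members, so pointer arithmetic on Instruction* is byte-granular. For an
// ADRP with ImmPCRel() == 2, the target is two kPageSize pages beyond the
// page holding this instruction.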

inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}

void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}

void Instruction::SetPCRelImmTarget(const Instruction* target) {
  int32_t imm21;
  if (Mask(PCRelAddressingMask) == ADR) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(imm21);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
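
// This is the setter counterpart of ImmPCOffsetTarget(): ADR encodes a plain
// byte offset, while ADRP encodes the signed distance in whole kPageSize
// pages between this instruction's page and the target's page.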

void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
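
// The last line is a read-modify-write of the instruction word: Mask(~imm_mask)
// keeps every bit outside the immediate field, and branch_imm supplies the new
// offset field already shifted into place by the Assembler helper.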

void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}
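
// The load-literal offset is encoded in literal-pool entries rather than
// bytes, so the byte distance is scaled down by kLiteralEntrySizeLog2 before
// Assembler::ImmLLiteral() packs it into the immediate field.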

}  // namespace vixl