// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "a64/instructions-a64.h"
#include "a64/assembler-a64.h"

namespace vixl {

// Floating-point infinity values.
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));

// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);

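// Note that an all-ones exponent with a zero mantissa encodes an infinity,
// while the same exponent with the top mantissa bit set (as in the default
// NaN values above) encodes a quiet NaN.
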
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}

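// For example, RotateRight(0x3, 1, 8) wraps the low set bit around to bit 7,
// producing 0x81.
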
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}

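// For example, RepeatBitsAcrossReg(32, 0x0f, 8) tiles the low byte across a
// W-sized value, producing 0x0f0f0f0f.
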
bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}

bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}

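// Note that IsLoad() and IsStore() classify paired accesses purely by the
// LoadStorePairLBit, while single accesses require the opcode switch; the
// case lists above follow the LoadStoreOp encodings.
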
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}

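// Worked example: n=0, imm_s=0b111100 and imm_r=0 select a 2-bit pattern with
// S=0 and R=0, i.e. a single set bit repeated across the register:
// 0x5555555555555555 for an X destination, 0x55555555 for a W destination.
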
float Instruction::ImmFP32() const {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}

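// For example, an ImmFP() field of 0x70 expands to 0x3f800000, i.e. 1.0f.
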
double Instruction::ImmFP64() const {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}

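// For example, an ImmFP() field of 0x70 expands to 0x3ff0000000000000,
// i.e. 1.0.
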
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}

const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}

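// For example, a conditional branch whose ImmCondBranch() field holds 4
// targets the instruction 4 * kInstructionSize = 16 bytes past the branch.
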
inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}

void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}

void Instruction::SetPCRelImmTarget(const Instruction* target) {
  int32_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(imm21);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}

void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}

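// Each branch type stores its immediate in a different field, so only the
// bits covered by the matching *_mask are cleared before the new offset is
// OR-ed in; the rest of the instruction is preserved.
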
void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}

}  // namespace vixl