// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "a64/instructions-a64.h"
#include "a64/assembler-a64.h"

namespace vixl {

static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}
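
// A hand-checked example, for illustration: RotateRight(0x0f, 4, 16) masks
// off the low four bits, wraps them to the top of the 16-bit field, and
// returns 0xf000.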

static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
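
// A hand-checked example, for illustration: RepeatBitsAcrossReg(32, 0xab, 8)
// tiles the byte 0xab across a W-sized value, giving 0xabababab.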


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
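  //
  // A hand-checked example, for illustration: N=0, imms=110010, immr=000001
  // selects size=8 with S=2, R=1. The 8-bit pattern with the low S+1=3 bits
  // set is 0b00000111; rotated right by 1 it becomes 0b10000011 (0x83),
  // which repeats across a W register as 0x83838383.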

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        unsigned mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
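  //
  // A hand-checked example, for illustration: ImmFP() == 0x70
  // (abcdefgh = 01110000) expands to 0x3f800000, which is 1.0f.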
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
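  //
  // A hand-checked example, for illustration: ImmFP() == 0x70 expands to
  // 0x3ff0000000000000, which is 1.0.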
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


Instruction* Instruction::ImmPCOffsetTarget() {
  Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}
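
// A hand-checked example, for illustration: for an unconditional branch whose
// ImmBranch() field holds 4, the target is 4 << kInstructionSizeLog2 = 16
// bytes past the branch instruction itself.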

inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  ptrdiff_t imm21;
  if (Mask(PCRelAddressingMask) == ADR) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(imm21);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  VIXL_ASSERT(((source - this) & 3) == 0);
  int offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}
}  // namespace vixl