1 // Copyright 2013, ARM Limited
2 // All rights reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef VIXL_A64_INSTRUCTIONS_A64_H_
28 #define VIXL_A64_INSTRUCTIONS_A64_H_
32 #include "a64/constants-a64.h"
35 // ISA constants. --------------------------------------------------------------
37 typedef uint32_t Instr
;
38 const unsigned kInstructionSize
= 4;
39 const unsigned kInstructionSizeLog2
= 2;
40 const unsigned kLiteralEntrySize
= 4;
41 const unsigned kLiteralEntrySizeLog2
= 2;
42 const unsigned kMaxLoadLiteralRange
= 1 * MBytes
;
44 // This is the nominal page size (as used by the adrp instruction); the actual
45 // size of the memory pages allocated by the kernel is likely to differ.
46 const unsigned kPageSize
= 4 * KBytes
;
47 const unsigned kPageSizeLog2
= 12;
49 const unsigned kWRegSize
= 32;
50 const unsigned kWRegSizeLog2
= 5;
51 const unsigned kWRegSizeInBytes
= kWRegSize
/ 8;
52 const unsigned kWRegSizeInBytesLog2
= kWRegSizeLog2
- 3;
53 const unsigned kXRegSize
= 64;
54 const unsigned kXRegSizeLog2
= 6;
55 const unsigned kXRegSizeInBytes
= kXRegSize
/ 8;
56 const unsigned kXRegSizeInBytesLog2
= kXRegSizeLog2
- 3;
57 const unsigned kSRegSize
= 32;
58 const unsigned kSRegSizeLog2
= 5;
59 const unsigned kSRegSizeInBytes
= kSRegSize
/ 8;
60 const unsigned kSRegSizeInBytesLog2
= kSRegSizeLog2
- 3;
61 const unsigned kDRegSize
= 64;
62 const unsigned kDRegSizeLog2
= 6;
63 const unsigned kDRegSizeInBytes
= kDRegSize
/ 8;
64 const unsigned kDRegSizeInBytesLog2
= kDRegSizeLog2
- 3;
65 const uint64_t kWRegMask
= UINT64_C(0xffffffff);
66 const uint64_t kXRegMask
= UINT64_C(0xffffffffffffffff);
67 const uint64_t kSRegMask
= UINT64_C(0xffffffff);
68 const uint64_t kDRegMask
= UINT64_C(0xffffffffffffffff);
69 const uint64_t kSSignMask
= UINT64_C(0x80000000);
70 const uint64_t kDSignMask
= UINT64_C(0x8000000000000000);
71 const uint64_t kWSignMask
= UINT64_C(0x80000000);
72 const uint64_t kXSignMask
= UINT64_C(0x8000000000000000);
73 const uint64_t kByteMask
= UINT64_C(0xff);
74 const uint64_t kHalfWordMask
= UINT64_C(0xffff);
75 const uint64_t kWordMask
= UINT64_C(0xffffffff);
76 const uint64_t kXMaxUInt
= UINT64_C(0xffffffffffffffff);
77 const uint64_t kWMaxUInt
= UINT64_C(0xffffffff);
78 const int64_t kXMaxInt
= INT64_C(0x7fffffffffffffff);
79 const int64_t kXMinInt
= INT64_C(0x8000000000000000);
80 const int32_t kWMaxInt
= INT32_C(0x7fffffff);
81 const int32_t kWMinInt
= INT32_C(0x80000000);
82 const unsigned kLinkRegCode
= 30;
83 const unsigned kZeroRegCode
= 31;
84 const unsigned kSPRegInternalCode
= 63;
85 const unsigned kRegCodeMask
= 0x1f;
87 const unsigned kAddressTagOffset
= 56;
88 const unsigned kAddressTagWidth
= 8;
89 const uint64_t kAddressTagMask
=
90 ((UINT64_C(1) << kAddressTagWidth
) - 1) << kAddressTagOffset
;
91 VIXL_STATIC_ASSERT(kAddressTagMask
== UINT64_C(0xff00000000000000));
93 // AArch64 floating-point specifics. These match IEEE-754.
94 const unsigned kDoubleMantissaBits
= 52;
95 const unsigned kDoubleExponentBits
= 11;
96 const unsigned kFloatMantissaBits
= 23;
97 const unsigned kFloatExponentBits
= 8;
99 // Floating-point infinity values.
100 extern const float kFP32PositiveInfinity
;
101 extern const float kFP32NegativeInfinity
;
102 extern const double kFP64PositiveInfinity
;
103 extern const double kFP64NegativeInfinity
;
105 // The default NaN values (for FPCR.DN=1).
106 extern const double kFP64DefaultNaN
;
107 extern const float kFP32DefaultNaN
;
// Map a load/store pair opcode to the size of each of its two data accesses.
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
// Classes of immediate-offset branch instruction, as returned by
// Instruction::BranchType(). CondBranchType and TestBranchType are restored
// here: BranchType() returns them, so they must be declared in this enum.
enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};
// Floating-point rounding modes. The fragmentary text listed only the two
// infinity modes; the four FPCR-encodable modes plus FPTieAway are restored
// to match the FPCR<RMode> encoding described by the surrounding comments.
enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway
};
150 // Instructions. ---------------------------------------------------------------
154 Instr
InstructionBits() const {
155 return *(reinterpret_cast<const Instr
*>(this));
158 void SetInstructionBits(Instr new_instr
) {
159 *(reinterpret_cast<Instr
*>(this)) = new_instr
;
162 int Bit(int pos
) const {
163 return (InstructionBits() >> pos
) & 1;
166 uint32_t Bits(int msb
, int lsb
) const {
167 return unsigned_bitextract_32(msb
, lsb
, InstructionBits());
170 int32_t SignedBits(int msb
, int lsb
) const {
171 int32_t bits
= *(reinterpret_cast<const int32_t*>(this));
172 return signed_bitextract_32(msb
, lsb
, bits
);
175 Instr
Mask(uint32_t mask
) const {
176 return InstructionBits() & mask
;
179 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
180 int64_t Name() const { return Func(HighBit, LowBit); }
181 INSTRUCTION_FIELDS_LIST(DEFINE_GETTER
)
184 // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
185 // formed from ImmPCRelLo and ImmPCRelHi.
186 int ImmPCRel() const {
187 int const offset
= ((ImmPCRelHi() << ImmPCRelLo_width
) | ImmPCRelLo());
188 int const width
= ImmPCRelLo_width
+ ImmPCRelHi_width
;
189 return signed_bitextract_32(width
-1, 0, offset
);
// Decode the bitmask immediate of a logical (immediate) instruction.
uint64_t ImmLogical() const;
// Decode the floating-point immediate encoded in this instruction, as a
// single- or double-precision value respectively.
float ImmFP32() const;
double ImmFP64() const;
196 LSDataSize
SizeLSPair() const {
197 return CalcLSPairDataSize(
198 static_cast<LoadStorePairOp
>(Mask(LoadStorePairMask
)));
202 bool IsCondBranchImm() const {
203 return Mask(ConditionalBranchFMask
) == ConditionalBranchFixed
;
206 bool IsUncondBranchImm() const {
207 return Mask(UnconditionalBranchFMask
) == UnconditionalBranchFixed
;
210 bool IsCompareBranch() const {
211 return Mask(CompareBranchFMask
) == CompareBranchFixed
;
214 bool IsTestBranch() const {
215 return Mask(TestBranchFMask
) == TestBranchFixed
;
218 bool IsPCRelAddressing() const {
219 return Mask(PCRelAddressingFMask
) == PCRelAddressingFixed
;
222 bool IsLogicalImmediate() const {
223 return Mask(LogicalImmediateFMask
) == LogicalImmediateFixed
;
226 bool IsAddSubImmediate() const {
227 return Mask(AddSubImmediateFMask
) == AddSubImmediateFixed
;
230 bool IsAddSubExtended() const {
231 return Mask(AddSubExtendedFMask
) == AddSubExtendedFixed
;
234 bool IsLoadOrStore() const {
235 return Mask(LoadStoreAnyFMask
) == LoadStoreAnyFixed
;
// True iff this instruction is a store (defined out of line).
bool IsStore() const;
241 bool IsLoadLiteral() const {
242 // This includes PRFM_lit.
243 return Mask(LoadLiteralFMask
) == LoadLiteralFixed
;
246 bool IsMovn() const {
247 return (Mask(MoveWideImmediateMask
) == MOVN_x
) ||
248 (Mask(MoveWideImmediateMask
) == MOVN_w
);
251 // Indicate whether Rd can be the stack pointer or the zero register. This
252 // does not check that the instruction actually has an Rd field.
253 Reg31Mode
RdMode() const {
254 // The following instructions use sp or wsp as Rd:
255 // Add/sub (immediate) when not setting the flags.
256 // Add/sub (extended) when not setting the flags.
257 // Logical (immediate) when not setting the flags.
258 // Otherwise, r31 is the zero register.
259 if (IsAddSubImmediate() || IsAddSubExtended()) {
260 if (Mask(AddSubSetFlagsBit
)) {
261 return Reg31IsZeroRegister
;
263 return Reg31IsStackPointer
;
266 if (IsLogicalImmediate()) {
267 // Of the logical (immediate) instructions, only ANDS (and its aliases)
268 // can set the flags. The others can all write into sp.
269 // Note that some logical operations are not available to
270 // immediate-operand instructions, so we have to combine two masks here.
271 if (Mask(LogicalImmediateMask
& LogicalOpMask
) == ANDS
) {
272 return Reg31IsZeroRegister
;
274 return Reg31IsStackPointer
;
277 return Reg31IsZeroRegister
;
280 // Indicate whether Rn can be the stack pointer or the zero register. This
281 // does not check that the instruction actually has an Rn field.
282 Reg31Mode
RnMode() const {
283 // The following instructions use sp or wsp as Rn:
284 // All loads and stores.
285 // Add/sub (immediate).
286 // Add/sub (extended).
287 // Otherwise, r31 is the zero register.
288 if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
289 return Reg31IsStackPointer
;
291 return Reg31IsZeroRegister
;
294 ImmBranchType
BranchType() const {
295 if (IsCondBranchImm()) {
296 return CondBranchType
;
297 } else if (IsUncondBranchImm()) {
298 return UncondBranchType
;
299 } else if (IsCompareBranch()) {
300 return CompareBranchType
;
301 } else if (IsTestBranch()) {
302 return TestBranchType
;
304 return UnknownBranchType
;
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
const Instruction* ImmPCOffsetTarget() const;

// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(const Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
318 // Calculate the address of a literal referred to by a load-literal
319 // instruction, and return it as the specified type.
321 // The literal itself is safely mutable only if the backing buffer is safely
323 template <typename T
>
324 T
LiteralAddress() const {
325 uint64_t base_raw
= reinterpret_cast<uintptr_t>(this);
326 ptrdiff_t offset
= ImmLLiteral() << kLiteralEntrySizeLog2
;
327 uint64_t address_raw
= base_raw
+ offset
;
329 // Cast the address using a C-style cast. A reinterpret_cast would be
330 // appropriate, but it can't cast one integral type to another.
331 T address
= (T
)(address_raw
);
333 // Assert that the address can be represented by the specified type.
334 VIXL_ASSERT((uint64_t)(address
) == address_raw
);
339 uint32_t Literal32() const {
341 memcpy(&literal
, LiteralAddress
<const void*>(), sizeof(literal
));
345 uint64_t Literal64() const {
347 memcpy(&literal
, LiteralAddress
<const void*>(), sizeof(literal
));
351 float LiteralFP32() const {
352 return rawbits_to_float(Literal32());
355 double LiteralFP64() const {
356 return rawbits_to_double(Literal64());
359 const Instruction
* NextInstruction() const {
360 return this + kInstructionSize
;
363 const Instruction
* InstructionAtOffset(int64_t offset
) const {
364 VIXL_ASSERT(IsWordAligned(this + offset
));
365 return this + offset
;
368 template<typename T
> static Instruction
* Cast(T src
) {
369 return reinterpret_cast<Instruction
*>(src
);
372 template<typename T
> static const Instruction
* CastConst(T src
) {
373 return reinterpret_cast<const Instruction
*>(src
);
// The immediate branch-offset field of this instruction.
int ImmBranch() const;

// Patch the offset of a PC-relative addressing instruction, or of an
// immediate branch, to refer to 'target'.
// NOTE(review): presumably these are the helpers behind SetImmPCOffsetTarget
// — confirm in the .cc file.
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
384 #endif // VIXL_A64_INSTRUCTIONS_A64_H_