disas/libvixl/a64/instructions-a64.h: Remove unused constants
[qemu-kvm.git] / disas / libvixl / a64 / instructions-a64.h
blob: 29f972291b117cad22b809cd41cebe345a8b3da8
1 // Copyright 2013, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef VIXL_A64_INSTRUCTIONS_A64_H_
28 #define VIXL_A64_INSTRUCTIONS_A64_H_
30 #include "globals.h"
31 #include "utils.h"
32 #include "a64/constants-a64.h"
34 namespace vixl {
35 // ISA constants. --------------------------------------------------------------
37 typedef uint32_t Instr;
38 const unsigned kInstructionSize = 4;
39 const unsigned kInstructionSizeLog2 = 2;
40 const unsigned kLiteralEntrySize = 4;
41 const unsigned kLiteralEntrySizeLog2 = 2;
42 const unsigned kMaxLoadLiteralRange = 1 * MBytes;
44 // This is the nominal page size (as used by the adrp instruction); the actual
45 // size of the memory pages allocated by the kernel is likely to differ.
46 const unsigned kPageSize = 4 * KBytes;
47 const unsigned kPageSizeLog2 = 12;
49 const unsigned kWRegSize = 32;
50 const unsigned kWRegSizeLog2 = 5;
51 const unsigned kWRegSizeInBytes = kWRegSize / 8;
52 const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
53 const unsigned kXRegSize = 64;
54 const unsigned kXRegSizeLog2 = 6;
55 const unsigned kXRegSizeInBytes = kXRegSize / 8;
56 const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
57 const unsigned kSRegSize = 32;
58 const unsigned kSRegSizeLog2 = 5;
59 const unsigned kSRegSizeInBytes = kSRegSize / 8;
60 const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
61 const unsigned kDRegSize = 64;
62 const unsigned kDRegSizeLog2 = 6;
63 const unsigned kDRegSizeInBytes = kDRegSize / 8;
64 const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
65 const uint64_t kWRegMask = UINT64_C(0xffffffff);
66 const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
67 const uint64_t kSRegMask = UINT64_C(0xffffffff);
68 const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
69 const uint64_t kSSignMask = UINT64_C(0x80000000);
70 const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
71 const uint64_t kWSignMask = UINT64_C(0x80000000);
72 const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
73 const uint64_t kByteMask = UINT64_C(0xff);
74 const uint64_t kHalfWordMask = UINT64_C(0xffff);
75 const uint64_t kWordMask = UINT64_C(0xffffffff);
76 const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
77 const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
78 const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
79 const int64_t kXMinInt = INT64_C(0x8000000000000000);
80 const int32_t kWMaxInt = INT32_C(0x7fffffff);
81 const int32_t kWMinInt = INT32_C(0x80000000);
82 const unsigned kLinkRegCode = 30;
83 const unsigned kZeroRegCode = 31;
84 const unsigned kSPRegInternalCode = 63;
85 const unsigned kRegCodeMask = 0x1f;
87 const unsigned kAddressTagOffset = 56;
88 const unsigned kAddressTagWidth = 8;
89 const uint64_t kAddressTagMask =
90 ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
91 VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
93 // AArch64 floating-point specifics. These match IEEE-754.
94 const unsigned kDoubleMantissaBits = 52;
95 const unsigned kDoubleExponentBits = 11;
96 const unsigned kFloatMantissaBits = 23;
97 const unsigned kFloatExponentBits = 8;
// Transfer-size field of load/store instructions, as a log2 byte count.
enum LSDataSize {
  LSByte       = 0,
  LSHalfword   = 1,
  LSWord       = 2,
  LSDoubleWord = 3
};
106 LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
// Classification of immediate (PC-relative) branch instructions.
enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType    = 1,  // b.cond
  UncondBranchType  = 2,  // b, bl
  CompareBranchType = 3,  // cbz, cbnz
  TestBranchType    = 4   // tbz, tbnz
};
// Addressing modes for loads and stores.
enum AddrMode {
  Offset,     // [base, #imm]
  PreIndex,   // [base, #imm]!
  PostIndex   // [base], #imm
};
// Floating-point rounding modes.
enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway
};
// Interpretation of register code 31 in a given operand position.
enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};
139 // Instructions. ---------------------------------------------------------------
141 class Instruction {
142 public:
143 inline Instr InstructionBits() const {
144 return *(reinterpret_cast<const Instr*>(this));
147 inline void SetInstructionBits(Instr new_instr) {
148 *(reinterpret_cast<Instr*>(this)) = new_instr;
151 inline int Bit(int pos) const {
152 return (InstructionBits() >> pos) & 1;
155 inline uint32_t Bits(int msb, int lsb) const {
156 return unsigned_bitextract_32(msb, lsb, InstructionBits());
159 inline int32_t SignedBits(int msb, int lsb) const {
160 int32_t bits = *(reinterpret_cast<const int32_t*>(this));
161 return signed_bitextract_32(msb, lsb, bits);
164 inline Instr Mask(uint32_t mask) const {
165 return InstructionBits() & mask;
168 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
169 inline int64_t Name() const { return Func(HighBit, LowBit); }
170 INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
171 #undef DEFINE_GETTER
173 // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
174 // formed from ImmPCRelLo and ImmPCRelHi.
175 int ImmPCRel() const {
176 int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
177 int const width = ImmPCRelLo_width + ImmPCRelHi_width;
178 return signed_bitextract_32(width-1, 0, offset);
181 uint64_t ImmLogical() const;
182 float ImmFP32() const;
183 double ImmFP64() const;
185 inline LSDataSize SizeLSPair() const {
186 return CalcLSPairDataSize(
187 static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
190 // Helpers.
191 inline bool IsCondBranchImm() const {
192 return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
195 inline bool IsUncondBranchImm() const {
196 return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
199 inline bool IsCompareBranch() const {
200 return Mask(CompareBranchFMask) == CompareBranchFixed;
203 inline bool IsTestBranch() const {
204 return Mask(TestBranchFMask) == TestBranchFixed;
207 inline bool IsPCRelAddressing() const {
208 return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
211 inline bool IsLogicalImmediate() const {
212 return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
215 inline bool IsAddSubImmediate() const {
216 return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
219 inline bool IsAddSubExtended() const {
220 return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
223 inline bool IsLoadOrStore() const {
224 return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
227 inline bool IsMovn() const {
228 return (Mask(MoveWideImmediateMask) == MOVN_x) ||
229 (Mask(MoveWideImmediateMask) == MOVN_w);
232 // Indicate whether Rd can be the stack pointer or the zero register. This
233 // does not check that the instruction actually has an Rd field.
234 inline Reg31Mode RdMode() const {
235 // The following instructions use sp or wsp as Rd:
236 // Add/sub (immediate) when not setting the flags.
237 // Add/sub (extended) when not setting the flags.
238 // Logical (immediate) when not setting the flags.
239 // Otherwise, r31 is the zero register.
240 if (IsAddSubImmediate() || IsAddSubExtended()) {
241 if (Mask(AddSubSetFlagsBit)) {
242 return Reg31IsZeroRegister;
243 } else {
244 return Reg31IsStackPointer;
247 if (IsLogicalImmediate()) {
248 // Of the logical (immediate) instructions, only ANDS (and its aliases)
249 // can set the flags. The others can all write into sp.
250 // Note that some logical operations are not available to
251 // immediate-operand instructions, so we have to combine two masks here.
252 if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
253 return Reg31IsZeroRegister;
254 } else {
255 return Reg31IsStackPointer;
258 return Reg31IsZeroRegister;
261 // Indicate whether Rn can be the stack pointer or the zero register. This
262 // does not check that the instruction actually has an Rn field.
263 inline Reg31Mode RnMode() const {
264 // The following instructions use sp or wsp as Rn:
265 // All loads and stores.
266 // Add/sub (immediate).
267 // Add/sub (extended).
268 // Otherwise, r31 is the zero register.
269 if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
270 return Reg31IsStackPointer;
272 return Reg31IsZeroRegister;
275 inline ImmBranchType BranchType() const {
276 if (IsCondBranchImm()) {
277 return CondBranchType;
278 } else if (IsUncondBranchImm()) {
279 return UncondBranchType;
280 } else if (IsCompareBranch()) {
281 return CompareBranchType;
282 } else if (IsTestBranch()) {
283 return TestBranchType;
284 } else {
285 return UnknownBranchType;
289 // Find the target of this instruction. 'this' may be a branch or a
290 // PC-relative addressing instruction.
291 const Instruction* ImmPCOffsetTarget() const;
293 // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
294 // a PC-relative addressing instruction.
295 void SetImmPCOffsetTarget(const Instruction* target);
296 // Patch a literal load instruction to load from 'source'.
297 void SetImmLLiteral(const Instruction* source);
299 inline uint8_t* LiteralAddress() const {
300 int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
301 const uint8_t* address = reinterpret_cast<const uint8_t*>(this) + offset;
302 // Note that the result is safely mutable only if the backing buffer is
303 // safely mutable.
304 return const_cast<uint8_t*>(address);
307 inline uint32_t Literal32() const {
308 uint32_t literal;
309 memcpy(&literal, LiteralAddress(), sizeof(literal));
311 return literal;
314 inline uint64_t Literal64() const {
315 uint64_t literal;
316 memcpy(&literal, LiteralAddress(), sizeof(literal));
318 return literal;
321 inline float LiteralFP32() const {
322 return rawbits_to_float(Literal32());
325 inline double LiteralFP64() const {
326 return rawbits_to_double(Literal64());
329 inline const Instruction* NextInstruction() const {
330 return this + kInstructionSize;
333 inline const Instruction* InstructionAtOffset(int64_t offset) const {
334 VIXL_ASSERT(IsWordAligned(this + offset));
335 return this + offset;
338 template<typename T> static inline Instruction* Cast(T src) {
339 return reinterpret_cast<Instruction*>(src);
342 template<typename T> static inline const Instruction* CastConst(T src) {
343 return reinterpret_cast<const Instruction*>(src);
346 private:
347 inline int ImmBranch() const;
349 void SetPCRelImmTarget(const Instruction* target);
350 void SetBranchImmTarget(const Instruction* target);
352 } // namespace vixl
354 #endif // VIXL_A64_INSTRUCTIONS_A64_H_