disas/libvixl/utils.h
// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <math.h>
#include <string.h>
#include "globals.h"

namespace vixl {

// Macros for compile-time format checking.
#if defined(__GNUC__)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif

// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return !(x >> n);
}

inline unsigned truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return (x & ((INT64_C(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V) \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)  \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(N, x); }
#define DECLARE_IS_UINT_N(N) \
inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
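
// Usage sketch (illustrative, not part of the original header): the macro
// expansions above generate fixed-width helpers such as is_int8, is_uint12 and
// truncate_to_int16, one per bit width from 1 to 63. For example:
//
//   is_int8(127);            // true  (fits in a signed 8-bit range)
//   is_int8(128);            // false (needs 9 bits when signed)
//   is_uint12(0xFFF);        // true  (largest unsigned 12-bit value)
//   truncate_to_int8(0x1FF); // 0xFF  (keeps only the low 8 bits)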

// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  return (x << (31 - msb)) >> (lsb + 31 - msb);
}

inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
  return (x << (63 - msb)) >> (lsb + 63 - msb);
}
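
// Usage sketch (illustrative, not part of the original header): the extract
// helpers take an inclusive [msb, lsb] bit range and return the selected bits
// shifted down to bit 0; the signed variants sign-extend from the top bit of
// the extracted field.
//
//   unsigned_bitextract_32(7, 4, 0xAB);  // 0xA (bits 7..4)
//   signed_bitextract_32(7, 4, 0xAB);    // -6  (0xA sign-extended from bit 3)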

// Floating point representation.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);
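
// Usage sketch (illustrative, not part of the original header): these helpers
// reinterpret an IEEE 754 value as its raw bit pattern and back, for example:
//
//   float_to_rawbits(1.0f);                           // 0x3F800000
//   rawbits_to_double(UINT64_C(0x4000000000000000));  // 2.0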

// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = double_to_rawbits(num);
  if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}

inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = float_to_rawbits(num);
  if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}

template <typename T>
inline bool IsQuietNaN(T num) {
  return isnan(num) && !IsSignallingNaN(num);
}

// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(isnan(num));
  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}

inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(isnan(num));
  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}
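
// Illustrative note (not part of the original header): in the IEEE 754 binary
// formats used here, a NaN is quiet when the most significant fraction bit is
// set and signalling when it is clear, which is exactly the bit that the masks
// above test and set. For single precision, for example:
//
//   IsSignallingNaN(rawbits_to_float(0x7F800001));  // true  (quiet bit clear)
//   IsQuietNaN(rawbits_to_float(0x7FC00000));       // true  (quiet bit set)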

// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}

inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}
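
// Illustrative note (not part of the original header): fma()/fmaf() compute
// op1 * op2 + a with a single rounding step, so the result can differ from the
// same expression evaluated with two roundings. For example, with IEEE 754
// doubles:
//
//   FusedMultiplyAdd(0.1, 10.0, -1.0);  // about 5.6e-17 (2^-54)
//   0.1 * 10.0 - 1.0;                   // 0.0 (product rounds to 1.0 first)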

// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);

unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
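
// Usage sketch (illustrative, not part of the original header; 'width' is
// assumed to be the operand size in bits, as used elsewhere in VIXL):
//
//   CountLeadingZeros(0x1, 32);   // 31
//   CountTrailingZeros(0x8, 64);  // 3
//   CountSetBits(0xFF, 64);       // 8
//   LowestSetBit(0x30);           // 0x10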

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
  return ((intptr_t)(pointer) & 3) == 0;
}
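
// Usage sketch (illustrative, not part of the original header): the check
// masks the low two address bits, i.e. it tests 4-byte alignment.
//
//   IsWordAligned(reinterpret_cast<int32_t*>(0x1004));  // true
//   IsWordAligned(reinterpret_cast<int32_t*>(0x1006));  // false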

// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = (alignment - pointer_raw) % alignment;
  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);

  return (T)(pointer_raw + align_step);
}

// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = pointer_raw % alignment;
  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);

  return (T)(pointer_raw - align_step);
}
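
// Usage sketch (illustrative, not part of the original header): AlignUp rounds
// an address (or integer) up to the next multiple of 'alignment', AlignDown
// rounds it down, and both leave already-aligned values unchanged.
//
//   AlignUp(UINT64_C(0x1001), 16);    // 0x1010
//   AlignDown(UINT64_C(0x100F), 16);  // 0x1000
//   AlignUp(UINT64_C(0x1000), 16);    // 0x1000 (already aligned)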

}  // namespace vixl

#endif  // VIXL_UTILS_H