/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/bswap.h"

/* Prefer the compiler's native 128-bit arithmetic when it is available. */
#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

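/*
 * Illustrative sketch (not part of this header's API; the variable names
 * are hypothetical): computing the full 128-bit product of two 64-bit
 * values with mulu64().
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, 0xdeadbeefcafebabeULL, 16);
 *     // hi:lo now holds the 128-bit product: hi == 0xd, lo == 0xeadbeefcafebabe0.
 */
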
/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}

static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    if (divisor == 0) {
        return 1;
    } else {
        __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
        __uint128_t result = dividend / divisor;
        *plow = result;
        *phigh = dividend % divisor;
        return result > UINT64_MAX;
    }
}

static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
    if (divisor == 0) {
        return 1;
    } else {
        __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
        __int128_t result = dividend / divisor;
        *plow = result;
        *phigh = dividend % divisor;
        /* The full 128-bit quotient differs from its 64-bit truncation
         * exactly when the quotient does not fit in 64 bits. */
        return result != *plow;
    }
}

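/*
 * Illustrative sketch (hypothetical values, not part of this header's API):
 * dividing a 128-bit dividend held in a low/high pair with divu128().  On
 * return the pair holds quotient (low) and remainder (high); the return
 * value is nonzero if the quotient overflowed 64 bits.
 *
 *     uint64_t lo = 1, hi = 1;              // dividend = 2^64 + 1
 *     int overflow = divu128(&lo, &hi, 2);
 *     // overflow == 0, lo == 0x8000000000000000 (quotient), hi == 1 (remainder)
 */
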
#else
void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
#endif

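/*
 * Illustrative sketch (hypothetical frequencies, not part of this header's
 * API): muldiv64() is handy for rescaling a counter from one clock rate to
 * another without overflowing the intermediate product.
 *
 *     // Convert @ticks of a 1 MHz counter to nanoseconds: ticks * 1e9 / 1e6.
 *     uint64_t ns = muldiv64(ticks, 1000000000, 1000000);
 */
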
/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}

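/*
 * Illustrative sketch (hypothetical variable, not part of this header's
 * API): clz32() gives a branch-free floor(log2(val)) for non-zero values.
 *
 *     // Index of the most significant set bit; e.g. 0x80 -> 7, 1 -> 0.
 *     int msb = 31 - clz32(val);   // only meaningful for val != 0
 */
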
/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}

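/*
 * Illustrative sketch (the pending_irqs mask and handle_irq() helper are
 * hypothetical, not part of this header's API): ctz32() is the usual way
 * to walk the set bits of a mask from least to most significant.
 *
 *     uint32_t mask = pending_irqs;
 *     while (mask) {
 *         int irq = ctz32(mask);   // lowest pending bit
 *         mask &= mask - 1;        // clear it and continue
 *         handle_irq(irq);
 *     }
 */
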
/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}

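/*
 * Illustrative sketch (hypothetical variable, not part of this header's
 * API): clrsb64() can be used to test whether a signed 64-bit value is
 * representable in a narrower signed type.
 *
 *     // A value fits in int32_t iff at least 32 redundant sign bits remain.
 *     bool fits_in_int32 = clrsb64(val) >= 32;
 */
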
/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}

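/*
 * Illustrative sketch (hypothetical variable, not part of this header's
 * API): ctpop32() returns the Hamming weight, e.g. for computing parity.
 *
 *     int ones = ctpop32(0xf0f0);          // 8
 *     bool odd_parity = ctpop32(word) & 1;
 */
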
/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
}

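/*
 * Illustrative sketch (not part of this header's API): revbit8() mirrors
 * the bit order, which is typically needed for LSB-first wire formats and
 * CRC calculations.
 *
 *     uint8_t r = revbit8(0x01);   // 0x80
 *     r = revbit8(0xb1);           // 0x8d (1011'0001 -> 1000'1101)
 */
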
/* Host type specific sizes of these routines. */

#if ULONG_MAX == UINT32_MAX
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/**
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}

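/*
 * Illustrative sketch (hypothetical values, not part of this header's API):
 * rounding a requested size to a power of two.
 *
 *     uint64_t cap = pow2ceil(1000);    // 1024
 *     uint64_t flr = pow2floor(1000);   // 512
 *     // Corner cases: pow2ceil(0) == 1, and pow2ceil(x) == 0 for x > 2^63.
 */
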
/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * reduced modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * reduced modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
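
/*
 * Illustrative sketch (hypothetical values, not part of this header's API):
 * shifting a 128-bit quantity kept as a low/high pair.
 *
 *     uint64_t lo = 0, hi = 1;          // value = 2^64
 *     bool overflow = false;
 *     ulshift(&lo, &hi, 2, &overflow);  // value = 2^66; no bits lost, so
 *                                       //   overflow stays false
 *     urshift(&lo, &hi, 66);            // value = 1 (lo == 1, hi == 0)
 */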