/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2007 Aurelien Jarno
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "qemu/osdep.h"
27 #include "qemu/host-utils.h"
/* Long integer helpers */

/*
 * mul64() - full 64x64 -> 128 bit unsigned multiply.
 * @plow:  output, low 64 bits of the product.
 * @phigh: output, high 64 bits of the product.
 * @a, @b: 64-bit operands, treated as unsigned.
 *
 * Built from four 32x32 -> 64 bit partial products combined through a
 * 64-bit carry accumulator, so no 128-bit type (and no endian-dependent
 * union punning) is required.
 */
static inline void mul64(uint64_t *plow, uint64_t *phigh,
                         uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint32_t)a;
    uint64_t a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b;
    uint64_t b_hi = b >> 32;

    /* The four partial products. */
    uint64_t p_ll = a_lo * b_lo;
    uint64_t p_lh = a_lo * b_hi;
    uint64_t p_hl = a_hi * b_lo;
    uint64_t p_hh = a_hi * b_hi;

    /*
     * Middle column (bits 32..95 of the result): the sum of three
     * 32-bit quantities cannot overflow 64 bits, so a single
     * accumulator captures all carries.
     */
    uint64_t mid = (p_ll >> 32) + (uint32_t)p_lh + (uint32_t)p_hl;

    *plow = (mid << 32) | (uint32_t)p_ll;
    *phigh = p_hh + (p_lh >> 32) + (p_hl >> 32) + (mid >> 32);
}
/* Unsigned 64x64 -> 128 multiplication */
void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    /* Operands are already unsigned; delegate to the common helper. */
    mul64(plow, phigh, a, b);
}
/* Signed 64x64 -> 128 multiplication */
void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    uint64_t hi;

    /* Unsigned product of the raw bit patterns; low half is already
       correct for two's-complement operands. */
    mul64(plow, &hi, a, b);

    /* Adjust for signs. */
    if (b < 0) {
        hi -= a;
    }
    if (a < 0) {
        hi -= b;
    }
    *phigh = hi;
}
/*
 * Unsigned 128-by-64 division.
 * The 128-bit dividend is passed in via plow (low half) and phigh
 * (high half); the quotient is returned through the same pointers.
 * Returns the remainder via the function return value.
 */
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    uint64_t hi = *phigh;
    uint64_t lo = *plow;
    uint64_t r;
    int shift;

    if (divisor == 0 || hi == 0) {
        /*
         * Plain 64-by-64 division suffices.  A zero divisor is not
         * special-cased here: the division below traps/faults exactly
         * as the caller's guest semantics require.
         */
        *plow = lo / divisor;
        *phigh = 0;
        return lo % divisor;
    }

    shift = clz64(divisor);

    if (hi < divisor) {
        /* Quotient fits in 64 bits; high half is zero. */
        if (shift != 0) {
            /* normalize the divisor, shifting the dividend accordingly */
            divisor <<= shift;
            hi = (hi << shift) | (lo >> (64 - shift));
            lo <<= shift;
        }
        *phigh = 0;
        *plow = udiv_qrnnd(&r, hi, lo, divisor);
    } else {
        if (shift != 0) {
            /* normalize the divisor, shifting the dividend accordingly */
            uint64_t top = hi >> (64 - shift);
            divisor <<= shift;
            hi = (hi << shift) | (lo >> (64 - shift));
            lo <<= shift;
            *phigh = udiv_qrnnd(&hi, top, hi, divisor);
        } else {
            /*
             * The MSB of divisor is set (shift == 0) and hi >= divisor,
             * so (hi - divisor) < divisor.  The high quotient word is
             * therefore exactly 1, and a single udiv_qrnnd call on the
             * reduced dividend yields the low word.
             */
            hi -= divisor;
            *phigh = 1;
        }
        *plow = udiv_qrnnd(&r, hi, lo, divisor);
    }

    /*
     * Since the dividend/divisor might have been normalized, the
     * remainder has to be shifted back by the same amount.
     */
    return r >> shift;
}
/*
 * Signed 128-by-64 division.
 * The 128-bit dividend is passed in via plow (low half) and phigh
 * (high half); the quotient is returned through the same pointers.
 * Returns the remainder via the function return value.
 */
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
{
    bool q_neg = false;
    bool r_neg = false;
    uint64_t u_hi = *phigh;
    uint64_t u_lo = *plow;
    uint64_t r;

    if (*phigh < 0) {
        q_neg = !q_neg;
        r_neg = !r_neg;

        /* Two's-complement negation of the 128-bit dividend. */
        if (u_lo == 0) {
            u_hi = -u_hi;
        } else {
            u_hi = ~u_hi;
            u_lo = -u_lo;
        }
    }

    if (divisor < 0) {
        q_neg = !q_neg;

        divisor = -divisor;
    }

    r = divu128(&u_lo, &u_hi, (uint64_t)divisor);

    if (q_neg) {
        /* Two's-complement negation of the 128-bit quotient. */
        if (u_lo == 0) {
            *phigh = -u_hi;
            *plow = 0;
        } else {
            *phigh = ~u_hi;
            *plow = -u_lo;
        }
    } else {
        *phigh = u_hi;
        *plow = u_lo;
    }

    /* Remainder takes the sign of the dividend. */
    if (r_neg) {
        return -r;
    }
    return r;
}
/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range will
 * be mod to 128. In other words, the caller is responsible to
 * verify/assert both the shift range and plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
{
    shift &= 127;
    if (shift == 0) {
        return;
    }

    /* Bits of the high word that land in (or become) the low word. */
    uint64_t carry = *phigh >> (shift & 63);
    if (shift >= 64) {
        *plow = carry;
        *phigh = 0;
    } else {
        /* shift < 64 here, so the 64 - shift counter-shift is valid. */
        *plow = (*plow >> shift) | (*phigh << (64 - shift));
        *phigh = carry;
    }
}
/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - set to true if any 1-bit is shifted out; it is
 *            never cleared, so a caller can accumulate the flag
 *            across multiple calls.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range will
 * be mod to 128. In other words, the caller is responsible to
 * verify/assert both the shift range and plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
{
    uint64_t lo = *plow;
    uint64_t hi = *phigh;

    shift &= 127;
    if (shift == 0) {
        return;
    }

    /*
     * Check if any bit will be shifted out: whatever survives a right
     * shift by (128 - shift) is exactly what the left shift discards.
     */
    urshift(&lo, &hi, 128 - shift);
    if ((lo | hi) != 0) {
        *overflow = true;
    }

    if (shift >= 64) {
        *phigh = *plow << (shift & 63);
        *plow = 0;
    } else {
        /* shift < 64 here, so the 64 - shift counter-shift is valid. */
        *phigh = (*plow >> (64 - shift)) | (*phigh << shift);
        *plow = *plow << shift;
    }
}