/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2007, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
	x = sig * 2 ^ exp
   where
	sig = significand
	  (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
	exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and more than
   64-bit) machines, otherwise two HOST_WIDE_INTs are used for the
   significand.  Only a half of the significand bits is used (in normalized
   sreals) so that we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.  So the precision for 64-bit and 32-bit
   machines is 32-bit.

   Invariant: The numbers are normalized before and after each call of
   sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
	 SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
	-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small it meets the following
   conditions:
	sig == 0 && exp == -SREAL_MAX_EXP  */
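/* Illustrative example (not from the original sources; it assumes the
   sreal.h definitions SREAL_MIN_SIG == 2^(SREAL_PART_BITS - 1) and
   SREAL_MAX_SIG == 2^SREAL_PART_BITS - 1).  On a 64-bit host
   SREAL_PART_BITS is 32, so the integer 1200 is stored normalized as

	sig = 1200 << 21 = 2516582400	(so that 2^31 <= sig < 2^32)
	exp = -21

   and indeed 2516582400 * 2^-21 == 1200.  Typical use:

	sreal x;
	sreal_init (&x, 1200, 0);
	... sreal_to_int (&x) == 1200 ...  */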
54 #include "coretypes.h"
static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
	   HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
	   x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out from
     exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_hi++;
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	}
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
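/* Rounding example (illustration of the #else branch above): the code
   computes x->sig = (x->sig + 2^(s-1)) >> s, so for sig == 7 and s == 2
   it yields (7 + 2) >> 2 == 2 instead of the truncated 1, i.e. 7/4 == 1.75
   is rounded up, as the function comment promises.  */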
/* Normalize *X.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      if (x->sig_hi == 0)
	{
	  /* Move lower part of significand to higher part.  */
	  x->sig_hi = x->sig_lo;
	  x->sig_lo = 0;
	  x->exp -= SREAL_PART_BITS;
	}
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
	{
	  x->sig_hi <<= 1;
	  x->exp--;
	  shift++;
	}
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig_hi = 0;
	  x->sig_lo = 0;
	}
      else if (shift)
	{
	  mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
	  x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
	  x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
	}
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
	{
	  tmp >>= 1;
	  shift++;
	}
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
		    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	  x->sig_hi++;
	  if (x->sig_hi > SREAL_MAX_SIG)
	    {
	      /* x->sig_hi was SREAL_MAX_SIG before increment
		 so now the last bit is zero.  */
	      x->sig_hi >>= 1;
	      x->sig_lo >>= 1;
	      x->exp++;
	    }
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig_hi = SREAL_MAX_SIG;
	  x->sig_lo = SREAL_MAX_SIG;
	}
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      do
	{
	  x->sig <<= 1;
	  x->exp--;
	}
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig = 0;
	}
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;

      do
	{
	  last_bit = x->sig & 1;
	  x->sig >>= 1;
	  x->exp++;
	}
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
	{
	  x->sig >>= 1;
	  x->exp++;
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig = SREAL_MAX_SIG;
	}
    }
#endif
}
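/* Normalization example (illustration; assumes the 64-bit-host bounds
   SREAL_MIN_SIG == 2^31 and SREAL_MAX_SIG == 2^32 - 1 from sreal.h).
   For sig == 2^32 + 3 and exp == 0 the do-while above runs once:
   last_bit == 1, sig becomes 2^31 + 1, exp becomes 1.  Rounding then adds
   last_bit back, so the stored value is (2^31 + 2) * 2^1 == 2^32 + 4,
   i.e. the dropped low bit was rounded up.  */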
/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
  r->exp = exp - SREAL_PART_BITS;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}
/* Return integer value of *R.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  return r->sig >> -r->exp;
#endif
}
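/* Conversion example (illustration): the value 1.75 is stored on a 64-bit
   host as sig == 7 << 29 and exp == -31, so the final branch above returns
   sig >> 31 == 1; fractions are truncated, not rounded, values with
   exp >= SREAL_PART_BITS saturate to MAX_HOST_WIDE_INT, and values with
   exp <= -SREAL_BITS yield 0.  */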
/* Compare *A and *B.  Return -1 if *A < *B, 1 if *A > *B and 0 if
   *A == *B.  */

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}
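/* Note (added for clarity): comparing exponents first and significands
   only on a tie is correct solely because both operands are normalized;
   the sreal.h bounds give 2 * SREAL_MIN_SIG > SREAL_MAX_SIG, so a larger
   exponent always means a strictly larger value.  */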
/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif

  normalize (r);
  return r;
}
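/* Note (added for clarity): the initial swap keeps the larger operand in
   A; the smaller one is shifted right by dexp == a->exp - b->exp (with
   rounding in shift_right) so both significands share A's exponent before
   being added.  When dexp > SREAL_BITS the smaller operand is below half
   an ulp of the larger one and the sum is simply A.  */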
/* *R = *A - *B.  Return R.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      /* Borrow from the higher part of the significand.  */
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif

  normalize (r);
  return r;
}
/* *R = *A * *B.  Return R.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
	{
	  sreal *swap;
	  swap = a;
	  a = b;
	  b = swap;
	}

      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
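/* Note (added for clarity, SREAL_PART_BITS < 32 branch): writing
   A == a_hi * 2^16 + a_lo and B == b_hi * 2^16 + b_lo gives

	A * B == a_hi*b_hi * 2^32 + (a_hi*b_lo + a_lo*b_hi) * 2^16 + a_lo*b_lo

   and the code above accumulates A * B / 2^16, which is why r->exp gets
   the extra + SREAL_PART_BITS: a_hi*b_hi lands in sig_hi, the two cross
   products plus the high half of a_lo*b_lo form the carry and sig_lo, and
   the lowest 16 bits of a_lo*b_lo are discarded before normalize rescales
   the result.  */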
/* *R = *A / *B.  Return R.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since division by the whole number is pretty ugly to write
	 we are dividing by the first 3/4 of bits of the number.  */

      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
	      + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
	tmp2++;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->sig_lo = 0;
      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}
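/* Note (added for clarity): in the 64-bit branch the dividend is
   pre-scaled by 2^SREAL_PART_BITS so the integer quotient keeps
   SREAL_PART_BITS fraction bits; the - SREAL_PART_BITS in r->exp cancels
   that scale again.  In the SREAL_PART_BITS < 32 branch the quotient is
   accumulated into sig_hi in three chunks of SREAL_PART_BITS / 2 bits,
   dividing by only the top 3/4 of the divisor's bits, which leaves the
   significand scaled by 2^(SREAL_BITS + SREAL_PART_BITS / 2) relative to
   the exact ratio, hence the matching subtraction in r->exp above.  */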