/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
        x = sig * 2 ^ exp
   where
        sig = significand
          (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
        exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and wider)
   machines; otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the significand bits are used (in normalized sreals), so
   that we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.  So the precision is 32 bits on both 64-bit
   and 32-bit machines.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
         SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
        -SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small, it meets the following
   conditions:
        sig == 0 && exp == -SREAL_MAX_EXP  */
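/* A worked example of the representation (a sketch only; it assumes the
   definitions in sreal.h, where SREAL_MIN_SIG is 1 << (SREAL_PART_BITS - 1)
   and SREAL_MAX_SIG is (1 << SREAL_PART_BITS) - 1).  On a 64-bit host
   SREAL_PART_BITS is 32, so a normalized significand lies in
   [2^31, 2^32 - 1], and the value 5 is stored as

        sig = 5 * 2^29 = 2684354560,   exp = -29

   because 2684354560 * 2^-29 == 5.  On a 32-bit host the same 32-bit
   significand is split into sig_hi (upper 16 bits) and sig_lo (lower
   16 bits).  */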
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "sreal.h"
static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
           HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
           x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
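
/* For instance, on a 64-bit host (where SREAL_PART_BITS is 32 and the
   sketch above applies) the value 20 is held as sig = 20 * 2^27 and
   exp = -27, so dump_sreal would print "(2684354560 * 2^-27)"; the 32-bit
   variant prints the two significand halves separately.  */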
/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out of
     the exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;
#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      /* The shift crosses the boundary between the two significand words:
         only a rounded part of SIG_HI survives, in SIG_LO.  */
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          /* Carry the rounding overflow into SIG_HI.  */
          x->sig_hi++;
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
        }
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
/* Normalize *X.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      /* Significand is too small; shift it left.  */
      if (x->sig_hi == 0)
        {
          /* Move lower part of significand to higher part.  */
          x->sig_hi = x->sig_lo;
          x->sig_lo = 0;
          x->exp -= SREAL_PART_BITS;
        }
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
        {
          x->sig_hi <<= 1;
          x->exp--;
          shift++;
        }
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig_hi = 0;
          x->sig_lo = 0;
        }
      else if (shift)
        {
          /* Shift the top SHIFT bits of SIG_LO into SIG_HI.  */
          mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
          x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
          x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
        }
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      /* Significand is too large; shift it right and round.  */
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
        {
          tmp >>= 1;
          shift++;
        }
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
                    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
          x->sig_hi++;
          if (x->sig_hi > SREAL_MAX_SIG)
            {
              /* x->sig_hi was SREAL_MAX_SIG before the increment,
                 so now its last bit is zero.  */
              x->sig_hi >>= 1;
              x->sig_lo >>= 1;
              x->exp++;
            }
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig_hi = SREAL_MAX_SIG;
          x->sig_lo = SREAL_MAX_SIG;
        }
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      /* Significand is too small; shift it left.  */
      do
        {
          x->sig <<= 1;
          x->exp--;
        }
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig = 0;
        }
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      /* Significand is too large; shift it right and round.  */
      int last_bit;

      do
        {
          last_bit = x->sig & 1;
          x->sig >>= 1;
          x->exp++;
        }
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
        {
          x->sig >>= 1;
          x->exp++;
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig = SREAL_MAX_SIG;
        }
    }
#endif
}
/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
  r->exp = exp - 16;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}
/* Return integer value of *R.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  return r->sig >> -r->exp;
#endif
}
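
/* Usage sketch (illustrative only, so it is kept out of the build with
   #if 0): sreal_init builds a normalized value and sreal_to_int truncates
   it back to an integer, saturating at MAX_HOST_WIDE_INT when the value is
   too large.  */
#if 0
static void
sreal_to_int_example (void)
{
  sreal x;

  sreal_init (&x, 5, 2);                /* x = 5 * 2^2 = 20.  */
  gcc_assert (sreal_to_int (&x) == 20);

  sreal_init (&x, 7, -1);               /* x = 7 * 2^-1 = 3.5.  */
  gcc_assert (sreal_to_int (&x) == 3);  /* Truncated.  */
}
#endif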
/* Compare *A and *B.  Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}
/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  /* Make A the operand with the larger (or equal) exponent.  */
  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
      /* B is too small to affect A's significand.  */
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      /* Align B's significand with A's exponent.  */
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif

  normalize (r);
  return r;
}
/* *R = *A - *B.  Return R.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
      /* B is too small to affect A's significand.  */
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      /* Align B's significand with A's exponent.  */
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      /* Borrow from the higher part of the significand.  */
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif

  normalize (r);
  return r;
}
/* *R = *A * *B.  Return R.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      /* At least one operand is zero, so the result is zero.  */
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;

      if (sreal_compare (a, b) < 0)
        {
          sreal *swap;
          swap = a;
          a = b;
          b = swap;
        }

      /* The lowest SREAL_PART_BITS bits of the product are dropped,
         so compensate in the exponent.  */
      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      /* Multiply the two 32-bit significands in 16-bit parts.  */
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      /* At least one operand is zero, so the result is zero.  */
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
/* *R = *A / *B.  Return R.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      /* A is zero, so the result is zero.  */
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since dividing by the whole number is pretty ugly to write,
         we divide by the first 3/4 of its bits, rounded to nearest.  */

      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
              + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
        tmp2++;

      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}
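
/* End-to-end usage sketch (illustrative only, so it is kept out of the
   build with #if 0): build sreals with sreal_init, combine them with
   sreal_add/sreal_sub/sreal_mul/sreal_div, and read the result back with
   sreal_to_int or print it with dump_sreal.  */
#if 0
static void
sreal_usage_example (void)
{
  sreal a, b, sum, prod, quot;

  sreal_init (&a, 3, 0);        /* a = 3.  */
  sreal_init (&b, 5, 1);        /* b = 5 * 2^1 = 10.  */

  sreal_add (&sum, &a, &b);     /* sum = 13.  */
  sreal_mul (&prod, &a, &b);    /* prod = 30.  */
  sreal_div (&quot, &b, &a);    /* quot = 10 / 3.  */

  gcc_assert (sreal_to_int (&sum) == 13);
  gcc_assert (sreal_to_int (&prod) == 30);
  gcc_assert (sreal_to_int (&quot) == 3);       /* Truncated.  */
  gcc_assert (sreal_compare (&prod, &sum) > 0);

  dump_sreal (stderr, &quot);
}
#endif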