gcc/sreal.c
/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
	x = sig * 2 ^ exp
   where
	sig = significand
	  (for machines with HOST_WIDE_INT narrower than 64 bits,
	   sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
	exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and wider)
   machines; otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the significand bits are used (in normalized sreals) so that
   we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.  So the precision is 32 bits on both 64-bit and
   32-bit machines.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
	 SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
	-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small it meets the following
   conditions:
	sig == 0 && exp == -SREAL_MAX_EXP.  */
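
/* A worked example of the representation above (a sketch only; it assumes
   the sreal.h definitions SREAL_PART_BITS == 32, SREAL_MIN_SIG == 2^31 and
   SREAL_MAX_SIG == 2^32 - 1):

	1.0  is stored normalized as  sig = 2^31,         exp = -31
	1234 is stored normalized as  sig = 1234 * 2^21,  exp = -21

   since 2^31 * 2^-31 = 1 and 1234 * 2^21 * 2^-21 = 1234, and both
   significands lie in [SREAL_MIN_SIG, SREAL_MAX_SIG].  */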
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "sreal.h"

static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
	   HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
	   x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */

static inline void
shift_right (sreal *x, int s)
{
#ifdef ENABLE_CHECKING
  if (s <= 0 || s > SREAL_BITS)
    abort ();
  if (x->exp + s > SREAL_MAX_EXP)
    {
      /* Exponent should never be so large because shift_right is used only by
	 sreal_add and sreal_sub and thus the number cannot be shifted out from
	 exponent range.  */
      abort ();
    }
#endif

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_hi++;
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	}
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
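
/* Example of the rounding done by shift_right: for sig = 7 and s = 2,
   adding 1 << (s - 1) = 2 gives 9 and 9 >> 2 = 2; plain truncation would
   give 1, but 7 / 4 = 1.75, which rounds to 2.  */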
/* Normalize *X.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      if (x->sig_hi == 0)
	{
	  /* Move lower part of significand to higher part.  */
	  x->sig_hi = x->sig_lo;
	  x->sig_lo = 0;
	  x->exp -= SREAL_PART_BITS;
	}
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
	{
	  x->sig_hi <<= 1;
	  x->exp--;
	  shift++;
	}
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig_hi = 0;
	  x->sig_lo = 0;
	}
      else if (shift)
	{
	  mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
	  x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
	  x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
	}
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
	{
	  tmp >>= 1;
	  shift++;
	}
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
		    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	  x->sig_hi++;
	  if (x->sig_hi > SREAL_MAX_SIG)
	    {
	      /* x->sig_hi was SREAL_MAX_SIG before increment
		 so now last bit is zero.  */
	      x->sig_hi >>= 1;
	      x->sig_lo >>= 1;
	      x->exp++;
	    }
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig_hi = SREAL_MAX_SIG;
	  x->sig_lo = SREAL_MAX_SIG;
	}
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      do
	{
	  x->sig <<= 1;
	  x->exp--;
	}
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig = 0;
	}
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      do
	{
	  last_bit = x->sig & 1;
	  x->sig >>= 1;
	  x->exp++;
	}
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
	{
	  x->sig >>= 1;
	  x->exp++;
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig = SREAL_MAX_SIG;
	}
    }
#endif
}
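
/* Example of normalization (a sketch assuming SREAL_PART_BITS == 32 and
   SREAL_MIN_SIG == 2^31, per sreal.h): for sig = 5, exp = 0, the shift loop
   above shifts sig left 29 times and decrements exp to -29, so that
   sig = 5 * 2^29 lies in [SREAL_MIN_SIG, SREAL_MAX_SIG] while the value
   5 * 2^29 * 2^-29 = 5 is unchanged.  */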
/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
  r->exp = exp - 16;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}
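
/* Typical usage of sreal_init (a sketch; the variable names are only
   illustrative): sreal_init (&one, 1, 0) yields 1 * 2^0 = 1,
   sreal_init (&half, 1, -1) yields 1 * 2^-1 = 0.5, and
   sreal_add (&sum, &one, &half) then stores 1.5 in sum.  */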
/* Return integer value of *R.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}
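
/* Note that sreal_to_int truncates toward zero: 1.5, stored normalized as
   sig = 3 * 2^30, exp = -31 (assuming the 32-bit significand layout from
   sreal.h), yields sig >> 31 = 1.  */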
/* Compare *A and *B.  Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}
/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}
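
/* Example of the exponent alignment in sreal_add (a sketch assuming the
   32-bit significand layout from sreal.h): adding 1 (sig = 2^31, exp = -31)
   and 0.5 (sig = 2^31, exp = -32) gives dexp = 1, so the smaller operand's
   significand is shifted right to 2^30 and the sum is
   sig = 2^31 + 2^30 = 3 * 2^30 with exp = -31, i.e. 1.5.  */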
/* *R = *A - *B.  Return R.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      abort ();
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}
/* *R = *A * *B.  Return R.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
	{
	  sreal *swap;
	  swap = a;
	  a = b;
	  b = swap;
	}

      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
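
/* Sketch of the part-wise product used by sreal_mul when
   SREAL_PART_BITS < 32: with P = SREAL_PART_BITS and significands written
   as sig = sig_hi * 2^P + sig_lo,

     a->sig * b->sig = a_hi*b_hi * 2^(2P)
		       + (a_hi*b_lo + a_lo*b_hi) * 2^P
		       + a_lo*b_lo,

   and the code drops the low P bits of this product, which divides the
   significand by 2^P; adding SREAL_PART_BITS to the exponent compensates,
   and normalize () then rescales the result.  */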
/* *R = *A / *B.  Return R.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  if (b->sig_hi < SREAL_MIN_SIG)
    {
      abort ();
    }
  else if (a->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since dividing by B's full-width significand is awkward to write,
	 we divide by only the first 3/4 of its bits (rounded).  */
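
      /* A sketch of the scaling used below, with P = SREAL_PART_BITS:
	 the dividend is A's full 2P-bit significand and the divisor is B's
	 significand shifted down by P/2 bits (with rounding).  The three
	 division steps accumulate roughly (a->sig / b->sig) * 2^(3P/2) in
	 sig_hi, and since sig_hi carries an implicit weight of 2^P in the
	 value, the exponent is reduced by SREAL_BITS + SREAL_PART_BITS / 2
	 (= 2P + P/2) to compensate.  */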
      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
	      + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
	tmp2++;

      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  if (b->sig == 0)
    {
      abort ();
    }
  else
    {
      r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
      r->exp = a->exp - b->exp - SREAL_PART_BITS;
      normalize (r);
    }
#endif
  return r;
}