/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2007, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
	x = sig * 2 ^ exp
   where
	sig = significand
	  (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
	exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and wider)
   machines; otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the significand bits are used (in normalized sreals) so that
   we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.  So the precision is 32 bits on both 64-bit and
   32-bit machines.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
	 SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
	-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small, it meets the following
   conditions:
	sig == 0 && exp == -SREAL_MAX_EXP
*/
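/* Worked example (an illustrative addition, not part of the original
   comment): assuming the usual 64-bit-host definitions in sreal.h,
   SREAL_MIN_SIG == 2^31 and SREAL_MAX_SIG == 2^32 - 1, the value 10 is
   stored normalized as

	sig = 10 * 2^28 = 2684354560,  exp = -28,

   since 2684354560 * 2^-28 == 10 and 2^31 <= 2684354560 <= 2^32 - 1.  */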
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "sreal.h"

static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
           HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
           x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */
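/* For illustration (an added example): with a significand of 11 (binary
   1011) and S == 2, the code below first adds 1 << 1 == 2 and then shifts,
   leaving a significand of 3; i.e. 11 / 4 == 2.75 rounds up to 3.  */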
static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out of the
     exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_hi++;
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
        }
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
/* Normalize *X.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      if (x->sig_hi == 0)
        {
          /* Move lower part of significand to higher part.  */
          x->sig_hi = x->sig_lo;
          x->sig_lo = 0;
          x->exp -= SREAL_PART_BITS;
        }
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
        {
          x->sig_hi <<= 1;
          x->exp--;
          shift++;
        }
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig_hi = 0;
          x->sig_lo = 0;
        }
      else if (shift)
        {
          mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
          x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
          x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
        }
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
        {
          tmp >>= 1;
          shift++;
        }
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
                    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
          x->sig_hi++;
          if (x->sig_hi > SREAL_MAX_SIG)
            {
              /* x->sig_hi was SREAL_MAX_SIG before increment
                 so now last bit is zero.  */
              x->sig_hi >>= 1;
              x->sig_lo >>= 1;
              x->exp++;
            }
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig_hi = SREAL_MAX_SIG;
          x->sig_lo = SREAL_MAX_SIG;
        }
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      do
        {
          x->sig <<= 1;
          x->exp--;
        }
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig = 0;
        }
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      do
        {
          last_bit = x->sig & 1;
          x->sig >>= 1;
          x->exp++;
        }
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
        {
          x->sig >>= 1;
          x->exp++;
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig = SREAL_MAX_SIG;
        }
    }
#endif
}
/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
  r->exp = exp - 16;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}
/* Return integer value of *R.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}
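/* Usage sketch (an illustrative addition, not taken from this file's
   callers):

     sreal x;
     sreal_init (&x, 5, 0);     -- x now represents 5 * 2^0
     sreal_to_int (&x);         -- returns 5

   sreal_init normalizes its result, so internally x ends up with a large
   significand and a negative exponent, but the value it represents is
   still 5.  */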
/* Compare *A and *B.  Return -1 if *A < *B, 1 if *A > *B and 0 if
   *A == *B.  */

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}
/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}
/* *R = *A - *B.  Return R.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}
440 sreal *
441 sreal_mul (sreal *r, sreal *a, sreal *b)
443 #if SREAL_PART_BITS < 32
444 if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
446 r->sig_lo = 0;
447 r->sig_hi = 0;
448 r->exp = -SREAL_MAX_EXP;
450 else
452 unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
453 if (sreal_compare (a, b) < 0)
455 sreal *swap;
456 swap = a;
457 a = b;
458 b = swap;
461 r->exp = a->exp + b->exp + SREAL_PART_BITS;
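      /* Explanatory note (added to this copy): with P == SREAL_PART_BITS the
         full significand product is
           (a_hi * 2^P + a_lo) * (b_hi * 2^P + b_lo)
             == a_hi*b_hi * 2^(2*P) + (a_hi*b_lo + a_lo*b_hi) * 2^P + a_lo*b_lo.
         The cross products below accumulate this sum shifted right by P
         (the low P bits of a_lo*b_lo are dropped), which is why
         SREAL_PART_BITS was added to r->exp above.  */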
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
/* *R = *A / *B.  Return R.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since division by the whole significand is pretty ugly to write,
         we divide by the first 3/4 of its bits instead.  */
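      /* Illustrative note (added to this copy): in this branch
         SREAL_PART_BITS is typically 16, so tmp2 below is the (rounded)
         top 24 bits of B's 32-bit significand; each division step divides
         by this approximation rather than by the full divisor.  */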
      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
              + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
        tmp2++;

      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}