/* gcc/convert.c */
/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "diagnostic-core.h"
#include "langhooks.h"

/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */

tree
convert_to_pointer (tree type, tree expr)
{
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_TYPE (expr) == type)
    return expr;

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        /* If the pointers point to different address spaces, conversion needs
           to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR.  */
        addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
        addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));

        if (to_as == from_as)
          return fold_build1_loc (loc, NOP_EXPR, type, expr);
        else
          return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
      }

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      {
        /* If the input precision differs from the target pointer type
           precision, first convert the input expression to an integer type of
           the target precision.  Some targets, e.g. VMS, need several pointer
           sizes to coexist so the latter isn't necessarily POINTER_SIZE.  */
        unsigned int pprec = TYPE_PRECISION (type);
        unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));

        if (eprec != pprec)
          expr = fold_build1_loc (loc, NOP_EXPR,
                                  lang_hooks.types.type_for_size (pprec, 0),
                                  expr);
      }

      return fold_build1_loc (loc, CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
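
/* For instance, converting an expression of type "short" to "char *" first
   widens the value to an integer of pointer precision and then applies a
   CONVERT_EXPR, while a pointer-to-pointer conversion becomes a NOP_EXPR,
   or an ADDR_SPACE_CONVERT_EXPR if the two pointer types refer to different
   named address spaces.  (Illustrative summary of the cases handled above.)  */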

/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in the runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x).  */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
        CASE_MATHFN (COSH)
        CASE_MATHFN (EXP)
        CASE_MATHFN (EXP10)
        CASE_MATHFN (EXP2)
        CASE_MATHFN (EXPM1)
        CASE_MATHFN (GAMMA)
        CASE_MATHFN (J0)
        CASE_MATHFN (J1)
        CASE_MATHFN (LGAMMA)
        CASE_MATHFN (POW10)
        CASE_MATHFN (SINH)
        CASE_MATHFN (TGAMMA)
        CASE_MATHFN (Y0)
        CASE_MATHFN (Y1)
          /* The above functions may set errno differently with float
             input or output so this transformation is not safe with
             -fmath-errno.  */
          if (flag_errno_math)
            break;
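
          /* For instance, exp overflows (and may set errno to ERANGE) only
             for arguments above roughly 709.8, while expf already overflows
             above roughly 88.7, so narrowing such a call changes where errno
             gets set.  (Illustrative values for IEEE double and float.)  */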
        CASE_MATHFN (ACOS)
        CASE_MATHFN (ACOSH)
        CASE_MATHFN (ASIN)
        CASE_MATHFN (ASINH)
        CASE_MATHFN (ATAN)
        CASE_MATHFN (ATANH)
        CASE_MATHFN (CBRT)
        CASE_MATHFN (COS)
        CASE_MATHFN (ERF)
        CASE_MATHFN (ERFC)
        CASE_MATHFN (FABS)
        CASE_MATHFN (LOG)
        CASE_MATHFN (LOG10)
        CASE_MATHFN (LOG2)
        CASE_MATHFN (LOG1P)
        CASE_MATHFN (LOGB)
        CASE_MATHFN (SIN)
        CASE_MATHFN (SQRT)
        CASE_MATHFN (TAN)
        CASE_MATHFN (TANH)
#undef CASE_MATHFN
          {
            tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
            tree newtype = type;

            /* We have (outertype)sqrt((innertype)x).  Choose the wider of
               the two as the safe type for the operation.  */
            if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
              newtype = TREE_TYPE (arg0);

            /* Be careful about integer to fp conversions.
               These may still overflow.  */
            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                    || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
              {
                tree fn = mathfn_built_in (newtype, fcode);

                if (fn)
                  {
                    tree arg = fold (convert_to_real (newtype, arg0));
                    expr = build_call_expr (fn, 1, arg);
                    if (newtype == type)
                      return expr;
                  }
              }
          }
        default:
          break;
        }
    }

  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode when the inner type is float.  */
      case ABS_EXPR:
      case NEGATE_EXPR:
        if (!flag_rounding_math
            && FLOAT_TYPE_P (itype)
            && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
          return build1 (TREE_CODE (expr), type,
                         fold (convert_to_real (type,
                                                TREE_OPERAND (expr, 0))));
        break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
      case PLUS_EXPR:
      case MINUS_EXPR:
      case MULT_EXPR:
      case RDIV_EXPR:
        {
          tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
          tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

          if (FLOAT_TYPE_P (TREE_TYPE (arg0))
              && FLOAT_TYPE_P (TREE_TYPE (arg1))
              && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
            {
              tree newtype = type;

              if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
                  || TYPE_MODE (type) == SDmode)
                newtype = dfloat32_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
                  || TYPE_MODE (type) == DDmode)
                newtype = dfloat64_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
                  || TYPE_MODE (type) == TDmode)
                newtype = dfloat128_type_node;
              if (newtype == dfloat32_type_node
                  || newtype == dfloat64_type_node
                  || newtype == dfloat128_type_node)
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                  break;
                }

              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg0);
              if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg1);

              /* Sometimes this transformation is safe (cannot
                 change results through affecting double rounding
                 cases) and sometimes it is not.  If NEWTYPE is
                 wider than TYPE, e.g. (float)((long double)double
                 + (long double)double) converted to
                 (float)(double + double), the transformation is
                 unsafe regardless of the details of the types
                 involved; double rounding can arise if the result
                 of NEWTYPE arithmetic is a NEWTYPE value half way
                 between two representable TYPE values but the
                 exact value is sufficiently different (in the
                 right direction) for this difference to be
                 visible in ITYPE arithmetic.  If NEWTYPE is the
                 same as TYPE, however, the transformation may be
                 safe depending on the types involved: it is safe
                 if the ITYPE has strictly more than twice as many
                 mantissa bits as TYPE, can represent infinities
                 and NaNs if the TYPE can, and has sufficient
                 exponent range for the product or ratio of two
                 values representable in the TYPE to be within the
                 range of normal values of ITYPE.  */
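
              /* For example, with IEEE single (24-bit significand) and
                 double (53-bit significand), the double-precision product
                 of two floats is exact, so (float)((double)f1 * (double)f2)
                 can be computed directly as f1 * f2 in float.  By contrast,
                 64-bit x87 extended precision is not wide enough relative
                 to double, so the corresponding long double -> double
                 shortening can double-round.  (Illustrative instance of
                 the rule described above.)  */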
              if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (flag_unsafe_math_optimizations
                      || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
                          && real_can_shorten_arithmetic (TYPE_MODE (itype),
                                                          TYPE_MODE (type))
                          && !excess_precision_type (newtype))))
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                }
            }
          break;
        }
      default:
        break;
      }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = element_precision (intype);
  unsigned int outprec = element_precision (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
          || (outprec == TYPE_PRECISION (integer_type_node)
              && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_LOGB):
          fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
          break;

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first, and from
         there widen/truncate to the required type.  Some targets support the
         coexistence of multiple valid pointer sizes, so fetch the one we need
         from the type.  */
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size
                            (TYPE_PRECISION (intype), 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          return fold_build1 (code, type, expr);
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.

         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
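
      /* For instance, (int) ((long) i + (long) j) on an LP64 target can be
         rewritten as (int) i + (int) j; the cases below that reach trunc1
         perform the narrowed operation in an unsigned variant of the type
         whenever doing it signed could introduce undefined overflow.
         (Illustrative example of the distribution described above.)  */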

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */
                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case TRUNC_DIV_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs and it has the same signedness.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1)))
                /* Do not change the sign of the division.  */
                && (TYPE_UNSIGNED (TREE_TYPE (expr))
                    == TYPE_UNSIGNED (TREE_TYPE (arg0)))
                /* Either require unsigned division or a division by
                   a constant that is not -1.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    || (TREE_CODE (arg1) == INTEGER_CST
                        && !integer_all_onesp (arg1))))
              goto trunc1;
            break;
          }

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Do not try to narrow operands of pointer subtraction;
               that will interfere with other folding.  */
            if (ex_form == MINUS_EXPR
                && CONVERT_EXPR_P (arg0)
                && CONVERT_EXPR_P (arg1)
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
              break;

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
                           type in case the operation in outprec precision
                           could overflow.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
                                 > outprec)
                                || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
                                    > outprec))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR
                                || ex_form == MULT_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex = unsigned_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
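
          /* The narrowed operation is done in the unsigned variant of TYPE
             so that, e.g., (int) -(long long) x does not introduce signed
             overflow when the truncated operand happens to be INT_MIN; the
             low-order bits, and hence the final converted value, are the
             same either way.  (Illustrative rationale for the code above.)  */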

        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  A COND_EXPR may have a throw
             as one operand, which then has void type.  Just leave void
             operands as they are.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
                              ? TREE_OPERAND (expr, 1)
                              : convert (type, TREE_OPERAND (expr, 1)),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
                              ? TREE_OPERAND (expr, 2)
                              : convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      /* When parsing long initializers, we might end up with a lot of casts.
         Shortcut this.  */
      if (TREE_CODE (expr) == INTEGER_CST)
        return fold_convert (type, expr);
      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }
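
      /* In the general case above, EXPR is wrapped in save_expr because it
         is used twice, once under REALPART_EXPR and once under IMAGPART_EXPR,
         so any side effects in it must be evaluated only once.  */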

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}

/* Convert EXPR to the vector type TYPE in the usual ways.  */

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can%'t convert value to a vector");
      return error_mark_node;
    }
}
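
/* For instance, a conversion between a 128-bit integer type and a vector of
   four 32-bit ints is accepted by convert_to_vector and expressed as a
   VIEW_CONVERT_EXPR, since both objects occupy the same number of bits;
   conversions involving a differently sized vector are rejected.
   (Illustrative example of the size check above.)  */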

/* Convert EXPR to some fixed-point type TYPE.

   EXPR must be fixed-point, float, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_fixed (tree type, tree expr)
{
  if (integer_zerop (expr))
    {
      tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
      return fixed_zero_node;
    }
  else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
    {
      tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
      return fixed_one_node;
    }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    default:
      error ("aggregate value used where a fixed-point was expected");
      return error_mark_node;
    }
}