gcc/convert.c
/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "target.h"
28 #include "tree.h"
29 #include "diagnostic-core.h"
30 #include "fold-const.h"
31 #include "stor-layout.h"
32 #include "convert.h"
33 #include "langhooks.h"
34 #include "builtins.h"
35 #include "ubsan.h"
/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */

tree
convert_to_pointer (tree type, tree expr)
{
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_TYPE (expr) == type)
    return expr;

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        /* If the pointers point to different address spaces, conversion needs
           to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR.  */
        addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
        addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));

        if (to_as == from_as)
          return fold_build1_loc (loc, NOP_EXPR, type, expr);
        else
          return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
      }

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      {
        /* If the input precision differs from the target pointer type
           precision, first convert the input expression to an integer type of
           the target precision.  Some targets, e.g. VMS, need several pointer
           sizes to coexist so the latter isn't necessarily POINTER_SIZE.  */
        unsigned int pprec = TYPE_PRECISION (type);
        unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));

        if (eprec != pprec)
          expr = fold_build1_loc (loc, NOP_EXPR,
                                  lang_hooks.types.type_for_size (pprec, 0),
                                  expr);
      }

      return fold_build1_loc (loc, CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  if (TREE_CODE (expr) == COMPOUND_EXPR)
    {
      tree t = convert_to_real (type, TREE_OPERAND (expr, 1));
      if (t == TREE_OPERAND (expr, 1))
        return expr;
      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
                         TREE_OPERAND (expr, 0), t);
    }

  /* Disable until we figure out how to decide whether the functions are
     present at runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x).  */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
          CASE_MATHFN (COSH)
          CASE_MATHFN (EXP)
          CASE_MATHFN (EXP10)
          CASE_MATHFN (EXP2)
          CASE_MATHFN (EXPM1)
          CASE_MATHFN (GAMMA)
          CASE_MATHFN (J0)
          CASE_MATHFN (J1)
          CASE_MATHFN (LGAMMA)
          CASE_MATHFN (POW10)
          CASE_MATHFN (SINH)
          CASE_MATHFN (TGAMMA)
          CASE_MATHFN (Y0)
          CASE_MATHFN (Y1)
            /* The above functions may set errno differently with float
               input or output so this transformation is not safe with
               -fmath-errno.  */
            if (flag_errno_math)
              break;
          CASE_MATHFN (ACOS)
          CASE_MATHFN (ACOSH)
          CASE_MATHFN (ASIN)
          CASE_MATHFN (ASINH)
          CASE_MATHFN (ATAN)
          CASE_MATHFN (ATANH)
          CASE_MATHFN (CBRT)
          CASE_MATHFN (COS)
          CASE_MATHFN (ERF)
          CASE_MATHFN (ERFC)
          CASE_MATHFN (LOG)
          CASE_MATHFN (LOG10)
          CASE_MATHFN (LOG2)
          CASE_MATHFN (LOG1P)
          CASE_MATHFN (SIN)
          CASE_MATHFN (TAN)
          CASE_MATHFN (TANH)
            /* It is not safe to perform this conversion on the above
               functions unless unsafe math optimizations are enabled.  */
            if (!flag_unsafe_math_optimizations)
              break;
          CASE_MATHFN (SQRT)
          CASE_MATHFN (FABS)
          CASE_MATHFN (LOGB)
#undef CASE_MATHFN
            {
              tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
              tree newtype = type;

              /* We have (outertype)sqrt((innertype)x).  Choose the wider of
                 the two as the safe type for the operation.  */
              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
                newtype = TREE_TYPE (arg0);

              /* We consider converting

                     (T1) sqrtT2 ((T2) exprT3)
                 to
                     (T1) sqrtT4 ((T4) exprT3)

                 where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
                 and T4 is NEWTYPE.  All these types are floating-point types.
                 T4 (NEWTYPE) should be narrower than T2 (ITYPE).  This
                 conversion is safe only if P1 >= P2*2+2, where P1 and P2 are
                 the precisions of T2 and T4.  See the following URL for a
                 reference:
                 http://stackoverflow.com/questions/9235456/determining-
                 floating-point-square-root  */
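              /* Illustrative note (added; not part of the original sources):
                 with the IEEE formats, float has P = 24 and double has
                 P = 53 significand bits, so 53 >= 2*24 + 2 holds and
                     (float) sqrt ((double) float_val)
                 may become sqrtf (float_val); with the x86 80-bit long
                 double (P = 64), 64 < 2*53 + 2, so
                     (double) sqrtl ((long double) double_val)
                 is rejected by the check below.  */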
              if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
                  && !flag_unsafe_math_optimizations)
                {
                  /* The following conversion is unsafe even if the precision
                     condition below is satisfied:

                     (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)  */
                  if (TYPE_MODE (type) != TYPE_MODE (newtype))
                    break;

                  int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
                  int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
                  if (p1 < p2 * 2 + 2)
                    break;
                }
              /* Be careful about integer to fp conversions.
                 These may overflow still.  */
              if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
                {
                  tree fn = mathfn_built_in (newtype, fcode);

                  if (fn)
                    {
                      tree arg = fold (convert_to_real (newtype, arg0));
                      expr = build_call_expr (fn, 1, arg);
                      if (newtype == type)
                        return expr;
                    }
                }
            }

        default:
          break;
        }
    }
  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode when the inner type is float.  */
        case ABS_EXPR:
        case NEGATE_EXPR:
          if (!flag_rounding_math
              && FLOAT_TYPE_P (itype)
              && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
            return build1 (TREE_CODE (expr), type,
                           fold (convert_to_real (type,
                                                  TREE_OPERAND (expr, 0))));
          break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
          {
            tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
            tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && FLOAT_TYPE_P (TREE_TYPE (arg1))
                && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
              {
                tree newtype = type;

                if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
                    || TYPE_MODE (type) == SDmode)
                  newtype = dfloat32_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
                    || TYPE_MODE (type) == DDmode)
                  newtype = dfloat64_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
                    || TYPE_MODE (type) == TDmode)
                  newtype = dfloat128_type_node;
                if (newtype == dfloat32_type_node
                    || newtype == dfloat64_type_node
                    || newtype == dfloat128_type_node)
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                    break;
                  }

                if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg0);
                if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg1);
                /* Sometimes this transformation is safe (cannot
                   change results through affecting double rounding
                   cases) and sometimes it is not.  If NEWTYPE is
                   wider than TYPE, e.g. (float)((long double)double
                   + (long double)double) converted to
                   (float)(double + double), the transformation is
                   unsafe regardless of the details of the types
                   involved; double rounding can arise if the result
                   of NEWTYPE arithmetic is a NEWTYPE value half way
                   between two representable TYPE values but the
                   exact value is sufficiently different (in the
                   right direction) for this difference to be
                   visible in ITYPE arithmetic.  If NEWTYPE is the
                   same as TYPE, however, the transformation may be
                   safe depending on the types involved: it is safe
                   if the ITYPE has strictly more than twice as many
                   mantissa bits as TYPE, can represent infinities
                   and NaNs if the TYPE can, and has sufficient
                   exponent range for the product or ratio of two
                   values representable in the TYPE to be within the
                   range of normal values of ITYPE.  */
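                /* Illustrative note (added; not part of the original
                   sources): for IEEE float and double those criteria are
                   met (53 > 2*24, double has the same infinities and NaNs,
                   and its exponent range covers any product or quotient of
                   two floats), so (float)((double) f1 * (double) f2) can
                   normally be computed as f1 * f2; the
                   real_can_shorten_arithmetic call below makes that
                   determination for the actual target formats.  */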
                if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                    && (flag_unsafe_math_optimizations
                        || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
                            && real_can_shorten_arithmetic (TYPE_MODE (itype),
                                                            TYPE_MODE (type))
                            && !excess_precision_type (newtype))))
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                  }
              }
          }
          break;

        default:
          break;
      }
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}
/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = element_precision (intype);
  unsigned int outprec = element_precision (type);
  location_t loc = EXPR_LOCATION (expr);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  if (ex_form == COMPOUND_EXPR)
    {
      tree t = convert_to_integer (type, TREE_OPERAND (expr, 1));
      if (t == TREE_OPERAND (expr, 1))
        return expr;
      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
                         TREE_OPERAND (expr, 0), t);
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
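  /* Added note (an illustration, not part of the original comment): when
     the C99 runtime functions are available and the result type is a
     signed integer of the right width, (long) round (d) becomes
     lround (d) and (long long) round (d) becomes llround (d); narrower
     results go through the GCC-internal BUILT_IN_I* variants.  */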
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          /* Only convert in ISO C99 mode and with -fno-math-errno.  */
          if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          /* Only convert in ISO C99 mode and with -fno-math-errno.  */
          if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
          || (outprec == TYPE_PRECISION (integer_type_node)
              && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_LOGB):
          fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
          break;

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first, and from
         there widen/truncate to the required type.  Some targets support the
         coexistence of multiple valid pointer sizes, so fetch the one we need
         from the type.  */
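      /* Added note: e.g. when a 64-bit pointer is converted to a 32-bit
         integer, the pointer is first converted to an integer type of the
         pointer's own 64-bit precision; the fold_convert below then
         truncates that value to the 32-bit result type.  */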
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size
                            (TYPE_PRECISION (intype), 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          return fold_build1 (code, type, expr);
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.
         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
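      /* Added illustrative note: the canonical shape handled below is
             (int) ((long) a + (long) b)  ==>  (int) a + (int) b
         for int operands a and b that were widened to long; both forms
         agree on the bits that survive the truncation to int, and the code
         below performs the narrowed operation in an unsigned type where
         needed so that no new signed overflow is introduced.  */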
      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;
        case TRUNC_DIV_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs and it has the same signedness.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1)))
                /* Do not change the sign of the division.  */
                && (TYPE_UNSIGNED (TREE_TYPE (expr))
                    == TYPE_UNSIGNED (TREE_TYPE (arg0)))
                /* Either require unsigned division or a division by
                   a constant that is not -1.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    || (TREE_CODE (arg1) == INTEGER_CST
                        && !integer_all_onesp (arg1))))
              goto trunc1;
            break;
          }

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }
        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Do not try to narrow operands of pointer subtraction;
               that will interfere with other folding.  */
            if (ex_form == MINUS_EXPR
                && CONVERT_EXPR_P (arg0)
                && CONVERT_EXPR_P (arg1)
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
              break;

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex
                    = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
                                                      TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
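                    /* Added example (assumes 32-bit int, 64-bit long, and a
                       target where this truncation distributes): for
                       (int) ((long) i + 1) with i of type int, the operand
                       precision doubled exceeds the 32-bit output precision,
                       so the addition is rebuilt below as
                       (int) ((unsigned int) i + 1u), which cannot introduce
                       new signed overflow.  */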
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
                           type in case the operation in outprec precision
                           could overflow.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
                                 > outprec)
                                || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
                                    > outprec))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR
                                || ex_form == MULT_EXPR)))
                      {
                        if (!TYPE_UNSIGNED (typex))
                          typex = unsigned_type_for (typex);
                      }
                    else
                      {
                        if (TYPE_UNSIGNED (typex))
                          typex = signed_type_for (typex);
                      }
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;
        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            /* Do the arithmetic in type TYPEX,
               then convert result to TYPE.  */
            tree typex = type;

            /* Can't do arithmetic in enumeral types
               so use an integer type that will hold the values.  */
            if (TREE_CODE (typex) == ENUMERAL_TYPE)
              typex
                = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
                                                  TYPE_UNSIGNED (typex));

            if (!TYPE_UNSIGNED (typex))
              typex = unsigned_type_for (typex);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
        CASE_CONVERT:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  A COND_EXPR may have a throw
             as one operand, which then has void type.  Just leave void
             operands as they are.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
                              ? TREE_OPERAND (expr, 1)
                              : convert (type, TREE_OPERAND (expr, 1)),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
                              ? TREE_OPERAND (expr, 2)
                              : convert (type, TREE_OPERAND (expr, 2)));
        default:
          break;
        }

      /* When parsing long initializers, we might end up with a lot of casts.
         Shortcut this.  */
      if (TREE_CODE (expr) == INTEGER_CST)
        return fold_convert (type, expr);
      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      if (flag_sanitize & SANITIZE_FLOAT_CAST
          && do_ubsan_in_current_function ())
        {
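          /* Added descriptive note: ubsan_instrument_float_cast builds a
             run-time check that EXPR fits in the range of TYPE (it returns
             NULL_TREE when no check is needed); the check is sequenced
             before the truncation result via the COMPOUND_EXPR below.  */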
          expr = save_expr (expr);
          tree check = ubsan_instrument_float_cast (loc, type, expr, expr);
          expr = build1 (FIX_TRUNC_EXPR, type, expr);
          if (check == NULL)
            return expr;
          return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
        }
      else
        return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert a vector of type %qT"
                 " to type %qT which has different size",
                 TREE_TYPE (expr), type);
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}
/* Convert EXPR to the complex type TYPE in the usual ways.  */

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPOUND_EXPR)
          {
            tree t = convert_to_complex (type, TREE_OPERAND (expr, 1));
            if (t == TREE_OPERAND (expr, 1))
              return expr;
            return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR,
                               TREE_TYPE (t), TREE_OPERAND (expr, 0), t);
          }
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
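            /* Added descriptive note: EXPR is used twice below (once for the
               real part and once for the imaginary part), so wrap it in a
               SAVE_EXPR to make sure it is evaluated only once.  */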
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}
/* Convert EXPR to the vector type TYPE in the usual ways.  */
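/* Added note: the conversion below is a bit-for-bit reinterpretation
   (VIEW_CONVERT_EXPR); only integer or vector sources of exactly the same
   size as TYPE are accepted.  */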
tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert a value of type %qT"
                 " to vector type %qT which has different size",
                 TREE_TYPE (expr), type);
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can%'t convert value to a vector");
      return error_mark_node;
    }
}
/* Convert EXPR to some fixed-point type TYPE.

   EXPR must be fixed-point, float, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_fixed (tree type, tree expr)
{
984 convert_to_fixed (tree type, tree expr)
986 if (integer_zerop (expr))
988 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
989 return fixed_zero_node;
991 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
993 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
994 return fixed_one_node;
997 switch (TREE_CODE (TREE_TYPE (expr)))
999 case FIXED_POINT_TYPE:
1000 case INTEGER_TYPE:
1001 case ENUMERAL_TYPE:
1002 case BOOLEAN_TYPE:
1003 case REAL_TYPE:
1004 return build1 (FIXED_CONVERT_EXPR, type, expr);
1006 case COMPLEX_TYPE:
1007 return convert (type,
1008 fold_build1 (REALPART_EXPR,
1009 TREE_TYPE (TREE_TYPE (expr)), expr));
1011 default:
1012 error ("aggregate value used where a fixed-point was expected");
1013 return error_mark_node;