/* [official-gcc.git] / gcc / convert.c
   blob 498d3a5c748c8d171382375cdd43809807f31269
   NOTE(review): the line above this header in the scrape,
   "* omp-low.c (MASK_GANG, MASK_WORKER, MASK_VECTOR): Delete.",
   is an unrelated ChangeLog fragment picked up by the web extraction;
   it does not belong to this file.  */
1 /* Utility routines for data type conversion for GCC.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 /* These routines are somewhat language-independent utility function
22 intended to be called by the language-specific convert () functions. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "alias.h"
29 #include "tree.h"
30 #include "fold-const.h"
31 #include "stor-layout.h"
32 #include "flags.h"
33 #include "convert.h"
34 #include "diagnostic-core.h"
35 #include "target.h"
36 #include "langhooks.h"
37 #include "builtins.h"
38 #include "ubsan.h"
40 /* Convert EXPR to some pointer or reference type TYPE.
41 EXPR must be pointer, reference, integer, enumeral, or literal zero;
42 in other cases error is called. */
44 tree
45 convert_to_pointer (tree type, tree expr)
47 location_t loc = EXPR_LOCATION (expr);
48 if (TREE_TYPE (expr) == type)
49 return expr;
51 switch (TREE_CODE (TREE_TYPE (expr)))
53 case POINTER_TYPE:
54 case REFERENCE_TYPE:
56 /* If the pointers point to different address spaces, conversion needs
57 to be done via a ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR. */
58 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
59 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
61 if (to_as == from_as)
62 return fold_build1_loc (loc, NOP_EXPR, type, expr);
63 else
64 return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
67 case INTEGER_TYPE:
68 case ENUMERAL_TYPE:
69 case BOOLEAN_TYPE:
71 /* If the input precision differs from the target pointer type
72 precision, first convert the input expression to an integer type of
73 the target precision. Some targets, e.g. VMS, need several pointer
74 sizes to coexist so the latter isn't necessarily POINTER_SIZE. */
75 unsigned int pprec = TYPE_PRECISION (type);
76 unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
78 if (eprec != pprec)
79 expr = fold_build1_loc (loc, NOP_EXPR,
80 lang_hooks.types.type_for_size (pprec, 0),
81 expr);
84 return fold_build1_loc (loc, CONVERT_EXPR, type, expr);
86 default:
87 error ("cannot convert to a pointer type");
88 return convert_to_pointer (type, integer_zero_node);
93 /* Convert EXPR to some floating-point type TYPE.
95 EXPR must be float, fixed-point, integer, or enumeral;
96 in other cases error is called. */
/* NOTE(review): this text is a gitweb scrape -- the leading decimal on
   each line is a display line number, and lines holding only braces or
   blanks were dropped.  Tokens are kept byte-identical below; only
   comments have been added.  Restore braces before compiling.  */
98 tree
99 convert_to_real (tree type, tree expr)
/* FCODE identifies EXPR as a call to a known math builtin (if any), so
   the (float)sqrt((double)x) -> sqrtf(x) style narrowing below can
   apply.  ITYPE is the source type of EXPR.  */
101 enum built_in_function fcode = builtin_mathfn_code (expr);
102 tree itype = TREE_TYPE (expr);
/* For a comma expression, convert only the value (second) operand;
   keep the node unchanged if that conversion is a no-op.  */
104 if (TREE_CODE (expr) == COMPOUND_EXPR)
106 tree t = convert_to_real (type, TREE_OPERAND (expr, 1));
107 if (t == TREE_OPERAND (expr, 1))
108 return expr;
109 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
110 TREE_OPERAND (expr, 0), t);
113 /* Disable until we figure out how to decide whether the functions are
114 present in runtime. */
115 /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
116 if (optimize
117 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
118 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
120 switch (fcode)
/* Expands to the double and long-double variants of each builtin.  */
122 #define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
123 CASE_MATHFN (COSH)
124 CASE_MATHFN (EXP)
125 CASE_MATHFN (EXP10)
126 CASE_MATHFN (EXP2)
127 CASE_MATHFN (EXPM1)
128 CASE_MATHFN (GAMMA)
129 CASE_MATHFN (J0)
130 CASE_MATHFN (J1)
131 CASE_MATHFN (LGAMMA)
132 CASE_MATHFN (POW10)
133 CASE_MATHFN (SINH)
134 CASE_MATHFN (TGAMMA)
135 CASE_MATHFN (Y0)
136 CASE_MATHFN (Y1)
137 /* The above functions may set errno differently with float
138 input or output so this transformation is not safe with
139 -fmath-errno. */
140 if (flag_errno_math)
141 break;
/* NOTE: the groups fall through -- each guard below also applies to
   the cases listed after it.  */
142 CASE_MATHFN (ACOS)
143 CASE_MATHFN (ACOSH)
144 CASE_MATHFN (ASIN)
145 CASE_MATHFN (ASINH)
146 CASE_MATHFN (ATAN)
147 CASE_MATHFN (ATANH)
148 CASE_MATHFN (CBRT)
149 CASE_MATHFN (COS)
150 CASE_MATHFN (ERF)
151 CASE_MATHFN (ERFC)
152 CASE_MATHFN (LOG)
153 CASE_MATHFN (LOG10)
154 CASE_MATHFN (LOG2)
155 CASE_MATHFN (LOG1P)
156 CASE_MATHFN (SIN)
157 CASE_MATHFN (TAN)
158 CASE_MATHFN (TANH)
159 /* The above functions are not safe to do this conversion. */
160 if (!flag_unsafe_math_optimizations)
161 break;
162 CASE_MATHFN (SQRT)
163 CASE_MATHFN (FABS)
164 CASE_MATHFN (LOGB)
165 #undef CASE_MATHFN
/* Pick the widest of TYPE and the stripped argument's type as the
   candidate type NEWTYPE for the narrowed call.  */
167 tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
168 tree newtype = type;
170 /* We have (outertype)sqrt((innertype)x). Choose the wider mode from
171 the both as the safe type for operation. */
172 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
173 newtype = TREE_TYPE (arg0);
175 /* We consider to convert
177 (T1) sqrtT2 ((T2) exprT3)
179 (T1) sqrtT4 ((T4) exprT3)
181 , where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
182 and T4 is NEWTYPE. All those types are of floating point types.
183 T4 (NEWTYPE) should be narrower than T2 (ITYPE). This conversion
184 is safe only if P1 >= P2*2+2, where P1 and P2 are precisions of
185 T2 and T4. See the following URL for a reference:
186 http://stackoverflow.com/questions/9235456/determining-
187 floating-point-square-root
189 if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
190 && !flag_unsafe_math_optimizations)
192 /* The following conversion is unsafe even the precision condition
193 below is satisfied:
195 (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
197 if (TYPE_MODE (type) != TYPE_MODE (newtype))
198 break;
/* P1/P2 are significand precisions of the old and new call types;
   the narrowing is exact only when p1 >= 2*p2 + 2.  */
200 int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
201 int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
202 if (p1 < p2 * 2 + 2)
203 break;
206 /* Be careful about integer to fp conversions.
207 These may overflow still. */
208 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
209 && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
210 && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
211 || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
/* Rebuild the call against the narrower builtin, if one exists.  */
213 tree fn = mathfn_built_in (newtype, fcode);
215 if (fn)
217 tree arg = fold (convert_to_real (newtype, arg0));
218 expr = build_call_expr (fn, 1, arg);
219 if (newtype == type)
220 return expr;
224 default:
225 break;
229 /* Propagate the cast into the operation. */
230 if (itype != type && FLOAT_TYPE_P (type))
231 switch (TREE_CODE (expr))
233 /* Convert (float)-x into -(float)x. This is safe for
234 round-to-nearest rounding mode when the inner type is float. */
235 case ABS_EXPR:
236 case NEGATE_EXPR:
237 if (!flag_rounding_math
238 && FLOAT_TYPE_P (itype)
239 && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
240 return build1 (TREE_CODE (expr), type,
241 fold (convert_to_real (type,
242 TREE_OPERAND (expr, 0))));
243 break;
244 /* Convert (outertype)((innertype0)a+(innertype1)b)
245 into ((newtype)a+(newtype)b) where newtype
246 is the widest mode from all of these. */
247 case PLUS_EXPR:
248 case MINUS_EXPR:
249 case MULT_EXPR:
250 case RDIV_EXPR:
252 tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
253 tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));
255 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
256 && FLOAT_TYPE_P (TREE_TYPE (arg1))
257 && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
259 tree newtype = type;
/* Decimal float operands force the arithmetic into the widest
   decimal float mode involved (SD < DD < TD).  */
261 if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
262 || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
263 || TYPE_MODE (type) == SDmode)
264 newtype = dfloat32_type_node;
265 if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
266 || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
267 || TYPE_MODE (type) == DDmode)
268 newtype = dfloat64_type_node;
269 if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
270 || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
271 || TYPE_MODE (type) == TDmode)
272 newtype = dfloat128_type_node;
273 if (newtype == dfloat32_type_node
274 || newtype == dfloat64_type_node
275 || newtype == dfloat128_type_node)
277 expr = build2 (TREE_CODE (expr), newtype,
278 fold (convert_to_real (newtype, arg0)),
279 fold (convert_to_real (newtype, arg1)));
280 if (newtype == type)
281 return expr;
282 break;
/* Binary-float path: widen NEWTYPE to cover both operands.  */
285 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
286 newtype = TREE_TYPE (arg0);
287 if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
288 newtype = TREE_TYPE (arg1);
289 /* Sometimes this transformation is safe (cannot
290 change results through affecting double rounding
291 cases) and sometimes it is not. If NEWTYPE is
292 wider than TYPE, e.g. (float)((long double)double
293 + (long double)double) converted to
294 (float)(double + double), the transformation is
295 unsafe regardless of the details of the types
296 involved; double rounding can arise if the result
297 of NEWTYPE arithmetic is a NEWTYPE value half way
298 between two representable TYPE values but the
299 exact value is sufficiently different (in the
300 right direction) for this difference to be
301 visible in ITYPE arithmetic. If NEWTYPE is the
302 same as TYPE, however, the transformation may be
303 safe depending on the types involved: it is safe
304 if the ITYPE has strictly more than twice as many
305 mantissa bits as TYPE, can represent infinities
306 and NaNs if the TYPE can, and has sufficient
307 exponent range for the product or ratio of two
308 values representable in the TYPE to be within the
309 range of normal values of ITYPE. */
310 if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
311 && (flag_unsafe_math_optimizations
312 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
313 && real_can_shorten_arithmetic (TYPE_MODE (itype),
314 TYPE_MODE (type))
315 && !excess_precision_type (newtype))))
317 expr = build2 (TREE_CODE (expr), newtype,
318 fold (convert_to_real (newtype, arg0)),
319 fold (convert_to_real (newtype, arg1)));
320 if (newtype == type)
321 return expr;
325 break;
326 default:
327 break;
/* Finally dispatch on the source type to emit the conversion node.  */
330 switch (TREE_CODE (TREE_TYPE (expr)))
332 case REAL_TYPE:
333 /* Ignore the conversion if we don't need to store intermediate
334 results and neither type is a decimal float. */
335 return build1 ((flag_float_store
336 || DECIMAL_FLOAT_TYPE_P (type)
337 || DECIMAL_FLOAT_TYPE_P (itype))
338 ? CONVERT_EXPR : NOP_EXPR, type, expr);
340 case INTEGER_TYPE:
341 case ENUMERAL_TYPE:
342 case BOOLEAN_TYPE:
343 return build1 (FLOAT_EXPR, type, expr);
345 case FIXED_POINT_TYPE:
346 return build1 (FIXED_CONVERT_EXPR, type, expr);
348 case COMPLEX_TYPE:
/* Complex -> real: take the real part and convert that.  */
349 return convert (type,
350 fold_build1 (REALPART_EXPR,
351 TREE_TYPE (TREE_TYPE (expr)), expr));
353 case POINTER_TYPE:
354 case REFERENCE_TYPE:
355 error ("pointer value used where a floating point value was expected");
356 return convert_to_real (type, integer_zero_node);
358 default:
359 error ("aggregate value used where a float was expected");
360 return convert_to_real (type, integer_zero_node);
364 /* Convert EXPR to some integer (or enum) type TYPE.
366 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
367 fixed-point or vector; in other cases error is called.
369 The result of this is always supposed to be a newly created tree node
370 not in use in any existing structure. */
/* NOTE(review): this text is a gitweb scrape -- the leading decimal on
   each line is a display line number, and lines holding only braces or
   blanks were dropped.  Tokens are kept byte-identical below; only
   comments have been added.  Restore braces before compiling.  */
372 tree
373 convert_to_integer (tree type, tree expr)
/* INPREC/OUTPREC are the element precisions of the source and target
   types; all the truncation-distribution logic below compares them.  */
375 enum tree_code ex_form = TREE_CODE (expr);
376 tree intype = TREE_TYPE (expr);
377 unsigned int inprec = element_precision (intype);
378 unsigned int outprec = element_precision (type);
379 location_t loc = EXPR_LOCATION (expr);
381 /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
382 be. Consider `enum E = { a, b = (enum E) 3 };'. */
383 if (!COMPLETE_TYPE_P (type))
385 error ("conversion to incomplete type");
386 return error_mark_node;
/* For a comma expression, convert only the value (second) operand.  */
389 if (ex_form == COMPOUND_EXPR)
391 tree t = convert_to_integer (type, TREE_OPERAND (expr, 1));
392 if (t == TREE_OPERAND (expr, 1))
393 return expr;
394 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
395 TREE_OPERAND (expr, 0), t);
398 /* Convert e.g. (long)round(d) -> lround(d). */
399 /* If we're converting to char, we may encounter differing behavior
400 between converting from double->char vs double->long->char.
401 We're in "undefined" territory but we prefer to be conservative,
402 so only proceed in "unsafe" math mode. */
403 if (optimize
404 && (flag_unsafe_math_optimizations
405 || (long_integer_type_node
406 && outprec >= TYPE_PRECISION (long_integer_type_node))))
/* Each case picks the i*/l*/ll* variant whose result precision and
   signedness fit the target TYPE; FN stays null when none fits.  */
408 tree s_expr = strip_float_extensions (expr);
409 tree s_intype = TREE_TYPE (s_expr);
410 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
411 tree fn = 0;
413 switch (fcode)
415 CASE_FLT_FN (BUILT_IN_CEIL):
416 /* Only convert in ISO C99 mode. */
417 if (!targetm.libc_has_function (function_c99_misc))
418 break;
419 if (outprec < TYPE_PRECISION (integer_type_node)
420 || (outprec == TYPE_PRECISION (integer_type_node)
421 && !TYPE_UNSIGNED (type)))
422 fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
423 else if (outprec == TYPE_PRECISION (long_integer_type_node)
424 && !TYPE_UNSIGNED (type))
425 fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
426 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
427 && !TYPE_UNSIGNED (type))
428 fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
429 break;
431 CASE_FLT_FN (BUILT_IN_FLOOR):
432 /* Only convert in ISO C99 mode. */
433 if (!targetm.libc_has_function (function_c99_misc))
434 break;
435 if (outprec < TYPE_PRECISION (integer_type_node)
436 || (outprec == TYPE_PRECISION (integer_type_node)
437 && !TYPE_UNSIGNED (type)))
438 fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
439 else if (outprec == TYPE_PRECISION (long_integer_type_node)
440 && !TYPE_UNSIGNED (type))
441 fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
442 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
443 && !TYPE_UNSIGNED (type))
444 fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
445 break;
447 CASE_FLT_FN (BUILT_IN_ROUND):
448 /* Only convert in ISO C99 mode and with -fno-math-errno. */
449 if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
450 break;
451 if (outprec < TYPE_PRECISION (integer_type_node)
452 || (outprec == TYPE_PRECISION (integer_type_node)
453 && !TYPE_UNSIGNED (type)))
454 fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
455 else if (outprec == TYPE_PRECISION (long_integer_type_node)
456 && !TYPE_UNSIGNED (type))
457 fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
458 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
459 && !TYPE_UNSIGNED (type))
460 fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
461 break;
463 CASE_FLT_FN (BUILT_IN_NEARBYINT):
464 /* Only convert nearbyint* if we can ignore math exceptions. */
465 if (flag_trapping_math)
466 break;
467 /* ... Fall through ... */
468 CASE_FLT_FN (BUILT_IN_RINT):
469 /* Only convert in ISO C99 mode and with -fno-math-errno. */
470 if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
471 break;
472 if (outprec < TYPE_PRECISION (integer_type_node)
473 || (outprec == TYPE_PRECISION (integer_type_node)
474 && !TYPE_UNSIGNED (type)))
475 fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
476 else if (outprec == TYPE_PRECISION (long_integer_type_node)
477 && !TYPE_UNSIGNED (type))
478 fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
479 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
480 && !TYPE_UNSIGNED (type))
481 fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
482 break;
484 CASE_FLT_FN (BUILT_IN_TRUNC):
/* (int)trunc(x) is just (int)x: the cast already truncates.  */
485 return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));
487 default:
488 break;
491 if (fn)
493 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
494 return convert_to_integer (type, newexpr);
498 /* Convert (int)logb(d) -> ilogb(d). */
499 if (optimize
500 && flag_unsafe_math_optimizations
501 && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
502 && integer_type_node
503 && (outprec > TYPE_PRECISION (integer_type_node)
504 || (outprec == TYPE_PRECISION (integer_type_node)
505 && !TYPE_UNSIGNED (type))))
507 tree s_expr = strip_float_extensions (expr);
508 tree s_intype = TREE_TYPE (s_expr);
509 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
510 tree fn = 0;
512 switch (fcode)
514 CASE_FLT_FN (BUILT_IN_LOGB):
515 fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
516 break;
518 default:
519 break;
522 if (fn)
524 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
525 return convert_to_integer (type, newexpr);
/* Dispatch on the source type.  */
529 switch (TREE_CODE (intype))
531 case POINTER_TYPE:
532 case REFERENCE_TYPE:
533 if (integer_zerop (expr))
534 return build_int_cst (type, 0);
536 /* Convert to an unsigned integer of the correct width first, and from
537 there widen/truncate to the required type. Some targets support the
538 coexistence of multiple valid pointer sizes, so fetch the one we need
539 from the type. */
540 expr = fold_build1 (CONVERT_EXPR,
541 lang_hooks.types.type_for_size
542 (TYPE_PRECISION (intype), 0),
543 expr);
544 return fold_convert (type, expr);
546 case INTEGER_TYPE:
547 case ENUMERAL_TYPE:
548 case BOOLEAN_TYPE:
549 case OFFSET_TYPE:
550 /* If this is a logical operation, which just returns 0 or 1, we can
551 change the type of the expression. */
553 if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
555 expr = copy_node (expr);
556 TREE_TYPE (expr) = type;
557 return expr;
560 /* If we are widening the type, put in an explicit conversion.
561 Similarly if we are not changing the width. After this, we know
562 we are truncating EXPR. */
564 else if (outprec >= inprec)
566 enum tree_code code;
568 /* If the precision of the EXPR's type is K bits and the
569 destination mode has more bits, and the sign is changing,
570 it is not safe to use a NOP_EXPR. For example, suppose
571 that EXPR's type is a 3-bit unsigned integer type, the
572 TYPE is a 3-bit signed integer type, and the machine mode
573 for the types is 8-bit QImode. In that case, the
574 conversion necessitates an explicit sign-extension. In
575 the signed-to-unsigned case the high-order bits have to
576 be cleared. */
577 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
578 && (TYPE_PRECISION (TREE_TYPE (expr))
579 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
580 code = CONVERT_EXPR;
581 else
582 code = NOP_EXPR;
584 return fold_build1 (code, type, expr);
587 /* If TYPE is an enumeral type or a type with a precision less
588 than the number of bits in its mode, do the conversion to the
589 type corresponding to its mode, then do a nop conversion
590 to TYPE. */
591 else if (TREE_CODE (type) == ENUMERAL_TYPE
592 || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
593 return build1 (NOP_EXPR, type,
594 convert (lang_hooks.types.type_for_mode
595 (TYPE_MODE (type), TYPE_UNSIGNED (type)),
596 expr));
598 /* Here detect when we can distribute the truncation down past some
599 arithmetic. For example, if adding two longs and converting to an
600 int, we can equally well convert both to ints and then add.
601 For the operations handled here, such truncation distribution
602 is always safe.
603 It is desirable in these cases:
604 1) when truncating down to full-word from a larger size
605 2) when truncating takes no work.
606 3) when at least one operand of the arithmetic has been extended
607 (as by C's default conversions). In this case we need two conversions
608 if we do the arithmetic as already requested, so we might as well
609 truncate both and then combine. Perhaps that way we need only one.
611 Note that in general we cannot do the arithmetic in a type
612 shorter than the desired result of conversion, even if the operands
613 are both extended from a shorter type, because they might overflow
614 if combined in that type. The exceptions to this--the times when
615 two narrow values can be combined in their narrow type even to
616 make a wider result--are handled by "shorten" in build_binary_op. */
618 switch (ex_form)
620 case RSHIFT_EXPR:
621 /* We can pass truncation down through right shifting
622 when the shift count is a nonpositive constant. */
623 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
624 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
625 goto trunc1;
626 break;
628 case LSHIFT_EXPR:
629 /* We can pass truncation down through left shifting
630 when the shift count is a nonnegative constant and
631 the target type is unsigned. */
632 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
633 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
634 && TYPE_UNSIGNED (type)
635 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
637 /* If shift count is less than the width of the truncated type,
638 really shift. */
639 if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
640 /* In this case, shifting is like multiplication. */
641 goto trunc1;
642 else
644 /* If it is >= that width, result is zero.
645 Handling this with trunc1 would give the wrong result:
646 (int) ((long long) a << 32) is well defined (as 0)
647 but (int) a << 32 is undefined and would get a
648 warning. */
650 tree t = build_int_cst (type, 0);
652 /* If the original expression had side-effects, we must
653 preserve it. */
654 if (TREE_SIDE_EFFECTS (expr))
655 return build2 (COMPOUND_EXPR, type, expr, t);
656 else
657 return t;
660 break;
662 case TRUNC_DIV_EXPR:
664 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
665 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
667 /* Don't distribute unless the output precision is at least as big
668 as the actual inputs and it has the same signedness. */
669 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
670 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
671 /* If signedness of arg0 and arg1 don't match,
672 we can't necessarily find a type to compare them in. */
673 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
674 == TYPE_UNSIGNED (TREE_TYPE (arg1)))
675 /* Do not change the sign of the division. */
676 && (TYPE_UNSIGNED (TREE_TYPE (expr))
677 == TYPE_UNSIGNED (TREE_TYPE (arg0)))
678 /* Either require unsigned division or a division by
679 a constant that is not -1. */
680 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
681 || (TREE_CODE (arg1) == INTEGER_CST
682 && !integer_all_onesp (arg1))))
683 goto trunc1;
684 break;
687 case MAX_EXPR:
688 case MIN_EXPR:
689 case MULT_EXPR:
691 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
692 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
694 /* Don't distribute unless the output precision is at least as big
695 as the actual inputs. Otherwise, the comparison of the
696 truncated values will be wrong. */
697 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
698 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
699 /* If signedness of arg0 and arg1 don't match,
700 we can't necessarily find a type to compare them in. */
701 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
702 == TYPE_UNSIGNED (TREE_TYPE (arg1))))
703 goto trunc1;
704 break;
707 case PLUS_EXPR:
708 case MINUS_EXPR:
709 case BIT_AND_EXPR:
710 case BIT_IOR_EXPR:
711 case BIT_XOR_EXPR:
712 trunc1:
714 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
715 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
717 /* Do not try to narrow operands of pointer subtraction;
718 that will interfere with other folding. */
719 if (ex_form == MINUS_EXPR
720 && CONVERT_EXPR_P (arg0)
721 && CONVERT_EXPR_P (arg1)
722 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
723 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
724 break;
726 if (outprec >= BITS_PER_WORD
727 || TRULY_NOOP_TRUNCATION (outprec, inprec)
728 || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
729 || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
731 /* Do the arithmetic in type TYPEX,
732 then convert result to TYPE. */
733 tree typex = type;
735 /* Can't do arithmetic in enumeral types
736 so use an integer type that will hold the values. */
737 if (TREE_CODE (typex) == ENUMERAL_TYPE)
738 typex
739 = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
740 TYPE_UNSIGNED (typex));
742 /* But now perhaps TYPEX is as wide as INPREC.
743 In that case, do nothing special here.
744 (Otherwise would recurse infinitely in convert. */
745 if (TYPE_PRECISION (typex) != inprec)
747 /* Don't do unsigned arithmetic where signed was wanted,
748 or vice versa.
749 Exception: if both of the original operands were
750 unsigned then we can safely do the work as unsigned.
751 Exception: shift operations take their type solely
752 from the first argument.
753 Exception: the LSHIFT_EXPR case above requires that
754 we perform this operation unsigned lest we produce
755 signed-overflow undefinedness.
756 And we may need to do it as unsigned
757 if we truncate to the original size. */
758 if (TYPE_UNSIGNED (TREE_TYPE (expr))
759 || (TYPE_UNSIGNED (TREE_TYPE (arg0))
760 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
761 || ex_form == LSHIFT_EXPR
762 || ex_form == RSHIFT_EXPR
763 || ex_form == LROTATE_EXPR
764 || ex_form == RROTATE_EXPR))
765 || ex_form == LSHIFT_EXPR
766 /* If we have !flag_wrapv, and either ARG0 or
767 ARG1 is of a signed type, we have to do
768 PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
769 type in case the operation in outprec precision
770 could overflow. Otherwise, we would introduce
771 signed-overflow undefinedness. */
772 || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
773 || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
774 && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
775 > outprec)
776 || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
777 > outprec))
778 && (ex_form == PLUS_EXPR
779 || ex_form == MINUS_EXPR
780 || ex_form == MULT_EXPR)))
782 if (!TYPE_UNSIGNED (typex))
783 typex = unsigned_type_for (typex);
785 else
787 if (TYPE_UNSIGNED (typex))
788 typex = signed_type_for (typex);
790 return convert (type,
791 fold_build2 (ex_form, typex,
792 convert (typex, arg0),
793 convert (typex, arg1)));
797 break;
799 case NEGATE_EXPR:
800 case BIT_NOT_EXPR:
801 /* This is not correct for ABS_EXPR,
802 since we must test the sign before truncation. */
804 /* Do the arithmetic in type TYPEX,
805 then convert result to TYPE. */
806 tree typex = type;
808 /* Can't do arithmetic in enumeral types
809 so use an integer type that will hold the values. */
810 if (TREE_CODE (typex) == ENUMERAL_TYPE)
811 typex
812 = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
813 TYPE_UNSIGNED (typex));
815 if (!TYPE_UNSIGNED (typex))
816 typex = unsigned_type_for (typex);
817 return convert (type,
818 fold_build1 (ex_form, typex,
819 convert (typex,
820 TREE_OPERAND (expr, 0))));
823 CASE_CONVERT:
824 /* Don't introduce a
825 "can't convert between vector values of different size" error. */
826 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
827 && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
828 != GET_MODE_SIZE (TYPE_MODE (type))))
829 break;
830 /* If truncating after truncating, might as well do all at once.
831 If truncating after extending, we may get rid of wasted work. */
832 return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
834 case COND_EXPR:
835 /* It is sometimes worthwhile to push the narrowing down through
836 the conditional and never loses. A COND_EXPR may have a throw
837 as one operand, which then has void type. Just leave void
838 operands as they are. */
839 return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
840 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
841 ? TREE_OPERAND (expr, 1)
842 : convert (type, TREE_OPERAND (expr, 1)),
843 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
844 ? TREE_OPERAND (expr, 2)
845 : convert (type, TREE_OPERAND (expr, 2)));
847 default:
848 break;
851 /* When parsing long initializers, we might end up with a lot of casts.
852 Shortcut this. */
853 if (TREE_CODE (expr) == INTEGER_CST)
854 return fold_convert (type, expr);
855 return build1 (CONVERT_EXPR, type, expr);
857 case REAL_TYPE:
/* Float -> integer: FIX_TRUNC_EXPR, optionally preceded by a ubsan
   range check under -fsanitize=float-cast-overflow.  */
858 if (flag_sanitize & SANITIZE_FLOAT_CAST
859 && do_ubsan_in_current_function ())
861 expr = save_expr (expr);
862 tree check = ubsan_instrument_float_cast (loc, type, expr, expr);
863 expr = build1 (FIX_TRUNC_EXPR, type, expr);
864 if (check == NULL)
865 return expr;
866 return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
868 else
869 return build1 (FIX_TRUNC_EXPR, type, expr);
871 case FIXED_POINT_TYPE:
872 return build1 (FIXED_CONVERT_EXPR, type, expr);
874 case COMPLEX_TYPE:
/* Complex -> integer: take the real part and convert that.  */
875 return convert (type,
876 fold_build1 (REALPART_EXPR,
877 TREE_TYPE (TREE_TYPE (expr)), expr));
879 case VECTOR_TYPE:
/* Vector -> integer is only a same-size reinterpretation.  */
880 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
882 error ("can%'t convert a vector of type %qT"
883 " to type %qT which has different size",
884 TREE_TYPE (expr), type);
885 return error_mark_node;
887 return build1 (VIEW_CONVERT_EXPR, type, expr);
889 default:
890 error ("aggregate value used where an integer was expected");
891 return convert (type, integer_zero_node);
895 /* Convert EXPR to the complex type TYPE in the usual ways. */
897 tree
898 convert_to_complex (tree type, tree expr)
900 tree subtype = TREE_TYPE (type);
902 switch (TREE_CODE (TREE_TYPE (expr)))
904 case REAL_TYPE:
905 case FIXED_POINT_TYPE:
906 case INTEGER_TYPE:
907 case ENUMERAL_TYPE:
908 case BOOLEAN_TYPE:
909 return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
910 convert (subtype, integer_zero_node));
912 case COMPLEX_TYPE:
914 tree elt_type = TREE_TYPE (TREE_TYPE (expr));
916 if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
917 return expr;
918 else if (TREE_CODE (expr) == COMPOUND_EXPR)
920 tree t = convert_to_complex (type, TREE_OPERAND (expr, 1));
921 if (t == TREE_OPERAND (expr, 1))
922 return expr;
923 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR,
924 TREE_TYPE (t), TREE_OPERAND (expr, 0), t);
926 else if (TREE_CODE (expr) == COMPLEX_EXPR)
927 return fold_build2 (COMPLEX_EXPR, type,
928 convert (subtype, TREE_OPERAND (expr, 0)),
929 convert (subtype, TREE_OPERAND (expr, 1)));
930 else
932 expr = save_expr (expr);
933 return
934 fold_build2 (COMPLEX_EXPR, type,
935 convert (subtype,
936 fold_build1 (REALPART_EXPR,
937 TREE_TYPE (TREE_TYPE (expr)),
938 expr)),
939 convert (subtype,
940 fold_build1 (IMAGPART_EXPR,
941 TREE_TYPE (TREE_TYPE (expr)),
942 expr)));
946 case POINTER_TYPE:
947 case REFERENCE_TYPE:
948 error ("pointer value used where a complex was expected");
949 return convert_to_complex (type, integer_zero_node);
951 default:
952 error ("aggregate value used where a complex was expected");
953 return convert_to_complex (type, integer_zero_node);
957 /* Convert EXPR to the vector type TYPE in the usual ways. */
959 tree
960 convert_to_vector (tree type, tree expr)
962 switch (TREE_CODE (TREE_TYPE (expr)))
964 case INTEGER_TYPE:
965 case VECTOR_TYPE:
966 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
968 error ("can%'t convert a value of type %qT"
969 " to vector type %qT which has different size",
970 TREE_TYPE (expr), type);
971 return error_mark_node;
973 return build1 (VIEW_CONVERT_EXPR, type, expr);
975 default:
976 error ("can%'t convert value to a vector");
977 return error_mark_node;
981 /* Convert EXPR to some fixed-point type TYPE.
983 EXPR must be fixed-point, float, integer, or enumeral;
984 in other cases error is called. */
986 tree
987 convert_to_fixed (tree type, tree expr)
989 if (integer_zerop (expr))
991 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
992 return fixed_zero_node;
994 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
996 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
997 return fixed_one_node;
1000 switch (TREE_CODE (TREE_TYPE (expr)))
1002 case FIXED_POINT_TYPE:
1003 case INTEGER_TYPE:
1004 case ENUMERAL_TYPE:
1005 case BOOLEAN_TYPE:
1006 case REAL_TYPE:
1007 return build1 (FIXED_CONVERT_EXPR, type, expr);
1009 case COMPLEX_TYPE:
1010 return convert (type,
1011 fold_build1 (REALPART_EXPR,
1012 TREE_TYPE (TREE_TYPE (expr)), expr));
1014 default:
1015 error ("aggregate value used where a fixed-point was expected");
1016 return error_mark_node;