/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */
#include "coretypes.h"
#include "langhooks.h"
/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
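/* For example, on a target with 32-bit pointers, converting a 16-bit
   integer value to a pointer type first widens it to a 32-bit unsigned
   integer and then converts that to the pointer type; a literal zero
   becomes a null pointer constant of the requested type.  */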
tree
convert_to_pointer (tree type, tree expr)
{
  if (TREE_TYPE (expr) == type)
    return expr;

  /* Propagate overflow to the NULL pointer.  */
  if (integer_zerop (expr))
    return force_fit_type_double (type, 0, 0, 0, TREE_OVERFLOW (expr));

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      return fold_build1 (NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_PRECISION (TREE_TYPE (expr)) != POINTER_SIZE)
        expr = fold_build1 (NOP_EXPR,
                            lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                            expr);
      return fold_build1 (CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
/* Avoid any floating point extensions from EXP.  */
tree
strip_float_extensions (tree exp)
{
  tree sub, expt, subt;

  /* For a floating point constant, look up the narrowest type that can hold
     it properly and handle it like (type)(narrowest_type)constant.
     This way we can optimize for instance a=a*2.0 where "a" is float
     but 2.0 is a double constant.  */
  if (TREE_CODE (exp) == REAL_CST)
    {
      REAL_VALUE_TYPE orig;
      tree type = NULL;

      orig = TREE_REAL_CST (exp);
      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
          && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
        type = float_type_node;
      else if (TYPE_PRECISION (TREE_TYPE (exp))
               > TYPE_PRECISION (double_type_node)
               && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
        type = double_type_node;
      if (type)
        return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
    }

  if (TREE_CODE (exp) != NOP_EXPR
      && TREE_CODE (exp) != CONVERT_EXPR)
    return exp;

  sub = TREE_OPERAND (exp, 0);
  subt = TREE_TYPE (sub);
  expt = TREE_TYPE (exp);

  if (!FLOAT_TYPE_P (subt))
    return exp;

  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
    return exp;

  return strip_float_extensions (sub);
}
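/* For example, if F has type float, strip_float_extensions ((double) F)
   returns F itself, and the double constant 2.0 is replaced by the
   equivalent float constant, since it truncates to float exactly.  */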
/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, integer, or enumeral;
   in other cases error is called.  */
tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
        CASE_MATHFN (SQRT)
        /* ... CASE_MATHFN cases for the other float math built-ins ...  */
#undef CASE_MATHFN
            {
              tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
              tree newtype = type;

              /* We have (outertype)sqrt((innertype)x).  Choose the wider
                 of the two as the safe type for the operation.  */
              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
                newtype = TREE_TYPE (arg0);

              /* Be careful about integer to fp conversions.
                 These may still overflow.  */
              if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
                {
                  tree fn = mathfn_built_in (newtype, fcode);

                  if (fn)
                    {
                      tree arg = fold (convert_to_real (newtype, arg0));
                      expr = build_call_expr (fn, 1, arg);
                      if (newtype == type)
                        return expr;
                    }
                }
            }
        default:
          break;
        }
    }
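  /* For example, with float x, (float) sqrt ((double) x) is narrowed by
     the code above to sqrtf (x) when the sqrtf built-in is available,
     since the result is only needed to float precision.  */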
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }
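  /* For example, (float) floorl ((long double) f) with float f becomes
     floorf (f), while (float) floor (d) with double d is left alone,
     since narrowing the argument of floor could change the result.  */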
  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode.  */
      case ABS_EXPR:
      case NEGATE_EXPR:
        if (!flag_rounding_math
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
          return build1 (TREE_CODE (expr), type,
                         fold (convert_to_real (type,
                                                TREE_OPERAND (expr, 0))));
        break;
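        /* For example, with double d, (float) -d becomes -(float) d here
           under the default round-to-nearest rounding mode.  */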
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
      case PLUS_EXPR:
      case MINUS_EXPR:
      case MULT_EXPR:
      case RDIV_EXPR:
        {
          tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
          tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

          if (FLOAT_TYPE_P (TREE_TYPE (arg0))
              && FLOAT_TYPE_P (TREE_TYPE (arg1)))
            {
              tree newtype = type;

              if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == SDmode)
                newtype = dfloat32_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == DDmode)
                newtype = dfloat64_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == TDmode)
                newtype = dfloat128_type_node;
              if (newtype == dfloat32_type_node
                  || newtype == dfloat64_type_node
                  || newtype == dfloat128_type_node)
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                  break;
                }

              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg0);
              if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg1);
              if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype))
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                }
            }
        }
        break;
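        /* For example, with float a and b, (float)((double) a + (double) b)
           is rewritten here as a + b computed directly in float.  */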
      default:
        break;
      }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}
/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float, or
   vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */
tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
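  /* For example, (long) round (d) becomes lround (d) here, and in ISO
     C99 mode (long) floor (d) is turned into the corresponding lfloor
     built-in; the narrowing to the final integer type is then done by
     the recursive call above.  */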
  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first,
         and from there widen/truncate to the required type.  */
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }
      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;
          tree tem;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          tem = fold_unary (code, type, expr);
          if (tem)
            return tem;

          tem = build1 (code, type, expr);
          TREE_NO_WARNING (tem) = 1;
          return tem;
        }
      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));
      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.

         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
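      /* For example, with int i and j, (int)((long) i + (long) j) is
         computed as an int-width addition; it is actually done in
         unsigned int (unless signed overflow is defined to wrap) so that
         no new signed-overflow undefinedness is introduced.  */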
      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */
                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;
        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If the signedness of arg0 and arg1 doesn't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }
        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR or MINUS_EXPR in an unsigned
                           type.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;
        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex;

            /* Don't do unsigned arithmetic where signed was wanted,
               or vice versa.  */
            if (TYPE_UNSIGNED (TREE_TYPE (expr)))
              typex = unsigned_type_for (type);
            else
              typex = signed_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              convert (type, TREE_OPERAND (expr, 1)),
                              convert (type, TREE_OPERAND (expr, 2)));
        default:
          break;
        }

      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}
/* Convert EXPR to the complex type TYPE in the usual ways.  */
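/* For example, converting a double value D to complex double yields
   COMPLEX_EXPR <D, 0.0>, and converting one complex type to another
   converts the real and imaginary parts separately.  */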
tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}
/* Convert EXPR to the vector type TYPE in the usual ways.  */
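/* For example, a conversion between a vector type and an integer type
   of the same total size is represented as a VIEW_CONVERT_EXPR, while
   a conversion between objects of different sizes is rejected with an
   error.  */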
tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can't convert value to a vector");
      return error_mark_node;
    }
}