/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "toplev.h"
#include "langhooks.h"
#include "real.h"
/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
tree
convert_to_pointer (tree type, tree expr)
{
  if (TREE_TYPE (expr) == type)
    return expr;

  if (integer_zerop (expr))
    {
      tree t = build_int_cst (type, 0);
      if (TREE_OVERFLOW (expr) || TREE_CONSTANT_OVERFLOW (expr))
        t = force_fit_type (t, 0, TREE_OVERFLOW (expr),
                            TREE_CONSTANT_OVERFLOW (expr));
      return t;
    }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      return fold_build1 (NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_PRECISION (TREE_TYPE (expr)) != POINTER_SIZE)
        expr = fold_build1 (NOP_EXPR,
                            lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                            expr);
      return fold_build1 (CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
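
/* For example, on a 64-bit target where int is 32 bits wide, a conversion
   such as (char *) i with i of type int is handled above by first widening
   i to an integer type of POINTER_SIZE bits (whatever type
   lang_hooks.types.type_for_size returns) and only then converting that
   value to the pointer type.  The concrete widths here are illustrative,
   not taken from this file.  */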

/* Avoid any floating point extensions from EXP.  */

tree
strip_float_extensions (tree exp)
{
  tree sub, expt, subt;

  /* For a floating point constant, look up the narrowest type that can hold
     it properly and handle it like (type)(narrowest_type)constant.
     This way we can optimize for instance a=a*2.0 where "a" is float
     but 2.0 is a double constant.  */
  if (TREE_CODE (exp) == REAL_CST)
    {
      REAL_VALUE_TYPE orig;
      tree type = NULL;

      orig = TREE_REAL_CST (exp);
      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
          && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
        type = float_type_node;
      else if (TYPE_PRECISION (TREE_TYPE (exp))
               > TYPE_PRECISION (double_type_node)
               && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
        type = double_type_node;
      if (type)
        return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
    }

  if (TREE_CODE (exp) != NOP_EXPR
      && TREE_CODE (exp) != CONVERT_EXPR)
    return exp;

  sub = TREE_OPERAND (exp, 0);
  subt = TREE_TYPE (sub);
  expt = TREE_TYPE (exp);

  if (!FLOAT_TYPE_P (subt))
    return exp;

  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
    return exp;

  return strip_float_extensions (sub);
}
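
/* So, for instance, strip_float_extensions applied to (double) f, with f of
   type float, returns f itself, and applied to the double constant 2.0 it
   returns the equivalent float constant, since 2.0 is exactly representable
   as a float.  This mirrors the a = a * 2.0 example in the comment above.  */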

/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
          /* ...CASE_MATHFN entries for the remaining one-argument math
             builtins omitted...  */
          CASE_MATHFN (SQRT)
#undef CASE_MATHFN
            {
              tree arg0
                = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1)));
              tree newtype = type;

              /* We have (outertype)sqrt((innertype)x).  Choose the wider mode
                 of the two as the safe type for the operation.  */
              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
                newtype = TREE_TYPE (arg0);

              /* Be careful about integer to fp conversions.
                 These may overflow still.  */
              if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
                {
                  tree fn = mathfn_built_in (newtype, fcode);

                  if (fn)
                    {
                      tree arglist
                        = build_tree_list (NULL_TREE,
                                           fold (convert_to_real (newtype,
                                                                  arg0)));
                      expr = build_function_call_expr (fn, arglist);
                      if (newtype == type)
                        return expr;
                    }
                }
            }
        default:
          break;
        }
    }
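
  /* Concretely, (float) sqrt ((double) f) with f of type float is rewritten
     above as sqrtf (f): the argument extension is stripped, the narrower
     sqrtf builtin is looked up with mathfn_built_in, and the outer cast
     disappears because the call already has the requested type.  */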
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg
            = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1)));

          /* Make sure (type)arg is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return
              build_function_call_expr (fn,
                                        build_tree_list (NULL_TREE,
                                          fold (convert_to_real (type, arg))));
        }
    }
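
  /* For instance, (float) floor ((double) f) with f of type float becomes
     floorf (f) here, while (float) floor (d) with d of type double is left
     untouched, exactly because of the extension check above.  */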
  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is always safe.  */
        case ABS_EXPR:
        case NEGATE_EXPR:
          if (TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
            return build1 (TREE_CODE (expr), type,
                           fold (convert_to_real (type,
                                                  TREE_OPERAND (expr, 0))));
          break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
          {
            tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
            tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && FLOAT_TYPE_P (TREE_TYPE (arg1)))
              {
                tree newtype = type;

                if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == SDmode)
                  newtype = dfloat32_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == DDmode)
                  newtype = dfloat64_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == TDmode)
                  newtype = dfloat128_type_node;
                if (newtype == dfloat32_type_node
                    || newtype == dfloat64_type_node
                    || newtype == dfloat128_type_node)
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                    break;
                  }

                if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg0);
                if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg1);
                if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype))
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                  }
              }
          }
          break;
        default:
          break;
      }
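
  /* As a worked case of the PLUS/MULT handling above: with f1 and f2 of type
     float, (float) ((double) f1 + (double) f2) is rewritten as f1 + f2
     computed directly in float, since stripping the extensions leaves two
     float operands and float is narrower than the original double itype.  */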
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float, or
   vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_RINT):
          /* Only convert rint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
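          /* No library call is needed for trunc: the FIX_TRUNC_EXPR produced
             for the float-to-integer cast itself already rounds toward zero,
             so the argument of the call is simply converted directly.  */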
          {
            tree arglist = TREE_OPERAND (s_expr, 1);
            return convert_to_integer (type, TREE_VALUE (arglist));
          }

        default:
          break;
        }

      if (fn)
        {
          tree arglist = TREE_OPERAND (s_expr, 1);
          tree newexpr = build_function_call_expr (fn, arglist);
          return convert_to_integer (type, newexpr);
        }
    }
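
  /* Note that conversions to a type narrower than long (char, say) only
     reach the rewrite above under -funsafe-math-optimizations; otherwise the
     guard on that block skips them, as the comment at its top explains.  */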
  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first,
         and from there widen/truncate to the required type.  */
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                          expr);
      return fold_convert (type, expr);
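
      /* E.g. converting a pointer to an unsigned short is done in two steps:
         first to an integer of POINTER_SIZE bits, then fold_convert narrows
         that value to the 16-bit target type.  The 16-bit width is only an
         illustration of a type narrower than a pointer.  */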

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          return fold_build1 (code, type, expr);
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.

         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
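
      /* As a concrete case (with the usual 32-bit int and 64-bit long, which
         are only illustrative here): (int) ((long) a + (long) b) with a and b
         of type int is rewritten below as a single int-width addition;
         because signed overflow is undefined unless -fwrapv is given, the
         addition is actually emitted in the corresponding unsigned type and
         the result converted back to int.  */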

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR or MINUS_EXPR in an unsigned
                           type.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || (!flag_wrapv
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR)
                            && (!TYPE_UNSIGNED (TREE_TYPE (arg0))
                                || !TYPE_UNSIGNED (TREE_TYPE (arg1)))))
                      typex = lang_hooks.types.unsigned_type (typex);
                    else
                      typex = lang_hooks.types.signed_type (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex;

            /* Don't do unsigned arithmetic where signed was wanted,
               or vice versa.  */
            if (TYPE_UNSIGNED (TREE_TYPE (expr)))
              typex = lang_hooks.types.unsigned_type (type);
            else
              typex = lang_hooks.types.signed_type (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
697 "can't convert between vector values of different size" error. */
698 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == VECTOR_TYPE
699 && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr
, 0))))
700 != GET_MODE_SIZE (TYPE_MODE (type
))))
702 /* If truncating after truncating, might as well do all at once.
703 If truncating after extending, we may get rid of wasted work. */
704 return convert (type
, get_unwidened (TREE_OPERAND (expr
, 0), type
));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              convert (type, TREE_OPERAND (expr, 1)),
                              convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}
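
/* Thus a scalar such as a double becomes a complex value with a zero
   imaginary part, and a complex-to-complex conversion converts the real and
   imaginary parts separately; save_expr above keeps a non-COMPLEX_EXPR
   operand from being evaluated twice when both parts are extracted.  */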

/* Convert EXPR to the vector type TYPE in the usual ways.  */

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can't convert value to a vector");
      return error_mark_node;
    }
}
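
/* Note that no element-wise conversion is attempted here: a vector (or an
   integer of the same overall width) is only reinterpreted bit-for-bit via
   VIEW_CONVERT_EXPR, and anything else is rejected with an error.  */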