/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "toplev.h"
#include "langhooks.h"
#include "real.h"
/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
tree
convert_to_pointer (tree type, tree expr)
{
  if (TREE_TYPE (expr) == type)
    return expr;

  /* Propagate overflow to the NULL pointer.  */
  if (integer_zerop (expr))
    return force_fit_type_double (type, 0, 0, 0, TREE_OVERFLOW (expr));

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      return fold_build1 (NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_PRECISION (TREE_TYPE (expr)) != POINTER_SIZE)
        expr = fold_build1 (NOP_EXPR,
                            lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                            expr);
      return fold_build1 (CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
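/* Illustrative sketch (the variable names and the 64-bit target are
   hypothetical, not taken from this file): for

       int i;
       char *p = (char *) i;

   on a target where POINTER_SIZE is 64 but int is 32 bits, the code above
   first widens I to a pointer-sized integer and only then converts to the
   pointer type, so the pointer conversion itself is always performed at
   pointer width.  */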
/* Avoid any floating point extensions from EXP.  */
tree
strip_float_extensions (tree exp)
{
  tree sub, expt, subt;

  /* For a floating point constant, look up the narrowest type that can hold
     it properly and handle it like (type)(narrowest_type)constant.
     This way we can optimize for instance a=a*2.0 where "a" is float
     but 2.0 is a double constant.  */
  if (TREE_CODE (exp) == REAL_CST)
    {
      REAL_VALUE_TYPE orig;
      tree type = NULL;

      orig = TREE_REAL_CST (exp);
      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
          && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
        type = float_type_node;
      else if (TYPE_PRECISION (TREE_TYPE (exp))
               > TYPE_PRECISION (double_type_node)
               && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
        type = double_type_node;
      if (type)
        return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
    }

  if (TREE_CODE (exp) != NOP_EXPR
      && TREE_CODE (exp) != CONVERT_EXPR)
    return exp;

  sub = TREE_OPERAND (exp, 0);
  subt = TREE_TYPE (sub);
  expt = TREE_TYPE (exp);

  if (!FLOAT_TYPE_P (subt))
    return exp;

  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
    return exp;

  return strip_float_extensions (sub);
}
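/* Illustrative sketch (hypothetical C source, not part of this file): in

       float a;
       a = a * 2.0;

   the front end builds (float) ((double) a * 2.0).  strip_float_extensions
   returns the original float-typed A for the (double) a operand, and
   returns a float REAL_CST for 2.0 because the constant truncates to float
   exactly, which lets convert_to_real do the multiplication in float.  */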
/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, integer, or enumeral;
   in other cases error is called.  */
tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
          {
            tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
            tree newtype = type;

            /* We have (outertype)sqrt((innertype)x).  Choose the wider mode
               of the two as the safe type for the operation.  */
            if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
              newtype = TREE_TYPE (arg0);

            /* Be careful about integer to fp conversions.
               These may still overflow.  */
            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                    || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
              {
                tree fn = mathfn_built_in (newtype, fcode);

                if (fn)
                  {
                    tree arg = fold (convert_to_real (newtype, arg0));
                    expr = build_call_expr (fn, 1, arg);
                    if (newtype == type)
                      return expr;
                  }
              }
          }
        default:
          break;
        }
    }
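/* Illustrative sketch (hypothetical variables):

       float x;
       float y = (float) sqrt ((double) x);

   Here the argument strips back to float and the result mode is float, so
   the call is rewritten to use the float variant of the builtin, giving
   the equivalent of y = sqrtf (x), provided such a builtin exists for the
   narrower type.  */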
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }
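/* Illustrative sketch (hypothetical variables, assuming a target where
   long double is wider than double):

       long double d;
       double r = (double) floorl (d);

   cannot safely become floor ((double) d), because narrowing D first may
   round it up across an integer boundary; the precision check above
   rejects it.  By contrast

       double d2;
       double r2 = (double) floorl ((long double) d2);

   strips the argument back to double and is rewritten to floor (d2).  */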
  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode.  */
        case ABS_EXPR:
        case NEGATE_EXPR:
          if (!flag_rounding_math
              && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
            return build1 (TREE_CODE (expr), type,
                           fold (convert_to_real (type,
                                                  TREE_OPERAND (expr, 0))));
          break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
          {
            tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
            tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && FLOAT_TYPE_P (TREE_TYPE (arg1)))
              {
                tree newtype = type;

                if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == SDmode)
                  newtype = dfloat32_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == DDmode)
                  newtype = dfloat64_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == TDmode)
                  newtype = dfloat128_type_node;
                if (newtype == dfloat32_type_node
                    || newtype == dfloat64_type_node
                    || newtype == dfloat128_type_node)
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                    break;
                  }

                if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg0);
                if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg1);
                if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype))
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                  }
              }
          }
          break;

        default:
          break;
      }
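/* Illustrative sketch (hypothetical variables):

       float a, b;
       float r = (float) ((double) a + (double) b);

   Both operands strip back to float, float is no wider than the result
   type, and the sum is narrower than the original double operation, so the
   addition is rebuilt directly in float as r = a + b.  */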
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}
/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float, or
   vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */
tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }
  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
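/* Illustrative sketch (hypothetical variables):

       double d;
       long n = (long) round (d);

   Here the result is at least as wide as long and the target type is
   signed, so the call is rewritten to n = lround (d); with a long long
   result it would use llround instead.  The floor and ceil cases follow
   the same pattern but only when C99 runtime functions are available.  */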
  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first,
         and from there widen/truncate to the required type.  */
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                          expr);
      return fold_convert (type, expr);
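/* Illustrative sketch (hypothetical variables): a conversion such as

       char *p;
       unsigned short s = (unsigned short) p;

   is not done in one step: P is first converted to a pointer-sized
   integer, and fold_convert then truncates (or widens) that value to the
   requested integer type.  A literal null pointer simply becomes the
   integer constant 0 of the target type.  */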
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;
          tree tem;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          tem = fold_unary (code, type, expr);
          if (tem)
            return tem;

          tem = build1 (code, type, expr);
          TREE_NO_WARNING (tem) = 1;
          return tem;
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));
      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.

         It is desirable in these cases:
         1) when truncating down to full-word from a larger size.
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
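/* Illustrative sketch (hypothetical variables) of the distribution
   described above:

       long a, b;
       int i = (int) (a + b);

   can be computed by truncating A and B first and adding at int width,
   since the low-order bits of the narrow sum equal the truncated wide sum;
   the code below picks a suitable signed or unsigned narrow type.  */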
      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;
        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR or MINUS_EXPR in an unsigned
                           type.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;
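/* Illustrative sketch (hypothetical variables) of the signedness choice
   above: for

       long a, b;
       int i = (int) (a + b);

   the truncated addition is done in unsigned int rather than int, since an
   int addition could introduce signed overflow that the original long
   addition did not have; the unsigned sum is then converted back to int.  */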
        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex;

            /* Don't do unsigned arithmetic where signed was wanted,
               or vice versa.  */
            if (TYPE_UNSIGNED (TREE_TYPE (expr)))
              typex = unsigned_type_for (type);
            else
              typex = signed_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
691 "can't convert between vector values of different size" error. */
692 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == VECTOR_TYPE
693 && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr
, 0))))
694 != GET_MODE_SIZE (TYPE_MODE (type
))))
696 /* If truncating after truncating, might as well do all at once.
697 If truncating after extending, we may get rid of wasted work. */
698 return convert (type
, get_unwidened (TREE_OPERAND (expr
, 0), type
));
        case COND_EXPR:
          /* Pushing the narrowing down through the conditional is sometimes
             worthwhile and never loses.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              convert (type, TREE_OPERAND (expr, 1)),
                              convert (type, TREE_OPERAND (expr, 2)));
        default:
          break;
        }

      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}
/* Convert EXPR to the complex type TYPE in the usual ways.  */
tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}
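/* Illustrative sketch (hypothetical variables): a scalar-to-complex
   conversion such as

       double d;
       _Complex double z = d;

   becomes COMPLEX_EXPR <d, 0.0>, i.e. D as the real part and a converted
   zero as the imaginary part, while a complex-to-complex conversion just
   converts the two parts, saving EXPR first when it has to be read
   twice.  */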
/* Convert EXPR to the vector type TYPE in the usual ways.  */
tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can't convert value to a vector");
      return error_mark_node;
    }
}