/* Internal functions.
   Copyright (C) 2011-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
24 #include "internal-fn.h"
25 #include "stor-layout.h"
27 #include "insn-codes.h"
35 #include "hard-reg-set.h"
38 #include "dominance.h"
40 #include "basic-block.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
48 #include "stringpool.h"
49 #include "tree-ssanames.h"
50 #include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
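
/* Illustrative note (example entry, not quoted from internal-fn.def):
   the .def file consists of lines such as

     DEF_INTERNAL_FN (LOAD_LANES, ECF_CONST | ECF_LEAF, NULL)

   so each DEF_INTERNAL_FN definition above expands the whole file into
   one initializer or statement per internal function: "LOAD_LANES" in
   the name array, its ECF_* mask in the flags array, and a build_string
   call here whenever a fnspec string is given.  */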
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}
/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gimple stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}
/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gimple stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
static void
expand_ANNOTATE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
	return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      if (++cnt > 30)
		return 3;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}
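
/* Illustrative example: for an argument defined as
     int arg = (int) some_unsigned_char;
   the conversion walk above strips the widening cast, sees an unsigned
   type narrower than PREC, and returns 1 (always positive); a plain
   "int" SSA name with VR_RANGE [-5, 10] would return 3.  */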
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, SIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
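
/* Illustrative example: for an SSA name with VR_RANGE [0, 1000] and
   SIGN == SIGNED, wi::min_precision (1000, SIGNED) is 11, so the
   function returns MIN (prec, 11).  The "+ (orig_sign != sign)" term
   accounts for one extra sign bit whenever the question was asked for
   the opposite signedness than the one the answer was computed in.  */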
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      emit_cmp_and_jump_insns (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, NULL_RTX, mode, false, done_label,
			       PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
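
/* Illustrative note: the {ADD,SUB,MUL}_OVERFLOW internal functions
   return a complex pair; the __real__ part is the (possibly truncated)
   result value and the __imag__ part is the overflow flag, which is
   why the overflow-checking expanders store through
   write_complex_part.  */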
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
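
  /* Worked example of the first rule above (illustrative): with 8-bit
     precision, s1 = 100, s2 = 50:
	res = (S) ((U) 100 + (U) 50) = (S) 150 = -106
     s2 >= 0, so ovf = res < s1, i.e. -106 < 100: overflow, matching
     the mathematical result 150 > 127.  */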
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur  */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      emit_cmp_and_jump_insns (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       NULL_RTX, mode, false, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (tem, op1, GEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* s1 + u2 -> ur  */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur  */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	emit_cmp_and_jump_insns (op0, const0_rtx, LT, NULL_RTX, mode, false,
				 do_error, PROB_VERY_UNLIKELY);
      emit_cmp_and_jump_insns (op1, op0, LEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* u1 - s2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (op0, tem, LTU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr  */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
			       do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      emit_cmp_and_jump_insns (res, tem, GEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* s1 +- s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      rtx tem = op1;
	      op1 = op0;
	      op0 = tem;
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, NULL_RTX,
				   mode, false, do_ior_label, PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }
  /* u1 - u2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, op1, GEU, NULL_RTX, mode, false,
			       op0_geu_op1, PROB_EVEN);
      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr  */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }
728 rtx_code_label
*sub_check
= gen_label_rtx ();
731 /* Compute the operation. On RTL level, the addition is always
733 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
734 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
736 /* If we can prove one of the arguments (for MINUS_EXPR only
737 the second operand, as subtraction is not commutative) is always
738 non-negative or always negative, we can do just one comparison
739 and conditional jump instead of 2 at runtime, 3 present in the
740 emitted code. If one of the arguments is CONST_INT, all we
741 need is to make sure it is op1, then the first
742 emit_cmp_and_jump_insns will be just folded. Otherwise try
743 to use range info if available. */
744 if (code
== PLUS_EXPR
&& CONST_INT_P (op0
))
750 else if (CONST_INT_P (op1
))
752 else if (code
== PLUS_EXPR
&& TREE_CODE (arg0
) == SSA_NAME
)
754 pos_neg
= get_range_pos_neg (arg0
);
762 if (pos_neg
== 3 && !CONST_INT_P (op1
) && TREE_CODE (arg1
) == SSA_NAME
)
763 pos_neg
= get_range_pos_neg (arg1
);
765 /* If the op1 is negative, we have to use a different check. */
767 emit_cmp_and_jump_insns (op1
, const0_rtx
, LT
, NULL_RTX
, mode
,
768 false, sub_check
, PROB_EVEN
);
770 /* Compare the result of the operation with one of the operands. */
772 emit_cmp_and_jump_insns (res
, op0
, code
== PLUS_EXPR
? GE
: LE
,
773 NULL_RTX
, mode
, false, done_label
,
776 /* If we get here, we have to print the error. */
779 emit_jump (do_error
);
781 emit_label (sub_check
);
784 /* We have k = a + b for b < 0 here. k <= a must hold. */
786 emit_cmp_and_jump_insns (res
, op0
, code
== PLUS_EXPR
? LE
: GE
,
787 NULL_RTX
, mode
, false, done_label
,
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_call (fn, NULL_RTX, false);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}
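
/* Illustrative sketch, not part of GCC proper: the generic fallback
   check above for signed "res = s1 + s2", written as plain C for a
   32-bit int:

     int res = (int) ((unsigned int) s1 + (unsigned int) s2);
     int ovf = s2 < 0 ? res > s1 : res < s1;

   e.g. s1 = INT_MAX, s2 = 1 gives res = INT_MIN and ovf = 1.  */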
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      emit_cmp_and_jump_insns (op1, minv, NE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_call (fn, NULL_RTX, false);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
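
/* Illustrative note: in two's complement the only negation that can
   overflow is negating the most negative value (e.g. -INT_MIN for
   int), which is exactly what the comparison against TYPE_MIN_VALUE
   above checks for.  */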
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
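
  /* Worked example of the "s1 * u2 -> sr" rule above (illustrative):
     with 8-bit precision, s1 = -1, u2 = 200:
	res = (S) ((U) 255 * 200) = (S) 56 = 56
     (S) u2 = -56 < 0, s1 == -1 but u2 != (U) res (200 != 56), so
     ovf = 1, matching the mathematical result -200 < -128.  */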
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur  */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (op0, const0_rtx, GE, NULL_RTX, mode,
				   false, do_main_label, PROB_VERY_LIKELY);
	  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, mode,
				   false, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }
  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_cmp_and_jump_insns (op0, const0_rtx, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  emit_cmp_and_jump_insns (op0, constm1_rtx, NE, NULL_RTX, mode,
				   false, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  emit_cmp_and_jump_insns (op1, sgn, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
  /* s1 * s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      emit_cmp_and_jump_insns (tem, const0_rtx, EQ, NULL_RTX, mode,
				       false, done_label, PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem2, const0_rtx, GE, NULL_RTX, mode, false,
				   do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  emit_cmp_and_jump_insns (tem, const0_rtx, EQ, NULL_RTX, mode, false,
				   do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }
  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    emit_cmp_and_jump_insns (hipart, const0_rtx, EQ, NULL_RTX, mode,
				     false, done_label, PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				       false, done_label, PROB_VERY_LIKELY);
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halves.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }
	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);
	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);
	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);
	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX,
					 hmode, false, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX,
					 hmode, false, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }
	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
	     if (loxhi >> (bitsize / 2) == 0		 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);
);
1470 emit_label (both_ops_large
);
1472 /* If both operands are large (not sign (!uns) or zero (uns)
1473 extended from hmode), then perform the full multiplication
1474 which will be the result of the operation.
1475 The only cases which don't overflow are for signed multiplication
1476 some cases where both hipart0 and highpart1 are 0 or -1.
1477 For unsigned multiplication when high parts are both non-zero
1478 this overflows always. */
1479 ops
.code
= MULT_EXPR
;
1480 ops
.op0
= make_tree (type
, op0
);
1481 ops
.op1
= make_tree (type
, op1
);
1482 tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1483 emit_move_insn (res
, tem
);
	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX,
					   hmode, true, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX,
					   hmode, true, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
					 true, hipart_different, PROB_EVEN);

	      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode,
				       false, do_error, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode,
				       false, do_error, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }
	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_call (fn, NULL_RTX, false);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);
  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      emit_cmp_and_jump_insns (op1, const0_rtx, GE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_cmp_and_jump_insns (op0, const0_rtx, EQ, NULL_RTX, mode,
			       false, set_noovf, PROB_VERY_LIKELY);
      emit_cmp_and_jump_insns (op0, constm1_rtx, NE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_UNLIKELY);
      emit_cmp_and_jump_insns (op1, res, NE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }
  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
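
/* Illustrative sketch, not part of GCC proper: the double-width
   strategy above for a signed 32-bit multiply, written as plain C:

     long long w = (long long) a * b;     (the WIDEN_MULT_EXPR step)
     int res = (int) w;
     int hipart = (int) (w >> 32);
     int ovf = hipart != (res >> 31);

   i.e. overflow iff the high half differs from the sign extension of
   the low half.  */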
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
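
/* Illustrative note: with -fsanitize=signed-integer-overflow, a signed
   source expression "a + b" reaches expansion as something like

     _3 = UBSAN_CHECK_ADD (a_1, b_2);

   and the routines above emit the checked arithmetic plus a call to
   the matching __ubsan_handle_*_overflow runtime entry on the error
   path.  */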
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}
#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
	 with precres widening right away, otherwise do it only
	 if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
	;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
	  || ((!uns0_p || !uns1_p) && !unsr_p
	      && prec0 + uns0_p <= precres
	      && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		{
		  expand_neg_overflow (loc, lhs, arg1, false);
		  return;
		}
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				      unsr_p, unsr_p, unsr_p, false);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1,
				   unsr_p, unsr_p, unsr_p, false);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}
      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#ifdef WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}
      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}
      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}
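
/* Illustrative example: for

     int a, b;  unsigned char r;
     __builtin_add_overflow (a, b, &r);

   prec0 = prec1 = 32 but precres = 8, so none of the direct cases
   apply and the loop above retries with successively wider types;
   expand_arith_overflow_result_store then truncates the wide result
   and sets the overflow flag if it does not fit in R.  */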
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
static void
expand_MASK_LOAD (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_MASK_STORE (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
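
/* Illustrative note: MASK_LOAD and MASK_STORE calls are produced by
   the vectorizer (if-conversion) for conditional accesses such as
   "if (c[i]) x = p[i];"; arguments 0 and 1 are the base pointer and
   offset, folded back into a MEM_REF above so that the target's
   maskload<mode>/maskstore<mode> patterns see a plain memory
   operand.  */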
static void
expand_ABNORMAL_DISPATCHER (gimple)
{
}

static void
expand_BUILTIN_EXPECT (gimple stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
			 EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gimple stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (gimple) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gimple stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}