/* Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "dominance.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
};
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
};
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
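
/* For illustration: each DEF_INTERNAL_FN entry in internal-fn.def, e.g. a
   hypothetical

       DEF_INTERNAL_FN (FOO, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)

   expands once per inclusion above: to the string "FOO" in
   internal_fn_name_array, to its ECF_* flags in internal_fn_flags_array, and,
   when the fnspec string is non-null, to a STRING_CST recorded in
   internal_fn_fnspec_array, all indexed by IFN_FOO.  */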
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  insn_code icode;
  machine_mode imode, vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}
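
/* For illustration: the expanders below call this as, e.g.,
   get_multi_vector_move (TREE_TYPE (lhs), vec_load_lanes_optab), i.e. they
   look up the insn that moves between the array-of-vectors mode and the
   element vector mode; the assert above requires the target to provide that
   optab entry for the mode pair.  */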
/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}
/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
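
/* Note on the pattern used above: the expanders fill a struct expand_operand
   array with create_{output,input,fixed}_operand and pass it to expand_insn
   together with the insn code obtained from the optab; the same shape is
   reused below for maskload/maskstore and for the jump-on-overflow insns.  */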
static void
expand_ANNOTATE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_VF (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_LAST_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_NULL (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_BOUNDS (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_VPTR (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_OBJECT_SIZE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_ASAN_CHECK (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */
static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
        return 2;
      else
        return 1;
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
                return 1;
              prec = TYPE_PRECISION (TREE_TYPE (t));
              arg = t;
              continue;
            }
        }
      return 3;
    }

  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 2;
    }
  return 3;
}
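
/* For illustration: an SSA name whose value range is known to be [0, 100]
   yields 1, a range of [-100, -1] yields 2, and an unknown or mixed-sign
   range yields 3, matching the return values documented above.  */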
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }

  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
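
/* For illustration: a value known to lie in [0, 200] needs 8 bits of
   UNSIGNED precision but 9 bits when it must be represented in a SIGNED
   type; the "+ (orig_sign != sign)" term above accounts for that extra bit
   whenever the signedness used internally differs from the requested one.  */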
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     res = (S) ((U) s1 + (U) s2)
     ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     res = (S) ((U) s1 - (U) s2)
     ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     res = (S) ((U) s1 + u2)
     ovf = ((U) res ^ sgn) < u2
     ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     res = (S) ((U) s1 - u2)
     ovf = u2 > ((U) s1 ^ sgn)
     ovf = s1 < 0 || u2 > (U) s1
     ovf = u1 >= ((U) s2 ^ sgn)
     ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     res = (U) s1 + (U) s2
     ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
     ovf = (U) res < u2 || res < 0
     ovf = u1 >= u2 ? res < 0 : res >= 0
     res = (U) s1 - (U) s2
     ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
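
  /* Worked example for the rules above (8-bit precision, for illustration):
     s1 = 100, s2 = 50 gives res = (S) 150 = -106; since s2 >= 0 the
     "res < s1" test applies and -106 < 100 correctly reports the signed
     overflow of s1 + s2.  */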
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      std::swap (uns0_p, uns1_p);
    }

  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      rtx tem = op1;
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      goto do_error_label;
    }

  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
    }

  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
    }

  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
        /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
        {
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
            {
              std::swap (op0, op1);
              pos_neg = pos_neg0;
            }
        }
      rtx tem;
      if (pos_neg != 3)
        {
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                                    ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      else
        {
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label, PROB_EVEN);
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      goto do_error_label;
    }

  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
         the second operand, as subtraction is not commutative) is always
         non-negative or always negative, we can do just one comparison
         and conditional jump instead of 2 at runtime, 3 present in the
         emitted code.  If one of the arguments is CONST_INT, all we
         need is to make sure it is op1, then the first
         do_compare_rtx_and_jump will be just folded.  Otherwise try
         to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
        std::swap (op0, op1);
      else if (CONST_INT_P (op1))
        ;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
        {
          pos_neg = get_range_pos_neg (arg0);
          if (pos_neg != 3)
            std::swap (op0, op1);
        }
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
        pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
        do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
        {
          emit_jump (do_error);
          emit_label (sub_check);
        }

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      tree fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
                                              arg0, arg1);
      expand_normal (fn);
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        {
          res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          expand_arith_overflow_result_store (lhs, target, mode, res);
        }
    }
}
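
/* Note: the structure above is reused by the other checked-arithmetic
   expanders below -- first try a target-provided jump-on-overflow pattern
   (addv4/subv4 here, negv3 and mulv4/umulv4 below) via maybe_expand_insn,
   and only when the target lacks it fall back to an explicit
   compare-and-branch sequence on the wrapped result.  */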
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx op1, res;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      tree fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc,
                                              TREE_TYPE (arg1),
                                              arg1, NULL_TREE);
      expand_normal (fn);
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  tree type;
  enum insn_code icode;
  bool uns = unsr_p;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     res = (S) ((U) s1 * (U) s2)
     ovf = main_ovf (false)
     ovf = main_ovf (true)
     ovf = (s1 < 0 && u2) || main_ovf (true)
     ovf = res < 0 || main_ovf (true)
     res = (S) ((U) s1 * u2)
     ovf = (S) u2 >= 0 ? main_ovf (false)
           : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
     t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
     ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
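
  /* Worked example for the rules above (8-bit precision, for illustration):
     s1 = 20, s2 = 30 gives the wrapped result (S) ((U) 20 * (U) 30)
     = (S) 88, while the infinite precision product 600 does not fit in a
     signed 8-bit type, so main_ovf (false) must signal the overflow.  */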
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      std::swap (uns0_p, uns1_p);
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
        {
        case 1:
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          goto do_main;
        case 2:
          {
            /* If s1 is negative, avoid the main code, just multiply and
               signal overflow if op1 is not 0.  */
            struct separate_ops ops;
            ops.code = MULT_EXPR;
            ops.type = TREE_TYPE (arg1);
            ops.op0 = make_tree (ops.type, op0);
            ops.op1 = make_tree (ops.type, op1);
            ops.op2 = NULL_TREE;
            res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
            do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                     NULL, done_label, PROB_VERY_LIKELY);
            goto do_error_label;
          }
        case 3:
          {
            rtx_code_label *do_main_label;
            do_main_label = gen_label_rtx ();
            do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                     NULL, do_main_label, PROB_VERY_LIKELY);
            do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                     NULL, do_main_label, PROB_VERY_LIKELY);
            write_complex_part (target, const1_rtx, true);
            emit_label (do_main_label);
            goto do_main;
          }
        default:
          gcc_unreachable ();
        }
    }

  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
        {
        case 1:
          goto do_main;
        case 2:
          {
            /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
               avoid the main code, just multiply and signal overflow
               unless 0 * u2 or -1 * ((U) Smin).  */
            struct separate_ops ops;
            ops.code = MULT_EXPR;
            ops.type = TREE_TYPE (arg1);
            ops.op0 = make_tree (ops.type, op0);
            ops.op1 = make_tree (ops.type, op1);
            ops.op2 = NULL_TREE;
            res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
            do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                     NULL, done_label, PROB_VERY_LIKELY);
            do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                     NULL, do_error, PROB_VERY_UNLIKELY);
            int prec;
            prec = GET_MODE_PRECISION (mode);
            rtx sgn;
            sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
            do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                     NULL, done_label, PROB_VERY_LIKELY);
            goto do_error_label;
          }
        case 3:
          /* Rest of handling of this case after res is computed.  */
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
        {
        case 1: /* Both operands known to be non-negative.  */
          goto do_main;
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          goto do_main;
        case 3:
          if ((pos_neg0 ^ pos_neg1) == 3)
            {
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
              ops.type
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
                                                  1);
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                  OPTAB_LIB_WIDEN);
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
              goto do_error_label;
            }
          {
            /* The general case, do all the needed comparisons at runtime.  */
            rtx_code_label *do_main_label, *after_negate_label;
            rtx rop0, rop1;
            rop0 = gen_reg_rtx (mode);
            rop1 = gen_reg_rtx (mode);
            emit_move_insn (rop0, op0);
            emit_move_insn (rop1, op1);
            op0 = rop0;
            op1 = rop1;
            do_main_label = gen_label_rtx ();
            after_negate_label = gen_label_rtx ();
            tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                OPTAB_LIB_WIDEN);
            do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                     NULL, after_negate_label, PROB_VERY_LIKELY);
            /* Both arguments negative here, negate them and continue with
               normal unsigned overflow checking multiplication.  */
            emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
                                              NULL_RTX, false));
            emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
                                              NULL_RTX, false));
            /* Avoid looking at arg0/arg1 ranges, as we might have changed
               the arguments.  */
            arg0 = error_mark_node;
            arg1 = error_mark_node;
            emit_jump (do_main_label);
            emit_label (after_negate_label);
            tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
                                 OPTAB_LIB_WIDEN);
            do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                     NULL, do_main_label, PROB_VERY_LIKELY);
            /* One argument is negative here, the other positive.  This
               overflows always, unless one of the arguments is 0.  But
               if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
               is, thus we can keep do_main code oring in overflow as is.  */
            do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                     NULL, do_main_label, PROB_VERY_LIKELY);
            write_complex_part (target, const1_rtx, true);
            emit_label (do_main_label);
            goto do_main;
          }
        default:
          gcc_unreachable ();
        }
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;

      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
        {
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
          ops.type
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
                                     NULL_RTX, uns);
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);
          if (uns)
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     PROB_VERY_LIKELY);
          else
            {
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
                                          NULL_RTX, 0);
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
            }
        }
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
        {
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
                                      NULL_RTX, uns);
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
          if (!uns)
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
                                     NULL_RTX, 0);
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
                                      NULL_RTX, uns);
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
          if (!uns)
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
                                     NULL_RTX, 0);

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
             halves.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op0_small_p resp. op1_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          int op0_sign = 1;
          int op1_sign = 1;
          if (pos_neg0 == 1)
            op0_sign = 0;
          else if (pos_neg0 == 2)
            op0_sign = -1;
          if (pos_neg1 == 1)
            op1_sign = 0;
          else if (pos_neg1 == 2)
            op1_sign = -1;

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
            op0_small_p = true;
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
            op1_small_p = true;
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
          if (op0_small_p)
            {
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
            }
          else if (op1_small_p)
            {
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
            }
          else if (op0_sign == op1_sign)
            {
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            }

          if (!op0_small_p)
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
                                     PROB_UNLIKELY);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,
                                     PROB_UNLIKELY);

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
            {
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          if (GET_CODE (lopart1) == SUBREG)
            {
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
          rtx thisres
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);

          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,
                                     PROB_UNLIKELY);

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);

          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
          rtx lo0xlo1
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          if (!uns)
            {
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_hipart_neg,
                                         PROB_EVEN);

              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_hipart_neg);

              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_lopart_neg,
                                         PROB_EVEN);

              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_lopart_neg);
            }

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
                                     1, OPTAB_DIRECT);
          emit_move_insn (loxhi, tem);

          /* if (loxhi >> (bitsize / 2)
                 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
             if (loxhi >> (bitsize / 2) == 0 (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
                                          NULL_RTX, 0);
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
          if (!uns)
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   PROB_VERY_UNLIKELY);

          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
                                           NULL_RTX, 1);
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
                                     1, OPTAB_DIRECT);
          emit_move_insn (res, tem);
          emit_jump (done_label);

          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and highpart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);

          if (!uns)
            {
              if (!op0_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              if (!op1_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is negative, if they are
                 different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL, hipart_different,
                                         PROB_EVEN);

              do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

              emit_label (hipart_different);

              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);
            }

          emit_label (do_overflow);

          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
        }
      else
        {
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          ops.type = type;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);
        }
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      tree fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
                                              arg0, arg1);
      expand_normal (fn);
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
                               all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
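
/* Note: compared with expand_addsub_overflow, the fallback here covers three
   strategies visible above -- a widening multiplication into the 2x-wider
   mode, splitting the operands into half-mode high/low parts, or a plain
   multiplication when neither is possible (a case the assertion above
   restricts to the !is_ubsan callers).  */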
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                          false, false, false, true);
}
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          enum machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
         with precres widening right away, otherwise do it only
         if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
        ;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
          || ((!uns0_p || !uns1_p) && !unsr_p
              && prec0 + uns0_p <= precres
              && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1,
                                      unsr_p, unsr_p, unsr_p, false);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1,
                                   unsr_p, unsr_p, unsr_p, false);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
#ifdef WORD_REGISTER_OPERATIONS
          int p = BITS_PER_WORD;
#else
          int p = precop;
#endif
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}
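
/* For illustration: adding two 32-bit operands into a 32-bit result gives
   prec0 = prec1 = precres = 32 and, for matching signedness,
   precop = MAX (32, 32) + 1 = 33, so the "infinite precision fits" shortcut
   above is skipped and one of the checking expanders is used; with a 64-bit
   result precop <= precres holds and the operation is expanded as a plain
   addition with the overflow flag cleared.  */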
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *)
{
  gcc_unreachable ();
}
static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
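
/* Note: MASK_LOAD and MASK_STORE rebuild a MEM_REF from the first two call
   arguments and then emit the target's maskload/maskstore pattern for the
   vector mode, taking the mask from the last argument; the expanders assume
   the corresponding optab handler exists for that mode.  */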
static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}

static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */

static void
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}
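
/* Note: expand_internal_call simply indexes the expander table with the
   internal function code of the call, so a GIMPLE call to ADD_OVERFLOW is
   routed to expand_ADD_OVERFLOW above, which in turn dispatches to
   expand_arith_overflow with PLUS_EXPR.  */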