   Copyright (C) 2011-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
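
/* To illustrate the X-macro pattern above: assuming internal-fn.def carries
   an entry such as

       DEF_INTERNAL_FN (LOAD_LANES, ECF_CONST | ECF_LEAF, NULL)

   (the exact entries live in internal-fn.def), each inclusion of that file
   re-expands the macro differently: the first table gets the string
   "LOAD_LANES", the second the flag value ECF_CONST | ECF_LEAF, so the name
   and flag tables stay in step with the IFN_* enumeration.  */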
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */
get_multi_vector_move (tree array_type, convert_optab optab)
  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
/* Expand LOAD_LANES call STMT.  */
expand_LOAD_LANES (gcall *stmt)
  struct expand_operand ops[2];

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
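
/* For intuition: the vectorizer emits LOAD_LANES for interleaved accesses
   such as

       for (i = 0; i < n; i++)
         {
           a[i] = s[2 * i];
           b[i] = s[2 * i + 1];
         }

   where one array-of-vectors load feeds several destinations; assuming the
   target provides vec_load_lanes (e.g. AArch64 LD2), the call above expands
   to a single structure-load instruction.  */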
/* Expand STORE_LANES call STMT.  */
expand_STORE_LANES (gcall *stmt)
  struct expand_operand ops[2];

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
expand_ANNOTATE (gcall *)

/* This should get expanded in adjust_simduid_builtins.  */
expand_GOMP_SIMD_LANE (gcall *)

/* This should get expanded in adjust_simduid_builtins.  */
expand_GOMP_SIMD_VF (gcall *)

/* This should get expanded in adjust_simduid_builtins.  */
expand_GOMP_SIMD_LAST_LANE (gcall *)

/* This should get expanded in the sanopt pass.  */
expand_UBSAN_NULL (gcall *)

/* This should get expanded in the sanopt pass.  */
expand_UBSAN_BOUNDS (gcall *)

/* This should get expanded in the sanopt pass.  */
expand_UBSAN_VPTR (gcall *)

/* This should get expanded in the sanopt pass.  */
expand_UBSAN_OBJECT_SIZE (gcall *)

/* This should get expanded in the sanopt pass.  */
expand_ASAN_CHECK (gcall *)

/* This should get expanded in the tsan pass.  */
expand_TSAN_FUNC_EXIT (gcall *)
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */
get_range_pos_neg (tree arg)
  if (arg == error_mark_node)

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  if (TREE_CODE (arg) == INTEGER_CST)
      wide_int w = wi::sext (arg, prec);

  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
      prec = TYPE_PRECISION (TREE_TYPE (arg));

  if (TREE_CODE (arg) != SSA_NAME)
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
              prec = TYPE_PRECISION (TREE_TYPE (t));

  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))

  if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
  if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
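
/* For example, assuming ARG is an unsigned char SSA name with recorded
   value range [0, 100]: read as signed 8-bit values the whole range is
   non-negative, so the result is 1; a range of [200, 255] sign-extends to
   [-56, -1] and yields 2; the full [0, 255] covers both signs and yields 3.  */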
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */
get_min_precision (tree arg, signop sign)
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);

  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        return prec + (orig_sign != sign);

  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
            return prec + (orig_sign != sign);
        return prec + (orig_sign != sign);

  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      prec = MIN (prec, p1);
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
  return prec + (orig_sign != sign);
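
/* For example, assuming ARG is an SSA name of type unsigned short whose
   recorded range is [0, 1000]: with SIGN == UNSIGNED the maximum needs only
   10 bits, so the result drops from the type's 16 bits to 10; whenever the
   requested sign differs from the sign actually used, one extra bit is
   charged by the "+ (orig_sign != sign)" terms above.  */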
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));

      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
  write_complex_part (target, lres, false);
/* Helper for expand_*_overflow.  Store RES into TARGET.  */
expand_ubsan_result_store (rtx target, rtx res)
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
    emit_move_insn (target, res);
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
  rtx res, target = NULL_RTX;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     res = (S) ((U) s1 + (U) s2)
     ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     res = (S) ((U) s1 - (U) s2)
     ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     res = (S) ((U) s1 + u2)
     ovf = ((U) res ^ sgn) < u2
     ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     res = (S) ((U) s1 - u2)
     ovf = u2 > ((U) s1 ^ sgn)
     ovf = s1 < 0 || u2 > (U) s1
     ovf = u1 >= ((U) s2 ^ sgn)
     ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     res = (U) s1 + (U) s2
     ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     ovf = (U) res < u2 || res < 0
     ovf = u1 >= u2 ? res < 0 : res >= 0
     res = (U) s1 - (U) s2
     ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
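
  /* A worked instance of the first (s1 + s2) rule, assuming 8-bit precision
     for brevity: s1 = 100, s2 = 50 gives
     res = (S) ((U) 100 + (U) 50) = (S) 150 = -106; s2 >= 0 and res < s1,
     so overflow is flagged.  With s1 = 100, s2 = -50 we get res = 50;
     since s2 < 0 the test is res > s1, which is false, so no overflow.  */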
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);

  if (uns0_p && uns1_p && unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
  if (!uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);

  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;

  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      int pos_neg = get_range_pos_neg (arg0);
      /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, PROB_VERY_UNLIKELY);
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
  if (!uns0_p && !uns1_p && unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
              std::swap (op0, op1);
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                              ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, PROB_VERY_LIKELY);
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label,
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);
    enum insn_code icode;
    icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
        struct expand_operand ops[4];
        rtx_insn *last = get_last_insn ();

        res = gen_reg_rtx (mode);
        create_output_operand (&ops[0], res, mode);
        create_input_operand (&ops[1], op0, mode);
        create_input_operand (&ops[2], op1, mode);
        create_fixed_operand (&ops[3], do_error);
        if (maybe_expand_insn (icode, 4, ops))
            last = get_last_insn ();
            if (profile_status_for_fn (cfun) != PROFILE_ABSENT
                && any_condjump_p (last)
                && !find_reg_note (last, REG_BR_PROB, 0))
              add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
            emit_jump (done_label);

            delete_insns_since (last);
            icode = CODE_FOR_nothing;
    if (icode == CODE_FOR_nothing)
        rtx_code_label *sub_check = gen_label_rtx ();

        /* Compute the operation.  On RTL level, the addition is always
           unsigned.  */
        res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                            op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

        /* If we can prove one of the arguments (for MINUS_EXPR only
           the second operand, as subtraction is not commutative) is always
           non-negative or always negative, we can do just one comparison
           and conditional jump instead of 2 at runtime, 3 present in the
           emitted code.  If one of the arguments is CONST_INT, all we
           need is to make sure it is op1, then the first
           do_compare_rtx_and_jump will be just folded.  Otherwise try
           to use range info if available.  */
        if (code == PLUS_EXPR && CONST_INT_P (op0))
          std::swap (op0, op1);
        else if (CONST_INT_P (op1))
        else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
            pos_neg = get_range_pos_neg (arg0);
              std::swap (op0, op1);
        if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
          pos_neg = get_range_pos_neg (arg1);

        /* If the op1 is negative, we have to use a different check.  */
          do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                                   NULL, sub_check, PROB_EVEN);

        /* Compare the result of the operation with one of the operands.  */
          do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                                   false, mode, NULL_RTX, NULL, done_label,

        /* If we get here, we have to print the error.  */
        emit_jump (do_error);
        emit_label (sub_check);

        /* We have k = a + b for b < 0 here.  k <= a must hold.  */
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                                 false, mode, NULL_RTX, NULL, done_label,
  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
  do_pending_stack_adjust ();
    write_complex_part (target, const1_rtx, true);

  emit_label (done_label);
        expand_ubsan_result_store (target, res);
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
        expand_arith_overflow_result_store (lhs, target, mode, res);
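
/* For reference, a sketch of the GIMPLE this handles (assuming a call that
   was lowered from __builtin_add_overflow rather than from UBSAN):

     _c = ADD_OVERFLOW (a, b);
     _res = REALPART_EXPR <_c>;
     _ovf = IMAGPART_EXPR <_c>;

   The code above writes the arithmetic result into the real part of TARGET
   and the overflow flag into the imaginary part, matching that shape.  */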
/* Add negate overflow checking to the statement STMT.  */
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);

          delete_insns_since (last);
          icode = CODE_FOR_nothing;

  if (icode == CODE_FOR_nothing)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);

  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
  do_pending_stack_adjust ();
    write_complex_part (target, const1_rtx, true);

  emit_label (done_label);
        expand_ubsan_result_store (target, res);
        expand_arith_overflow_result_store (lhs, target, mode, res);
/* Add mul overflow checking to the statement STMT.  */
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);

    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     res = (S) ((U) s1 * (U) s2)
     ovf = main_ovf (false)
     ovf = main_ovf (true)
     ovf = (s1 < 0 && u2) || main_ovf (true)
     ovf = res < 0 || main_ovf (true)
     res = (S) ((U) s1 * u2)
     ovf = (S) u2 >= 0 ? main_ovf (false)
           : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
     t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
     ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
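
  /* A worked instance of the "ovf = (s1 < 0 && u2) || main_ovf (true)" rule,
     assuming 8-bit precision: s1 = -1, u2 = 5 gives res = (U) (-1 * 5) = 251,
     but the mathematical product -5 cannot be represented as an unsigned
     result, so the "s1 < 0 && u2 != 0" test flags overflow without ever
     reaching the main algorithm.  */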
  if (uns0_p && !uns1_p)
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  if (!uns0_p && uns1_p && unsr_p)
      /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
      rtx_code_label *do_main_label;
      do_main_label = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, do_main_label, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL, do_main_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (do_main_label);
  if (uns0_p && uns1_p && !unsr_p)
      /* Rest of handling of this case after res is computed.  */

  if (!uns0_p && uns1_p && !unsr_p)
          /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
             avoid the main code, just multiply and signal overflow
             unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL, do_error, PROB_VERY_UNLIKELY);
          prec = GET_MODE_PRECISION (mode);
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
      /* Rest of handling of this case after res is computed.  */
  if (!uns0_p && !uns1_p && unsr_p)
      switch (pos_neg0 | pos_neg1)
        case 1: /* Both operands known to be non-negative.  */
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the values.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          if ((pos_neg0 ^ pos_neg1) == 3)
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
              goto do_error_label;
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, after_negate_label, PROB_VERY_LIKELY);
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             them.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          /* One argument is negative here, the other positive.  This
             overflows always, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, thus we can keep do_main code oring in overflow as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);

          delete_insns_since (last);
          icode = CODE_FOR_nothing;
  if (icode == CODE_FOR_nothing)
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;

      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);

            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          else if (pos_neg0 == 2)
          else if (pos_neg1 == 2)

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
          else if (op1_small_p)
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
          else if (op0_sign == op1_sign)
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
          if (GET_CODE (lopart1) == SUBREG)
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);

          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);
          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             We perform lopart0 * lopart1 and lopart * hipart widening
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
          if (larger_sign == 0)
            emit_jump (after_hipart_neg);
          else if (larger_sign != -1)
            do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                     NULL_RTX, NULL, after_hipart_neg,

          tem = convert_modes (mode, hmode, lopart, 1);
          tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
          emit_move_insn (loxhi, tem);

          emit_label (after_hipart_neg);

          /* if (lopart < 0) loxhi -= larger; */
          if (smaller_sign == 0)
            emit_jump (after_lopart_neg);
          else if (smaller_sign != -1)
            do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                     NULL_RTX, NULL, after_lopart_neg,

          tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
          emit_move_insn (loxhi, tem);

          emit_label (after_lopart_neg);

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
          emit_move_insn (loxhi, tem);

          /* if (loxhi >> (bitsize / 2)
                 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
             if (loxhi >> (bitsize / 2) == 0 (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   PROB_VERY_UNLIKELY);
          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);
          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
          emit_move_insn (res, tem);
          emit_jump (done_label);

          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and highpart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);

              tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                         NULL_RTX, 1, OPTAB_DIRECT);
              do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                         NULL_RTX, 1, OPTAB_DIRECT);
              do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);

              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is negative, if they are
                 different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL, hipart_different,

              do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

              emit_label (hipart_different);

              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

          emit_label (do_overflow);

          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);

  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
  do_pending_stack_adjust ();
    write_complex_part (target, const1_rtx, true);

  emit_label (done_label);
  if (uns0_p && uns1_p && !unsr_p)
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);

  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
                               all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);

        expand_ubsan_result_store (target, res);
        expand_arith_overflow_result_store (lhs, target, mode, res);
/* Expand UBSAN_CHECK_ADD call STMT.  */
expand_UBSAN_CHECK_ADD (gcall *stmt)
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                          false, false, false, true);
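
/* For context, a sketch of where these calls come from (assuming
   -fsanitize=signed-integer-overflow): for

     int f (int a, int b) { return a + b; }

   the instrumentation rewrites the addition as _1 = UBSAN_CHECK_ADD (a, b),
   and the expansion above emits an overflow-checked add whose error branch
   calls the __ubsan_handle_add_overflow-style runtime routine built by
   ubsan_build_overflow_builtin.  */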
/* Expand UBSAN_CHECK_SUB call STMT.  */
expand_UBSAN_CHECK_SUB (gcall *stmt)
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true);
/* Expand UBSAN_CHECK_MUL call STMT.  */
expand_UBSAN_CHECK_MUL (gcall *stmt)
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */
expand_arith_overflow (enum tree_code code, gimple *stmt)
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
        precop = MAX (prec0 + 1, prec1) + 1;
        precop = MAX (prec0, prec1 + 1) + 1;
  int orig_precres = precres;

      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          enum machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
      /* For sub-word operations, if target doesn't have them, start
         with precres widening right away, otherwise do it only
         if the most simple cases can't be used.  */
      if (WORD_REGISTER_OPERATIONS
          && orig_precres == precres
          && precres < BITS_PER_WORD)
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
                && prec1 <= precres)
               || ((!uns0_p || !uns1_p) && !unsr_p
                   && prec0 + uns0_p <= precres
                   && prec1 + uns1_p <= precres))
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
              if (integer_zerop (arg0) && !unsr_p)
                  expand_neg_overflow (loc, lhs, arg1, false);
              expand_addsub_overflow (loc, code, lhs, arg0, arg1,
                                      unsr_p, unsr_p, unsr_p, false);
              expand_mul_overflow (loc, lhs, arg0, arg1,
                                   unsr_p, unsr_p, unsr_p, false);
      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
#if WORD_REGISTER_OPERATIONS
          int p = BITS_PER_WORD;
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
          p = TYPE_PRECISION (optype);
          unsr_p = TYPE_UNSIGNED (optype);

      if (prec0 <= precres && prec1 <= precres)
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = build_nonstandard_integer_type (precres, 1);
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false);
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false);

      /* Retry with a wider type.  */
      if (orig_precres == precres)
          int p = MAX (prec0, prec1);
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
          p = TYPE_PRECISION (optype);
          unsr_p = TYPE_UNSIGNED (optype);
/* Expand ADD_OVERFLOW STMT.  */
expand_ADD_OVERFLOW (gcall *stmt)
  expand_arith_overflow (PLUS_EXPR, stmt);

/* Expand SUB_OVERFLOW STMT.  */
expand_SUB_OVERFLOW (gcall *stmt)
  expand_arith_overflow (MINUS_EXPR, stmt);

/* Expand MUL_OVERFLOW STMT.  */
expand_MUL_OVERFLOW (gcall *stmt)
  expand_arith_overflow (MULT_EXPR, stmt);
/* This should get folded in tree-vectorizer.c.  */
expand_LOOP_VECTORIZED (gcall *)

expand_MASK_LOAD (gcall *stmt)
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
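
/* For intuition: the vectorizer emits MASK_LOAD for conditional loads
   such as

       for (i = 0; i < n; i++)
         if (c[i])
           sum += a[i];

   where the vector of c[i] != 0 results becomes the mask operand; assuming
   the target provides maskload (e.g. AVX2/AVX-512 masked moves), the call
   expands to a single predicated load.  */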
expand_MASK_STORE (gcall *stmt)
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);

expand_ABNORMAL_DISPATCHER (gcall *)
expand_BUILTIN_EXPECT (gcall *stmt)
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  tree lhs = gimple_call_lhs (stmt);
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
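
/* For reference, a source-level use looks like

     if (__builtin_expect (p == NULL, 0))
       abort ();

   When branch probabilities are being guessed, earlier passes consume and
   strip the hint (hence the assert above); if the call survives to
   expansion it simply collapses to its first argument.  */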
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
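
/* With the same hypothetical DEF_INTERNAL_FN (LOAD_LANES, ...) entry used as
   an example earlier, this third expansion contributes "expand_LOAD_LANES,"
   to the initializer, so the table index of each IFN_* code matches the
   expander defined above.  */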
/* Expand STMT, which is a call to internal function FN.  */
expand_internal_call (gcall *stmt)
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);