/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
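/* For illustration (the exact entries live in internal-fn.def): a definition
   such as DEF_INTERNAL_FN (LOAD_LANES, ECF_CONST | ECF_LEAF, NULL) expands to
   the string "LOAD_LANES" in internal_fn_name_array above, to
   ECF_CONST | ECF_LEAF in internal_fn_flags_array, and leaves its fnspec
   entry empty because the FNSPEC argument is NULL.  */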
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}
/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}
/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
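/* Illustrative note (not from the original source): LOAD_LANES and
   STORE_LANES calls are emitted by the vectorizer for interleaved accesses
   when the target provides vec_load_lanes/vec_store_lanes patterns, e.g. the
   structure load/store instructions on ARM and AArch64; the two expanders
   above simply map the gimple lhs/argument onto the operands of that single
   insn.  */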
static void
expand_ANNOTATE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
        return 2;
      else
        return 1;
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
                return 1;
              prec = TYPE_PRECISION (TREE_TYPE (t));
              arg = t;
              continue;
            }
        }
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 2;
    }
  return 3;
}
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
      if (++cnt > 30)
        return prec + (orig_sign != sign);
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              if (++cnt > 30)
                return prec + (orig_sign != sign);
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
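/* Worked example (illustration): for the INTEGER_CST 1000 in a 32-bit signed
   type, wi::min_precision (1000, SIGNED) is 11 (ten value bits plus the sign
   bit), so the function returns MIN (11, 32) = 11; asked with
   sign == UNSIGNED the answer is 10, since no sign bit is needed then.  */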
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
        res = u1 + u2
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
        res = u1 - u2
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
        t1 = (S) (u2 ^ sgn)
        t2 = s1 + t1
        res = (U) t2 ^ sgn
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
        res = (U) s1 - u2
        ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
        res = u1 - (U) s2
        ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
        t1 = u1 ^ sgn
        t2 = t1 - (U) s2
        res = t2 ^ sgn
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
        res = (S) (u1 + u2)
        ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
        res = (S) (u1 - u2)
        ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
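  /* A worked example of the "s1 + u2 -> sr" rule above, for illustration:
     with 8-bit precision sgn is 0x80.  For s1 = 100, u2 = 50 the wrapped
     result is res = (S) 150 = -106, and ((U) res ^ sgn) = 0x96 ^ 0x80 = 22,
     which is < 50, so overflow is signalled (the true sum 150 exceeds 127).
     For u2 = 20, res = 120 and 0x78 ^ 0x80 = 248 >= 20, so no overflow.  */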
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur  */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
        tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur  */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur  */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
        /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr  */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
        {
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
            {
              std::swap (op0, op1);
              pos_neg = pos_neg0;
            }
        }
      rtx tem;
      if (pos_neg != 3)
        {
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                                    ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      else
        {
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label,
                                   PROB_EVEN);
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      goto do_error_label;
    }

  /* u1 - u2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr  */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }
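  /* Note (illustration, not part of the original comments): when the target
     provides addv<mode>4/subv<mode>4 patterns, the branch above emits the
     whole overflow check as a single insn sequence, e.g. on x86 a plain
     add/sub followed by a jump-on-overflow to DO_ERROR; otherwise we fall
     back to the explicit comparisons below.  */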
  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
         the second operand, as subtraction is not commutative) is always
         non-negative or always negative, we can do just one comparison
         and conditional jump instead of 2 at runtime, 3 present in the
         emitted code.  If one of the arguments is CONST_INT, all we
         need is to make sure it is op1, then the first
         do_compare_rtx_and_jump will be just folded.  Otherwise try
         to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
        std::swap (op0, op1);
      else if (CONST_INT_P (op1))
        ;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
        {
          pos_neg = get_range_pos_neg (arg0);
          if (pos_neg != 3)
            std::swap (op0, op1);
        }
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
        pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
        do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
        {
          emit_jump (do_error);
          emit_label (sub_check);
        }

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        {
          if (do_xor)
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
                                OPTAB_LIB_WIDEN);

          expand_arith_overflow_result_store (lhs, target, mode, res);
        }
    }
}
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
     u1 * u2 -> ur
        res = u1 * u2
        ovf = main_ovf (true)
     s1 * u2 -> ur
        res = (U) s1 * u2
        ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
        res = (S) (u1 * u2)
        ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
                          : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        res = t1 * t2
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
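  /* Worked example of the "s1 * u2 -> ur" rule above (illustration, 8-bit
     precision): for s1 = -2, u2 = 3 we have s1 < 0 and u2 != 0, so overflow
     is signalled without looking at the product (the true result -6 is not
     representable unsigned); for s1 = 5, u2 = 50 the product 250 fits in 8
     unsigned bits and main_ovf (true) does not fire, so there is no
     overflow.  */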
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur  */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
        {
        case 1:
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          goto do_main;
        case 2:
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          rtx_code_label *do_main_label;
          do_main_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
        {
        case 1:
          goto do_main;
        case 2:
          /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
             avoid the main code, just multiply and signal overflow
             unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL, do_error, PROB_VERY_UNLIKELY);
          int prec;
          prec = GET_MODE_PRECISION (mode);
          rtx sgn;
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          /* Rest of handling of this case after res is computed.  */
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* s1 * s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
        {
        case 1: /* Both operands known to be non-negative.  */
          goto do_main;
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          goto do_main;
        case 3:
          if ((pos_neg0 ^ pos_neg1) == 3)
            {
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
              ops.type
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
                                                  1);
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              ops.location = loc;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                  OPTAB_LIB_WIDEN);
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
              goto do_error_label;
            }
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rtx rop0, rop1;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          op0 = rop0;
          op1 = rop1;
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, after_negate_label, PROB_VERY_LIKELY);
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
                                            NULL_RTX, false));
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
                                            NULL_RTX, false));
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
                               OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          /* One argument is negative here, the other positive.  This
             overflows always, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, thus we can keep do_main code oring in overflow as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
        {
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
          ops.type
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
                                     NULL_RTX, uns);
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);
          if (uns)
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     PROB_VERY_LIKELY);
          else
            {
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
                                          NULL_RTX, 0);
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
            }
        }
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
        {
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
                                      NULL_RTX, uns);
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
          if (!uns)
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
                                     NULL_RTX, 0);
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
                                      NULL_RTX, uns);
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
          if (!uns)
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
                                     NULL_RTX, 0);

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
             halves mode.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op{0,1}_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          int op0_sign = 1;
          int op1_sign = 1;

          if (pos_neg0 == 1)
            op0_sign = 0;
          else if (pos_neg0 == 2)
            op0_sign = -1;
          if (pos_neg1 == 1)
            op1_sign = 0;
          else if (pos_neg1 == 2)
            op1_sign = -1;

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
            op0_small_p = true;
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
            op1_small_p = true;
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
          if (op0_small_p)
            {
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
            }
          else if (op1_small_p)
            {
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
            }
          else if (op0_sign == op1_sign)
            {
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            }

          if (!op0_small_p)
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
                                     PROB_UNLIKELY);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,
                                     PROB_UNLIKELY);

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
            {
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          if (GET_CODE (lopart1) == SUBREG)
            {
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
          ops.type = type;

          rtx thisres
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);
          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,
                                     PROB_UNLIKELY);

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);

          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
          rtx lo0xlo1
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          if (!uns)
            {
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_hipart_neg,
                                         PROB_EVEN);

              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_hipart_neg);

              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_lopart_neg,
                                         PROB_EVEN);

              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_lopart_neg);
            }

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
                                     1, OPTAB_DIRECT);
          emit_move_insn (loxhi, tem);

          /* if (loxhi >> (bitsize / 2)
                 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
             if (loxhi >> (bitsize / 2) == 0             (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
                                          NULL_RTX, 0);
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
          if (!uns)
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   PROB_VERY_UNLIKELY);

          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
                                           NULL_RTX, 1);
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
                                     1, OPTAB_DIRECT);
          if (tem != res)
            emit_move_insn (res, tem);
          emit_jump (done_label);
          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and hipart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);

          if (!uns)
            {
              if (!op0_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              if (!op1_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is negative, if they are
                 different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL, hipart_different,
                                         PROB_EVEN);

              do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

              emit_label (hipart_different);

              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);
            }

          emit_label (do_overflow);

          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
        }
      else
        {
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          ops.type = type;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);
        }
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
                               all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                          false, false, false, true);
}
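/* For context (illustration): with -fsanitize=signed-integer-overflow the
   sanitizer instrumentation replaces a signed "a + b" with a call to this
   internal function, so the inline check from expand_addsub_overflow plus a
   call into the ubsan runtime on the overflow path is what ends up in the
   generated code.  */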
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
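  /* For example (illustration): for __builtin_add_overflow on two unsigned
     16-bit operands with a 32-bit unsigned result, precop is
     MAX (16, 16) + 1 = 17, so the "infinite precision result always fits"
     path below is taken and no runtime overflow check is needed; for two
     signed 32-bit operands with a signed 32-bit result, precop is 33 and
     the expanders with runtime checks are used instead.  */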
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          enum machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

      /* For sub-word operations, if target doesn't have them, start
         with precres widening right away, otherwise do it only
         if the most simple cases can't be used.  */
      if (WORD_REGISTER_OPERATIONS
          && orig_precres == precres
          && precres < BITS_PER_WORD)
        ;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
                && prec1 <= precres)
               || ((!uns0_p || !uns1_p) && !unsr_p
                   && prec0 + uns0_p <= precres
                   && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1,
                                      unsr_p, unsr_p, unsr_p, false);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1,
                                   unsr_p, unsr_p, unsr_p, false);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
#if WORD_REGISTER_OPERATIONS
          int p = BITS_PER_WORD;
#else
          int p = precop;
#endif
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *)
{
  gcc_unreachable ();
}
static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
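/* Illustrative note (not from the original source): MASK_LOAD/MASK_STORE are
   generated by the vectorizer for conditional accesses such as
   "if (c[i]) x = a[i];" on targets whose maskload/maskstore optabs are
   implemented, e.g. AVX2 or AVX-512 masked moves; the expanders here map the
   gimple call directly onto that single masked memory insn.  */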
static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}
static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */

static void
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
        pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
        {
          tree lhs = gimple_call_lhs (stmt);
          rtx target = const0_rtx;

          if (lhs)
            target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

          rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
          rtx axis = expand_normal (gimple_call_arg (stmt, 2));

          if (kind == IFN_UNIQUE_OACC_FORK)
            pattern = targetm.gen_oacc_fork (target, data_dep, axis);
          else
            pattern = targetm.gen_oacc_join (target, data_dep, axis);
        }
      else
        gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}
2010 expand_GOACC_DIM_SIZE (gcall
*stmt
)
2012 tree lhs
= gimple_call_lhs (stmt
);
2017 rtx target
= expand_expr (lhs
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
2018 if (targetm
.have_oacc_dim_size ())
2020 rtx dim
= expand_expr (gimple_call_arg (stmt
, 0), NULL_RTX
,
2021 VOIDmode
, EXPAND_NORMAL
);
2022 emit_insn (targetm
.gen_oacc_dim_size (target
, dim
));
2025 emit_move_insn (target
, GEN_INT (1));
/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}
/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};
/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}