/* Internal functions.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
29 #include "stringpool.h"
31 #include "tree-ssanames.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
42 #include "stringpool.h"
48 #include "optabs-tree.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}

/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
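
/* An illustrative reading of the initializers above: the three fields of
   direct_internal_fn_info are { type0, type1, vectorizable }, where type0
   and type1 select which call argument (or -1 for the return value, -2 for
   "not directly mapped to an optab") supplies the type that determines the
   optab mode or modes.  For example, mask_store_direct is { 3, 2, false }:
   for a MASK_STORE call the modes come from argument 3 (the stored value)
   and argument 2 (the mask).  */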

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}

/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
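
/* Usage sketch (illustrative, simplified): for an interleaved access group
   the vectorizer emits GIMPLE along the lines of

       vect_array = .LOAD_LANES (MEM <int[8]> [(int *) vectp]);
       vect_x = vect_array[0];
       vect_y = vect_array[1];

   and the expanders above map such calls to the target's
   vec_load_lanes/vec_store_lanes instructions (e.g. ld2/st2 on AArch64),
   with the array mode and the vector mode together selecting the insn.  */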

static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_USE_SIMT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Allocate per-lane storage and begin non-uniform execution region.  */

static void
expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
{
  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = gen_reg_rtx (Pmode);
  rtx size = expand_normal (gimple_call_arg (stmt, 0));
  rtx align = expand_normal (gimple_call_arg (stmt, 1));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, Pmode);
  create_input_operand (&ops[1], size, Pmode);
  create_input_operand (&ops[2], align, Pmode);
  gcc_assert (targetm.have_omp_simt_enter ());
  expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
}

/* Deallocate per-lane storage and leave non-uniform execution region.  */

static void
expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
{
  gcc_checking_assert (!gimple_call_lhs (stmt));
  rtx arg = expand_normal (gimple_call_arg (stmt, 0));
  struct expand_operand ops[1];
  create_input_operand (&ops[0], arg, Pmode);
  gcc_assert (targetm.have_omp_simt_exit ());
  expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
}

/* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
   without SIMT execution this should be expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (targetm.have_omp_simt_lane ());
  emit_insn (targetm.gen_omp_simt_lane (target));
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Lane index of the first SIMT lane that supplies a non-zero argument.
   This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
   lane that executed the last iteration for handling OpenMP lastprivate.  */

static void
expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_last_lane ());
  expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
}

/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */

static void
expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], ctr, mode);
  gcc_assert (targetm.have_omp_simt_ordered ());
  expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
}
277 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
278 any lane supplies a non-zero argument. */
281 expand_GOMP_SIMT_VOTE_ANY (internal_fn
, gcall
*stmt
)
283 tree lhs
= gimple_call_lhs (stmt
);
287 rtx target
= expand_expr (lhs
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
288 rtx cond
= expand_normal (gimple_call_arg (stmt
, 0));
289 machine_mode mode
= TYPE_MODE (TREE_TYPE (lhs
));
290 struct expand_operand ops
[2];
291 create_output_operand (&ops
[0], target
, mode
);
292 create_input_operand (&ops
[1], cond
, mode
);
293 gcc_assert (targetm
.have_omp_simt_vote_any ());
294 expand_insn (targetm
.code_for_omp_simt_vote_any
, 2, ops
);

/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
   is destination lane index XOR given offset.  */

static void
expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_bfly ());
  expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
}

/* Exchange between SIMT lanes according to given source lane index.  */

static void
expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_idx ());
  expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
}
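
/* Usage sketch (illustrative): SIMT lowering uses the butterfly exchange to
   build warp-level reductions in log2 (vf) steps, conceptually

       for (mask = vf / 2; mask > 0; mask >>= 1)
	 sum += .GOMP_SIMT_XCHG_BFLY (sum, mask);

   so every lane ends up with the combined value of all vf lanes, while
   GOMP_SIMT_XCHG_IDX instead reads from an arbitrary source lane.  */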

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_PTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON_USE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the lower pass.  */

static void
expand_FALLTHROUGH (internal_fn, gcall *call)
{
  error_at (gimple_location (call),
	    "invalid use of attribute %<fallthrough%>");
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
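
/* Worked example (illustrative): if ARG is an SSA_NAME of a 32-bit type
   with computed range [0, 100], get_min_precision (arg, UNSIGNED) yields 7
   (100 fits in 7 value bits), while get_min_precision (arg, SIGNED) yields
   8 (7 value bits plus a sign bit).  The "+ (orig_sign != sign)" in the
   return values accounts for the extra bit needed when the precision was
   derived under a signedness different from the one the caller asked
   for.  */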

/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
	{
	  rtx mask
	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
				    tgtmode);
	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
				      true, OPTAB_LIB_WIDEN);
	}
      else
	{
	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
			       NULL_RTX, 1);
	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
			       NULL_RTX, 0);
	}
      do_compare_rtx_and_jump (res, lres,
			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
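
/* For reference: the {ADD,SUB,MUL}_OVERFLOW internal functions return a
   complex pair whose __real__ part is the (wrapped) result and whose
   __imag__ part is the overflow flag, so a consumer of the result looks
   roughly like

       _1 = .ADD_OVERFLOW (a_2, b_3);
       res_4 = REALPART_EXPR <_1>;
       ovf_5 = IMAGPART_EXPR <_1>;

   which is why the helpers above write the two complex parts of TARGET
   separately.  */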

/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan, tree *datap)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
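
  /* Worked instance (illustrative) of the "s1 + u2 -> sr" rule above with
     8-bit precision, where sgn == 0x80: for s1 = 100, u2 = 30 the truncated
     result is res = (S) 130 = -126 and ((U) res ^ sgn) = 0x02 < 30, so
     overflow is reported.  For s1 = 100, u2 = 20, res = 120 and
     ((U) res ^ sgn) = 0xf8 = 248 >= 20, so no overflow.  */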

  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur  */
  if (uns0_p && uns1_p && unsr_p)
    {
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
				       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  struct expand_operand ops[4];
	  rtx_insn *last = get_last_insn ();

	  res = gen_reg_rtx (mode);
	  create_output_operand (&ops[0], res, mode);
	  create_input_operand (&ops[1], op0, mode);
	  create_input_operand (&ops[2], op1, mode);
	  create_fixed_operand (&ops[3], do_error);
	  if (maybe_expand_insn (icode, 4, ops))
	    {
	      last = get_last_insn ();
	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		  && JUMP_P (last)
		  && any_condjump_p (last)
		  && !find_reg_note (last, REG_BR_PROB, 0))
		add_reg_br_prob_note (last,
				      profile_probability::very_unlikely ());
	      emit_jump (done_label);
	      goto do_error_label;
	    }

	  delete_insns_since (last);
	}

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      goto do_error_label;
    }

  /* s1 +- u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
      goto do_error_label;
    }

  /* s1 + u2 -> ur  */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur  */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, do_error,
				 profile_probability::very_unlikely ());
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
      goto do_error_label;
    }

  /* u1 - s2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
      goto do_error_label;
    }

  /* u1 + u2 -> sr  */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, do_error,
			       profile_probability::very_unlikely ());
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
      goto do_error_label;
    }

  /* s1 +- s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      std::swap (op0, op1);
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
				   NULL, done_label,
				   profile_probability::very_likely ());
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL, do_ior_label,
				   profile_probability::even ());
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label,
				   profile_probability::very_likely ());
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label,
				   profile_probability::very_likely ());
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
			       op0_geu_op1, profile_probability::even ());
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, done_label,
			       profile_probability::very_likely ());
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, done_label,
			       profile_probability::very_likely ());
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr  */
 do_signed:
  {
    insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
				     : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
      {
	struct expand_operand ops[4];
	rtx_insn *last = get_last_insn ();

	res = gen_reg_rtx (mode);
	create_output_operand (&ops[0], res, mode);
	create_input_operand (&ops[1], op0, mode);
	create_input_operand (&ops[2], op1, mode);
	create_fixed_operand (&ops[3], do_error);
	if (maybe_expand_insn (icode, 4, ops))
	  {
	    last = get_last_insn ();
	    if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		&& JUMP_P (last)
		&& any_condjump_p (last)
		&& !find_reg_note (last, REG_BR_PROB, 0))
	      add_reg_br_prob_note (last,
				    profile_probability::very_unlikely ());
	    emit_jump (done_label);
	    goto do_error_label;
	  }

	delete_insns_since (last);
      }

    /* Compute the operation.  On RTL level, the addition is always
       unsigned.  */
    res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

    /* If we can prove that one of the arguments (for MINUS_EXPR only
       the second operand, as subtraction is not commutative) is always
       non-negative or always negative, we can do just one comparison
       and conditional jump.  */
    int pos_neg = get_range_pos_neg (arg1);
    if (code == PLUS_EXPR)
      {
	int pos_neg0 = get_range_pos_neg (arg0);
	if (pos_neg0 != 3 && pos_neg == 3)
	  {
	    std::swap (op0, op1);
	    pos_neg = pos_neg0;
	  }
      }

    /* Addition overflows if and only if the two operands have the same sign,
       and the result has the opposite sign.  Subtraction overflows if and
       only if the two operands have opposite sign, and the subtrahend has
       the same sign as the result.  Here 0 is counted as positive.  */
    if (pos_neg == 3)
      {
	/* Compute op0 ^ op1 (operands have opposite sign).  */
	rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
				   OPTAB_LIB_WIDEN);

	/* Compute res ^ op1 (result and 2nd operand have opposite sign).  */
	rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
				    OPTAB_LIB_WIDEN);

	rtx tem;
	if (code == PLUS_EXPR)
	  {
	    /* Compute (res ^ op1) & ~(op0 ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }
	else
	  {
	    /* Compute (op0 ^ op1) & ~(res ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }

	/* No overflow if the result has bit sign cleared.  */
	do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				 NULL, done_label,
				 profile_probability::very_likely ());
      }

    /* Compare the result of the operation with the first operand.
       No overflow for addition if second operand is positive and result
       is larger or second operand is negative and result is smaller.
       Likewise for subtraction with sign of second operand flipped.  */
    else
      do_compare_rtx_and_jump (res, op0,
			       (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
			       false, mode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
  }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}
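
/* For orientation (illustrative): the !is_ubsan paths above implement
   user-level calls like

       int r;
       bool ovf = __builtin_add_overflow (a, b, &r);

   which are lowered to ADD_OVERFLOW/SUB_OVERFLOW internal calls, while the
   is_ubsan paths implement -fsanitize=signed-integer-overflow
   (UBSAN_CHECK_ADD/UBSAN_CHECK_SUB), where the overflowing case branches
   to a call of the ubsan runtime instead of setting a flag.  */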

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
		     tree *datap)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_reg_br_prob_note (last,
				  profile_probability::very_unlikely ());
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
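
/* Note: signed negation can only overflow for the most negative value of
   the type (e.g. -(-128) in an 8-bit type), which is exactly what the
   fallback above checks by comparing OP1 against TYPE_MIN_VALUE.  */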

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
		     tree *datap)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
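
  /* Worked instance (illustrative) of the "s1 * u2 -> sr" rule above with
     8-bit precision: for s1 = -2, u2 = 200, (S) u2 < 0 and the truncated
     product is res = 112; since s1 != 0 and s1 != -1, overflow is reported
     (the exact result -400 does not fit).  For s1 = -1, u2 = 128 = (U) Smin,
     res = (S) 128 = -128 and u2 == (U) res, so no overflow is reported,
     matching the "u2 != (U) res" escape in the rule.  */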

  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur  */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label,
				   profile_probability::very_likely ());
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label,
				   profile_probability::very_likely ());
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label,
				   profile_probability::very_likely ());
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label,
				   profile_probability::very_likely ());
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL, do_error,
				   profile_probability::very_unlikely ());
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL, done_label,
				   profile_probability::very_likely ());
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* s1 * s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the operands.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       profile_probability::very_likely ());
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, after_negate_label,
				   profile_probability::very_likely ());
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label,
				   profile_probability::very_likely ());
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label,
				   profile_probability::very_likely ());
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_reg_br_prob_note (last,
				  profile_probability::very_unlikely ());
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      scalar_int_mode hmode;
      scalar_int_mode wmode;
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
	  && targetm.scalar_mode_supported_p (wmode))
	{
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = convert_modes (mode, wmode, hipart, uns);
	  res = convert_modes (mode, wmode, res, uns);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL, done_label,
				     profile_probability::very_likely ());
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       profile_probability::very_likely ());
	    }
	}
      else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
	       && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = convert_modes (hmode, mode, hipart0, uns);
	  rtx lopart0 = convert_modes (hmode, mode, op0, uns);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = convert_modes (hmode, mode, hipart1, uns);
	  rtx lopart1 = convert_modes (hmode, mode, op1, uns);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halves.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL, large_op0,
				     profile_probability::unlikely ());

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, small_op0_large_op1,
				     profile_probability::unlikely ());

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, both_ops_large,
				     profile_probability::unlikely ());

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_hipart_neg,
					 profile_probability::even ());

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_lopart_neg,
					 profile_probability::even ());

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
	     if (loxhi >> (bitsize / 2) == 0		 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 convert_modes (hmode, mode,
							loxhi, 0),
					 hprec - 1, NULL_RTX, 0);

	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
				   NULL_RTX, NULL, do_overflow,
				   profile_probability::very_unlikely ());

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode,
			       convert_modes (hmode, mode, lo0xlo1, 1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and highpart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   profile_probability::very_unlikely ());
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   profile_probability::very_unlikely ());
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
					 NULL_RTX, NULL, hipart_different,
					 profile_probability::even ());

	      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
				       NULL_RTX, NULL, do_error,
				       profile_probability::very_unlikely ());
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
				       NULL_RTX, NULL, do_error,
				       profile_probability::very_unlikely ());
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
			       NULL, set_noovf,
			       profile_probability::very_likely ());
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
			       NULL, all_done_label,
			       profile_probability::very_unlikely ());
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
			       all_done_label,
			       profile_probability::very_unlikely ());
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Expand UBSAN_CHECK_* internal function if it has vector operands.  */

static void
expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
                              tree arg0, tree arg1)
{
  int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
  rtx_code_label *loop_lab = NULL;
  rtx cntvar = NULL_RTX;
  tree cntv = NULL_TREE;
  tree eltype = TREE_TYPE (TREE_TYPE (arg0));
  tree sz = TYPE_SIZE (eltype);
  tree data = NULL_TREE;
  tree resv = NULL_TREE;
  rtx lhsr = NULL_RTX;
  rtx resvr = NULL_RTX;

  if (lhs)
    {
      optab op;
      lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!VECTOR_MODE_P (GET_MODE (lhsr))
          || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
                                        optab_default)) == unknown_optab
          || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
              == CODE_FOR_nothing))
        {
          if (MEM_P (lhsr))
            resv = make_tree (TREE_TYPE (lhs), lhsr);
          else
            {
              resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
              resv = make_tree (TREE_TYPE (lhs), resvr);
            }
        }
    }
  if (cnt > 4)
    {
      do_pending_stack_adjust ();
      loop_lab = gen_label_rtx ();
      cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
      cntv = make_tree (sizetype, cntvar);
      emit_move_insn (cntvar, const0_rtx);
      emit_label (loop_lab);
    }
  if (TREE_CODE (arg0) != VECTOR_CST)
    {
      rtx arg0r = expand_normal (arg0);
      arg0 = make_tree (TREE_TYPE (arg0), arg0r);
    }
  if (TREE_CODE (arg1) != VECTOR_CST)
    {
      rtx arg1r = expand_normal (arg1);
      arg1 = make_tree (TREE_TYPE (arg1), arg1r);
    }
  for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
    {
      tree op0, op1, res = NULL_TREE;
      if (cnt > 4)
        {
          tree atype = build_array_type_nelts (eltype, cnt);
          op0 = uniform_vector_p (arg0);
          if (op0 == NULL_TREE)
            {
              op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
              op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
                                NULL_TREE, NULL_TREE);
            }
          op1 = uniform_vector_p (arg1);
          if (op1 == NULL_TREE)
            {
              op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
              op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
                                NULL_TREE, NULL_TREE);
            }
          if (resv)
            {
              res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
              res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
                                NULL_TREE, NULL_TREE);
            }
        }
      else
        {
          tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
          op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
          op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
          if (resv)
            res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
                                   bitpos);
        }
      switch (code)
        {
        case PLUS_EXPR:
          expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
                                  false, false, false, true, &data);
          break;
        case MINUS_EXPR:
          if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
            expand_neg_overflow (loc, res, op1, true, &data);
          else
            expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
                                    false, false, false, true, &data);
          break;
        case MULT_EXPR:
          expand_mul_overflow (loc, res, op0, op1, false, false, false,
                               true, &data);
          break;
        default:
          gcc_unreachable ();
        }
    }
  if (cnt > 4)
    {
      struct separate_ops ops;
      ops.code = PLUS_EXPR;
      ops.type = TREE_TYPE (cntv);
      ops.op0 = cntv;
      ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
                                    EXPAND_NORMAL);
      if (ret != cntvar)
        emit_move_insn (cntvar, ret);
      do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
                               TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
                               profile_probability::very_likely ());
    }
  if (lhs && resv == NULL_TREE)
    {
      struct separate_ops ops;
      ops.code = code;
      ops.type = TREE_TYPE (arg0);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
                                    EXPAND_NORMAL);
      if (ret != lhsr)
        emit_move_insn (lhsr, ret);
    }
  else if (resvr)
    emit_move_insn (lhsr, resvr);
}
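
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): for a vector check such as

     _5 = UBSAN_CHECK_ADD (x_2, y_3);

   on a four-element vector without a usable vector add, the function
   above extracts each element with BIT_FIELD_REF and runs the scalar
   overflow expansion on it; for vectors with more than four elements
   it emits a runtime loop indexed by CNTVAR instead of unrolling.  */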

/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
  else
    expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}
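
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): -fsanitize=signed-integer-overflow rewrites a signed
   addition "c = a + b" into

     c_4 = UBSAN_CHECK_ADD (a_2, b_3);

   which computes the sum and branches to a ubsan diagnostic on
   overflow; the expanders here are what turn that call into RTL.  */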

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
  else if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true, NULL);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
  else
    expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
                         NULL);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

      /* For operations with low precision, if target doesn't have them, start
         with precres widening right away, otherwise do it only if the most
         simple cases can't be used.  */
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
        ;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
                && prec1 <= precres)
               || ((!uns0_p || !uns1_p) && !unsr_p
                   && prec0 + uns0_p <= precres
                   && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false, NULL);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                      unsr_p, unsr_p, false, NULL);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                   unsr_p, unsr_p, false, NULL);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
          int p = MAX (min_precision, precop);
          scalar_int_mode m = smallest_int_mode_for_size (p);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false, NULL);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false, NULL);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          scalar_int_mode m = smallest_int_mode_for_size (p);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}
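
/* Illustrative note (an addition to the original sources): precop above
   is the minimum precision that can represent every exact result.
   E.g. for ADD_OVERFLOW on two unsigned 8-bit operands,
   precop = MAX (8, 8) + 1 = 9; with an unsigned 16-bit result type
   (precres = 16, unsr_p set) the first branch applies, since
   precop + !unsr_p = 9 <= 16, so the addition can never overflow and
   is emitted as a plain PLUS_EXPR with the overflow flag cleared.  */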

/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
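
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): __builtin_add_overflow (a, b, &r) is lowered to GIMPLE as

     _6 = ADD_OVERFLOW (a_2, b_3);
     r_7 = REALPART_EXPR <_6>;
     ovf_8 = IMAGPART_EXPR <_6>;

   i.e. the internal function returns a complex pair of the truncated
   result and the overflow flag, which is why the expanders above
   write both parts of the target.  */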

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand MASK_LOAD call STMT using optab OPTAB.  */

static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}
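
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): the vectorizer emits masked loads for if-converted
   loops, e.g.

     vect__5 = MASK_LOAD (ptr_3, 32B, mask_4);

   where the second argument carries the alignment used to build the
   MEM_REF above and the third is the vector condition mask.  */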

/* Expand MASK_STORE call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, reg, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}
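
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): the store-side counterpart has no lhs and looks like

     MASK_STORE (ptr_3, 32B, mask_4, vect__5);

   only the lanes whose mask bit is set are written to memory.  */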

static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}

static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
                         EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy
   function should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
        pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
        {
          tree lhs = gimple_call_lhs (stmt);
          rtx target = const0_rtx;

          if (lhs)
            target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

          rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
          rtx axis = expand_normal (gimple_call_arg (stmt, 2));

          if (kind == IFN_UNIQUE_OACC_FORK)
            pattern = targetm.gen_oacc_fork (target, data_dep, axis);
          else
            pattern = targetm.gen_oacc_join (target, data_dep, axis);
        }
      else
        gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}

/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    emit_move_insn (target, GEN_INT (1));
}

/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}
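
/* Illustrative note (an addition to the original sources): targets that
   provide no oacc_dim_size/oacc_dim_pos patterns get the fallbacks
   above, i.e. every compute dimension has size 1 and every execution
   engine sits at position 0, which is the correct single-threaded
   host semantics.  */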

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_TILE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx
    = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
                  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}

/* Expand atomic bit test and set.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and complement.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and reset.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}
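
/* Illustrative note (an addition to the original sources): these
   internal functions are created when the result of an atomic
   fetch-and-or/and/xor is used only to test the affected bit, e.g.

     if (__atomic_fetch_or (&word, 1 << bit, __ATOMIC_SEQ_CST)
         & (1 << bit))

   which can then be expanded as a single atomic bit-test-and-set
   style instruction on targets that have one.  */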

/* Expand atomic compare and exchange.  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}

/* Expand LAUNDER to assignment, lhs = arg0.  */

static void
expand_LAUNDER (internal_fn, gcall *call)
{
  tree lhs = gimple_call_lhs (call);

  if (!lhs)
    return;

  expand_assignment (lhs, gimple_call_arg (call, 0), false);
}

/* Expand DIVMOD () using:
   a) optab handler for udivmod/sdivmod if it is available.
   b) If optab_handler doesn't exist, generate call to
      target-specific divmod libfunc.  */

static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check if optab_handler exists for divmod_optab for given mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate call to divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
                                   &quotient, &remainder);
  else
    gcc_unreachable ();

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
                       make_tree (TREE_TYPE (arg0), quotient),
                       make_tree (TREE_TYPE (arg1), remainder)),
               target, VOIDmode, EXPAND_NORMAL);
}
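
/* Illustrative note (an addition to the original sources, with made-up
   SSA names): the divmod pass merges a division and a modulo by the
   same operands into

     _8 = DIVMOD (a_2, b_3);
     q_4 = REALPART_EXPR <_8>;
     r_5 = IMAGPART_EXPR <_8>;

   so that a single udivmod/sdivmod instruction or libcall computes
   both results at once.  */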

/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
                        unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
        create_convert_operand_from (&ops[i + 1], rhs_rtx,
                                     TYPE_MODE (rhs_type),
                                     TYPE_UNSIGNED (rhs_type));
      else
        create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
         result to that type.  This is useful for things that return an
         int regardless of the size of the input.  If the instruction result
         is smaller than required, assume that it is signed.

         If the return value has a nonintegral type, its mode must match
         the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
        {
          /* If this is a scalar in a register that is stored in a wider
             mode than the declared mode, compute the result into its
             declared mode and then convert to the wider mode.  */
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
          convert_move (SUBREG_REG (lhs_rtx), tmp,
                        SUBREG_PROMOTED_SIGN (lhs_rtx));
        }
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
        emit_move_insn (lhs_rtx, ops[0].value);
      else
        {
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          convert_move (lhs_rtx, ops[0].value, 0);
        }
    }
}

/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)
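
/* Illustrative note (an addition to the original sources): the TYPE
   argument of DEF_INTERNAL_OPTAB_FN selects one of these wrappers, so
   an entry declared with type "unary" is expanded through
   expand_unary_optab_fn with a single input operand and a "binary"
   entry through expand_binary_optab_fn with two; the matching
   direct_*_optab_supported_p aliases below pick the corresponding
   supportedness check.  */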

/* RETURN_TYPE and ARGS are a return type and argument list that are
   in principle compatible with FN (which satisfies direct_internal_fn_p).
   Return the types that should be used to determine whether the
   target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
  tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
  return tree_pair (type0, type1);
}

/* CALL is a call whose return type and arguments are in principle
   compatible with FN (which satisfies direct_internal_fn_p).  Return the
   types that should be used to determine whether the target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, gcall *call)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree op0 = (info.type0 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type0));
  tree op1 = (info.type1 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type1));
  return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
}

/* Return true if OPTAB is supported for TYPES (whose modes should be
   the same) when the optimization type is OPT_TYPE.  Used for simple
   direct optabs.  */

static bool
direct_optab_supported_p (direct_optab optab, tree_pair types,
                          optimization_type opt_type)
{
  machine_mode mode = TYPE_MODE (types.first);
  gcc_checking_assert (mode == TYPE_MODE (types.second));
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}

/* Return true if load/store lanes optab OPTAB is supported for
   array type TYPES.first when the optimization type is OPT_TYPE.  */

static bool
multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
                                optimization_type opt_type)
{
  gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
  machine_mode imode = TYPE_MODE (types.first);
  machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
  return (convert_optab_handler (optab, imode, vmode, opt_type)
          != CODE_FOR_nothing);
}

#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p

/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
                                optimization_type opt_type)
{
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
                                                opt_type);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}

/* Return true if FN is supported for type TYPE when the optimization
   type is OPT_TYPE.  The caller knows that the "type0" and "type1"
   fields of FN's direct_internal_fn_info structure are the same.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree type,
                                optimization_type opt_type)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  gcc_checking_assert (info.type0 == info.type1);
  return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}
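
/* Illustrative note (an addition to the original sources): optimization
   passes use this single-type overload to ask whether a direct
   internal function, e.g. a unary one whose input and output types
   coincide, has a usable instruction for a given vector type before
   folding to it; functions whose "type0" and "type1" differ (such as
   the masked loads above) must go through the tree_pair overload.  */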

/* Return true if IFN_SET_EDOM is supported.  */

bool
set_edom_supported_p (void)
{
#ifdef TARGET_EDOM
  return true;
#else
  return false;
#endif
}

#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void						\
  expand_##CODE (internal_fn fn, gcall *stmt)		\
  {							\
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab);	\
  }
#include "internal-fn.def"

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};

/* Expand STMT as though it were a call to internal function FN.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}

void
expand_PHI (internal_fn, gcall *)
{
  gcc_unreachable ();
}