/* Internal functions.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "optabs-tree.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
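
/* As an illustration of the tables above: a hypothetical entry
   DEF_INTERNAL_FN (FOO, ECF_LEAF, ".") in internal-fn.def would contribute
   the name "FOO", the flags ECF_LEAF and the fnspec string "." at index
   IFN_FOO of the three arrays.  */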
/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
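
/* Each initializer above has the form { type0, type1, vectorizable }:
   type0 and type1 give the indices of the call arguments whose types
   determine the modes used for the optab lookup (-1 means the return
   type, -2 that the function is not directly mapped to an optab), and
   the last field says whether the function may be used in vectorized
   code; see direct_internal_fn_info in internal-fn.h.  */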
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}
/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
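
/* Both expanders above operate on calls whose memory operand is an
   array-of-vectors value: the whole group of vectors is loaded or stored
   by a single instruction selected through get_multi_vector_move from the
   target's load-lanes/store-lanes style optab.  */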
static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_USE_SIMT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Allocate per-lane storage and begin non-uniform execution region.  */

static void
expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
{
  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = gen_reg_rtx (Pmode);
  rtx size = expand_normal (gimple_call_arg (stmt, 0));
  rtx align = expand_normal (gimple_call_arg (stmt, 1));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, Pmode);
  create_input_operand (&ops[1], size, Pmode);
  create_input_operand (&ops[2], align, Pmode);
  gcc_assert (targetm.have_omp_simt_enter ());
  expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
}
/* Deallocate per-lane storage and leave non-uniform execution region.  */

static void
expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
{
  gcc_checking_assert (!gimple_call_lhs (stmt));
  rtx arg = expand_normal (gimple_call_arg (stmt, 0));
  struct expand_operand ops[1];
  create_input_operand (&ops[0], arg, Pmode);
  gcc_assert (targetm.have_omp_simt_exit ());
  expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
}
/* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
   without SIMT execution this should be expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (targetm.have_omp_simt_lane ());
  emit_insn (targetm.gen_omp_simt_lane (target));
}
/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Lane index of the first SIMT lane that supplies a non-zero argument.
   This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
   lane that executed the last iteration for handling OpenMP lastprivate.  */

static void
expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_last_lane ());
  expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
}
/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */

static void
expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], ctr, mode);
  gcc_assert (targetm.have_omp_simt_ordered ());
  expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
}
/* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
   any lane supplies a non-zero argument.  */

static void
expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_vote_any ());
  expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
}
/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
   is destination lane index XOR given offset.  */

static void
expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_bfly ());
  expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
}
/* Exchange between SIMT lanes according to given source lane index.  */

static void
expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_idx ());
  expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
}
/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_PTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON_USE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* This should get expanded in the lower pass.  */

static void
expand_FALLTHROUGH (internal_fn, gcall *call)
{
  error_at (gimple_location (call),
            "invalid use of attribute %<fallthrough%>");
}
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
      if (++cnt > 30)
        return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              if (++cnt > 30)
                return prec + (orig_sign != sign);
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
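
/* For instance, a signed SSA name with recorded value range [0, 100] needs
   at most 8 bits when queried with sign == SIGNED (7 value bits plus a sign
   bit) and 7 bits when queried with sign == UNSIGNED.  */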
/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  scalar_int_mode tgtmode
    = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      if (uns)
        {
          rtx mask
            = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
                                    tgtmode);
          lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
                                      true, OPTAB_LIB_WIDEN);
        }
      else
        {
          lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
                               NULL_RTX, 1);
          lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
                               NULL_RTX, 0);
        }
      do_compare_rtx_and_jump (res, lres,
                               EQ, true, tgtmode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan, tree *datap)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     u1 + u2 -> ur
        res = u1 + u2
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
        res = u1 - u2
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
        t1 = (S) (u2 ^ sgn)
        t2 = s1 + t1
        res = (U) t2 ^ sgn
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
        res = (U) s1 - u2
        ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
        res = (S) (u1 - (U) s2)
        ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
        t1 = u1 ^ sgn
        t2 = t1 - (U) s2
        res = t2 ^ sgn
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
     u1 + u2 -> sr
        res = (S) (u1 + u2)
        ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
        res = (S) (u1 - u2)
        ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
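
  /* Worked example of the s1 + u2 -> sr rule with 8-bit operands
     (sgn = 0x80): for s1 = 1, u2 = 255 the truncated sum is res = 0 and
     ((U) res ^ sgn) = 128 < 255, so overflow is reported, matching the
     fact that 256 does not fit in [-128, 127]; for s1 = -1, u2 = 128 we
     get res = 127 and ((U) res ^ sgn) = 255 >= 128, so no overflow.  */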
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }
734 if (uns0_p
&& uns1_p
&& unsr_p
)
736 insn_code icode
= optab_handler (code
== PLUS_EXPR
? uaddv4_optab
737 : usubv4_optab
, mode
);
738 if (icode
!= CODE_FOR_nothing
)
740 struct expand_operand ops
[4];
741 rtx_insn
*last
= get_last_insn ();
743 res
= gen_reg_rtx (mode
);
744 create_output_operand (&ops
[0], res
, mode
);
745 create_input_operand (&ops
[1], op0
, mode
);
746 create_input_operand (&ops
[2], op1
, mode
);
747 create_fixed_operand (&ops
[3], do_error
);
748 if (maybe_expand_insn (icode
, 4, ops
))
750 last
= get_last_insn ();
751 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
753 && any_condjump_p (last
)
754 && !find_reg_note (last
, REG_BR_PROB
, 0))
755 add_reg_br_prob_note (last
,
756 profile_probability::very_unlikely ());
757 emit_jump (done_label
);
761 delete_insns_since (last
);
764 /* Compute the operation. On RTL level, the addition is always
766 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
767 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
769 /* For PLUS_EXPR, the operation is commutative, so we can pick
770 operand to compare against. For prec <= BITS_PER_WORD, I think
771 preferring REG operand is better over CONST_INT, because
772 the CONST_INT might enlarge the instruction or CSE would need
773 to figure out we'd already loaded it into a register before.
774 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
775 as then the multi-word comparison can be perhaps simplified. */
776 if (code
== PLUS_EXPR
777 && (prec
<= BITS_PER_WORD
778 ? (CONST_SCALAR_INT_P (op0
) && REG_P (op1
))
779 : CONST_SCALAR_INT_P (op1
)))
781 do_compare_rtx_and_jump (res
, tem
, code
== PLUS_EXPR
? GEU
: LEU
,
782 true, mode
, NULL_RTX
, NULL
, done_label
,
783 profile_probability::very_likely ());
788 if (!uns0_p
&& uns1_p
&& !unsr_p
)
790 /* Compute the operation. On RTL level, the addition is always
792 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
793 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
794 rtx tem
= expand_binop (mode
, add_optab
,
795 code
== PLUS_EXPR
? res
: op0
, sgn
,
796 NULL_RTX
, false, OPTAB_LIB_WIDEN
);
797 do_compare_rtx_and_jump (tem
, op1
, GEU
, true, mode
, NULL_RTX
, NULL
,
798 done_label
, profile_probability::very_likely ());
803 if (code
== PLUS_EXPR
&& !uns0_p
&& uns1_p
&& unsr_p
)
805 op1
= expand_binop (mode
, add_optab
, op1
, sgn
, NULL_RTX
, false,
807 /* As we've changed op1, we have to avoid using the value range
808 for the original argument. */
809 arg1
= error_mark_node
;
815 if (code
== MINUS_EXPR
&& uns0_p
&& !uns1_p
&& unsr_p
)
817 op0
= expand_binop (mode
, add_optab
, op0
, sgn
, NULL_RTX
, false,
819 /* As we've changed op0, we have to avoid using the value range
820 for the original argument. */
821 arg0
= error_mark_node
;
827 if (code
== MINUS_EXPR
&& !uns0_p
&& uns1_p
&& unsr_p
)
829 /* Compute the operation. On RTL level, the addition is always
831 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
833 int pos_neg
= get_range_pos_neg (arg0
);
835 /* If ARG0 is known to be always negative, this is always overflow. */
836 emit_jump (do_error
);
837 else if (pos_neg
== 3)
838 /* If ARG0 is not known to be always positive, check at runtime. */
839 do_compare_rtx_and_jump (op0
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
840 NULL
, do_error
, profile_probability::very_unlikely ());
841 do_compare_rtx_and_jump (op1
, op0
, LEU
, true, mode
, NULL_RTX
, NULL
,
842 done_label
, profile_probability::very_likely ());
847 if (code
== MINUS_EXPR
&& uns0_p
&& !uns1_p
&& !unsr_p
)
849 /* Compute the operation. On RTL level, the addition is always
851 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
853 rtx tem
= expand_binop (mode
, add_optab
, op1
, sgn
, NULL_RTX
, false,
855 do_compare_rtx_and_jump (op0
, tem
, LTU
, true, mode
, NULL_RTX
, NULL
,
856 done_label
, profile_probability::very_likely ());
861 if (code
== PLUS_EXPR
&& uns0_p
&& uns1_p
&& !unsr_p
)
863 /* Compute the operation. On RTL level, the addition is always
865 res
= expand_binop (mode
, add_optab
, op0
, op1
, NULL_RTX
, false,
867 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
868 NULL
, do_error
, profile_probability::very_unlikely ());
870 /* The operation is commutative, so we can pick operand to compare
871 against. For prec <= BITS_PER_WORD, I think preferring REG operand
872 is better over CONST_INT, because the CONST_INT might enlarge the
873 instruction or CSE would need to figure out we'd already loaded it
874 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
875 might be more beneficial, as then the multi-word comparison can be
876 perhaps simplified. */
877 if (prec
<= BITS_PER_WORD
878 ? (CONST_SCALAR_INT_P (op1
) && REG_P (op0
))
879 : CONST_SCALAR_INT_P (op0
))
881 do_compare_rtx_and_jump (res
, tem
, GEU
, true, mode
, NULL_RTX
, NULL
,
882 done_label
, profile_probability::very_likely ());
887 if (!uns0_p
&& !uns1_p
&& unsr_p
)
889 /* Compute the operation. On RTL level, the addition is always
891 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
892 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
893 int pos_neg
= get_range_pos_neg (arg1
);
894 if (code
== PLUS_EXPR
)
896 int pos_neg0
= get_range_pos_neg (arg0
);
897 if (pos_neg0
!= 3 && pos_neg
== 3)
899 std::swap (op0
, op1
);
906 tem
= expand_binop (mode
, ((pos_neg
== 1) ^ (code
== MINUS_EXPR
))
907 ? and_optab
: ior_optab
,
908 op0
, res
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
909 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL
,
910 NULL
, done_label
, profile_probability::very_likely ());
914 rtx_code_label
*do_ior_label
= gen_label_rtx ();
915 do_compare_rtx_and_jump (op1
, const0_rtx
,
916 code
== MINUS_EXPR
? GE
: LT
, false, mode
,
917 NULL_RTX
, NULL
, do_ior_label
,
918 profile_probability::even ());
919 tem
= expand_binop (mode
, and_optab
, op0
, res
, NULL_RTX
, false,
921 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
922 NULL
, done_label
, profile_probability::very_likely ());
923 emit_jump (do_error
);
924 emit_label (do_ior_label
);
925 tem
= expand_binop (mode
, ior_optab
, op0
, res
, NULL_RTX
, false,
927 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
928 NULL
, done_label
, profile_probability::very_likely ());
934 if (code
== MINUS_EXPR
&& uns0_p
&& uns1_p
&& !unsr_p
)
936 /* Compute the operation. On RTL level, the addition is always
938 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
940 rtx_code_label
*op0_geu_op1
= gen_label_rtx ();
941 do_compare_rtx_and_jump (op0
, op1
, GEU
, true, mode
, NULL_RTX
, NULL
,
942 op0_geu_op1
, profile_probability::even ());
943 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
944 NULL
, done_label
, profile_probability::very_likely ());
945 emit_jump (do_error
);
946 emit_label (op0_geu_op1
);
947 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
948 NULL
, done_label
, profile_probability::very_likely ());
952 gcc_assert (!uns0_p
&& !uns1_p
&& !unsr_p
);
957 insn_code icode
= optab_handler (code
== PLUS_EXPR
? addv4_optab
958 : subv4_optab
, mode
);
959 if (icode
!= CODE_FOR_nothing
)
961 struct expand_operand ops
[4];
962 rtx_insn
*last
= get_last_insn ();
964 res
= gen_reg_rtx (mode
);
965 create_output_operand (&ops
[0], res
, mode
);
966 create_input_operand (&ops
[1], op0
, mode
);
967 create_input_operand (&ops
[2], op1
, mode
);
968 create_fixed_operand (&ops
[3], do_error
);
969 if (maybe_expand_insn (icode
, 4, ops
))
971 last
= get_last_insn ();
972 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
974 && any_condjump_p (last
)
975 && !find_reg_note (last
, REG_BR_PROB
, 0))
976 add_reg_br_prob_note (last
,
977 profile_probability::very_unlikely ());
978 emit_jump (done_label
);
982 delete_insns_since (last
);
985 /* Compute the operation. On RTL level, the addition is always
987 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
988 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
990 /* If we can prove that one of the arguments (for MINUS_EXPR only
991 the second operand, as subtraction is not commutative) is always
992 non-negative or always negative, we can do just one comparison
993 and conditional jump. */
994 int pos_neg
= get_range_pos_neg (arg1
);
995 if (code
== PLUS_EXPR
)
997 int pos_neg0
= get_range_pos_neg (arg0
);
998 if (pos_neg0
!= 3 && pos_neg
== 3)
1000 std::swap (op0
, op1
);
  /* Addition overflows if and only if the two operands have the same sign,
     and the result has the opposite sign.  Subtraction overflows if and
     only if the two operands have opposite sign, and the subtrahend has
     the same sign as the result.  Here 0 is counted as positive.  */
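
  /* E.g. with 8-bit operands, 0x60 + 0x60 = 0xc0: both operands are
     non-negative while the result is negative, so the addition overflowed;
     0x60 - 0xc0 = 0xa0: the operands have opposite signs and the subtrahend
     0xc0 has the same (negative) sign as the result, so it overflowed too.  */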
1011 /* Compute op0 ^ op1 (operands have opposite sign). */
1012 rtx op_xor
= expand_binop (mode
, xor_optab
, op0
, op1
, NULL_RTX
, false,
1015 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1016 rtx res_xor
= expand_binop (mode
, xor_optab
, res
, op1
, NULL_RTX
, false,
1020 if (code
== PLUS_EXPR
)
1022 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1023 tem
= expand_unop (mode
, one_cmpl_optab
, op_xor
, NULL_RTX
, false);
1024 tem
= expand_binop (mode
, and_optab
, res_xor
, tem
, NULL_RTX
, false,
1029 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1030 tem
= expand_unop (mode
, one_cmpl_optab
, res_xor
, NULL_RTX
, false);
1031 tem
= expand_binop (mode
, and_optab
, op_xor
, tem
, NULL_RTX
, false,
1035 /* No overflow if the result has bit sign cleared. */
1036 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1037 NULL
, done_label
, profile_probability::very_likely ());
1040 /* Compare the result of the operation with the first operand.
1041 No overflow for addition if second operand is positive and result
1042 is larger or second operand is negative and result is smaller.
1043 Likewise for subtraction with sign of second operand flipped. */
1045 do_compare_rtx_and_jump (res
, op0
,
1046 (pos_neg
== 1) ^ (code
== MINUS_EXPR
) ? GE
: LE
,
1047 false, mode
, NULL_RTX
, NULL
, done_label
,
1048 profile_probability::very_likely ());
1052 emit_label (do_error
);
1055 /* Expand the ubsan builtin call. */
1057 fn
= ubsan_build_overflow_builtin (code
, loc
, TREE_TYPE (arg0
),
1061 do_pending_stack_adjust ();
1064 expand_arith_set_overflow (lhs
, target
);
1067 emit_label (done_label
);
1072 expand_ubsan_result_store (target
, res
);
1076 res
= expand_binop (mode
, add_optab
, res
, sgn
, NULL_RTX
, false,
1079 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
                     tree *datap)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_reg_br_prob_note (last,
                                  profile_probability::very_unlikely ());
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
                     tree *datap)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
     u1 * u2 -> ur
        res = u1 * u2
        ovf = main_ovf (true)
     s1 * u2 -> ur
        res = (U) s1 * u2
        ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
        res = (S) (u1 * u2)
        ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
                          : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        res = t1 * t2
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
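
  /* Worked example of the s1 * u2 -> ur rule with 8-bit operands:
     s1 = -1, u2 = 2 gives res = (U) 0xff * 2 = 0xfe, and since s1 < 0 with
     u2 nonzero the overflow flag is set, matching the fact that -2 cannot
     be represented as an unsigned result; s1 = -1, u2 = 0 gives res = 0 and
     no overflow.  */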
1245 if (uns0_p
&& !uns1_p
)
1247 /* Multiplication is commutative, if operand signedness differs,
1248 canonicalize to the first operand being signed and second
1249 unsigned to simplify following code. */
1250 std::swap (op0
, op1
);
1251 std::swap (arg0
, arg1
);
1256 int pos_neg0
= get_range_pos_neg (arg0
);
1257 int pos_neg1
= get_range_pos_neg (arg1
);
1260 if (!uns0_p
&& uns1_p
&& unsr_p
)
1265 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1268 /* If s1 is negative, avoid the main code, just multiply and
1269 signal overflow if op1 is not 0. */
1270 struct separate_ops ops
;
1271 ops
.code
= MULT_EXPR
;
1272 ops
.type
= TREE_TYPE (arg1
);
1273 ops
.op0
= make_tree (ops
.type
, op0
);
1274 ops
.op1
= make_tree (ops
.type
, op1
);
1275 ops
.op2
= NULL_TREE
;
1277 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1278 do_compare_rtx_and_jump (op1
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1279 NULL
, done_label
, profile_probability::very_likely ());
1280 goto do_error_label
;
1282 rtx_code_label
*do_main_label
;
1283 do_main_label
= gen_label_rtx ();
1284 do_compare_rtx_and_jump (op0
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1285 NULL
, do_main_label
, profile_probability::very_likely ());
1286 do_compare_rtx_and_jump (op1
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1287 NULL
, do_main_label
, profile_probability::very_likely ());
1288 expand_arith_set_overflow (lhs
, target
);
1289 emit_label (do_main_label
);
1297 if (uns0_p
&& uns1_p
&& !unsr_p
)
1300 /* Rest of handling of this case after res is computed. */
1305 if (!uns0_p
&& uns1_p
&& !unsr_p
)
1312 /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
1313 avoid the main code, just multiply and signal overflow
1314 unless 0 * u2 or -1 * ((U) Smin). */
1315 struct separate_ops ops
;
1316 ops
.code
= MULT_EXPR
;
1317 ops
.type
= TREE_TYPE (arg1
);
1318 ops
.op0
= make_tree (ops
.type
, op0
);
1319 ops
.op1
= make_tree (ops
.type
, op1
);
1320 ops
.op2
= NULL_TREE
;
1322 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1323 do_compare_rtx_and_jump (op0
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1324 NULL
, done_label
, profile_probability::very_likely ());
1325 do_compare_rtx_and_jump (op0
, constm1_rtx
, NE
, true, mode
, NULL_RTX
,
1326 NULL
, do_error
, profile_probability::very_unlikely ());
1328 prec
= GET_MODE_PRECISION (mode
);
1330 sgn
= immed_wide_int_const (wi::min_value (prec
, SIGNED
), mode
);
1331 do_compare_rtx_and_jump (op1
, sgn
, EQ
, true, mode
, NULL_RTX
,
1332 NULL
, done_label
, profile_probability::very_likely ());
1333 goto do_error_label
;
1335 /* Rest of handling of this case after res is computed. */
1343 if (!uns0_p
&& !uns1_p
&& unsr_p
)
1346 switch (pos_neg0
| pos_neg1
)
1348 case 1: /* Both operands known to be non-negative. */
1350 case 2: /* Both operands known to be negative. */
1351 op0
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, false);
1352 op1
= expand_unop (mode
, neg_optab
, op1
, NULL_RTX
, false);
1353 /* Avoid looking at arg0/arg1 ranges, as we've changed
1355 arg0
= error_mark_node
;
1356 arg1
= error_mark_node
;
1359 if ((pos_neg0
^ pos_neg1
) == 3)
1361 /* If one operand is known to be negative and the other
1362 non-negative, this overflows always, unless the non-negative
1363 one is 0. Just do normal multiply and set overflow
1364 unless one of the operands is 0. */
1365 struct separate_ops ops
;
1366 ops
.code
= MULT_EXPR
;
1368 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode
),
1370 ops
.op0
= make_tree (ops
.type
, op0
);
1371 ops
.op1
= make_tree (ops
.type
, op1
);
1372 ops
.op2
= NULL_TREE
;
1374 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1375 tem
= expand_binop (mode
, and_optab
, op0
, op1
, NULL_RTX
, false,
1377 do_compare_rtx_and_jump (tem
, const0_rtx
, EQ
, true, mode
,
1378 NULL_RTX
, NULL
, done_label
,
1379 profile_probability::very_likely ());
1380 goto do_error_label
;
1382 /* The general case, do all the needed comparisons at runtime. */
1383 rtx_code_label
*do_main_label
, *after_negate_label
;
1385 rop0
= gen_reg_rtx (mode
);
1386 rop1
= gen_reg_rtx (mode
);
1387 emit_move_insn (rop0
, op0
);
1388 emit_move_insn (rop1
, op1
);
1391 do_main_label
= gen_label_rtx ();
1392 after_negate_label
= gen_label_rtx ();
1393 tem
= expand_binop (mode
, and_optab
, op0
, op1
, NULL_RTX
, false,
1395 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1396 NULL
, after_negate_label
, profile_probability::very_likely ());
1397 /* Both arguments negative here, negate them and continue with
1398 normal unsigned overflow checking multiplication. */
1399 emit_move_insn (op0
, expand_unop (mode
, neg_optab
, op0
,
1401 emit_move_insn (op1
, expand_unop (mode
, neg_optab
, op1
,
1403 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1405 arg0
= error_mark_node
;
1406 arg1
= error_mark_node
;
1407 emit_jump (do_main_label
);
1408 emit_label (after_negate_label
);
1409 tem2
= expand_binop (mode
, xor_optab
, op0
, op1
, NULL_RTX
, false,
1411 do_compare_rtx_and_jump (tem2
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1412 NULL
, do_main_label
, profile_probability::very_likely ());
1413 /* One argument is negative here, the other positive. This
1414 overflows always, unless one of the arguments is 0. But
1415 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1416 is, thus we can keep do_main code oring in overflow as is. */
1417 do_compare_rtx_and_jump (tem
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1418 NULL
, do_main_label
, profile_probability::very_likely ());
1419 expand_arith_set_overflow (lhs
, target
);
1420 emit_label (do_main_label
);
1428 type
= build_nonstandard_integer_type (GET_MODE_PRECISION (mode
), uns
);
1429 sign
= uns
? UNSIGNED
: SIGNED
;
1430 icode
= optab_handler (uns
? umulv4_optab
: mulv4_optab
, mode
);
1431 if (icode
!= CODE_FOR_nothing
)
1433 struct expand_operand ops
[4];
1434 rtx_insn
*last
= get_last_insn ();
1436 res
= gen_reg_rtx (mode
);
1437 create_output_operand (&ops
[0], res
, mode
);
1438 create_input_operand (&ops
[1], op0
, mode
);
1439 create_input_operand (&ops
[2], op1
, mode
);
1440 create_fixed_operand (&ops
[3], do_error
);
1441 if (maybe_expand_insn (icode
, 4, ops
))
1443 last
= get_last_insn ();
1444 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
1446 && any_condjump_p (last
)
1447 && !find_reg_note (last
, REG_BR_PROB
, 0))
1448 add_reg_br_prob_note (last
,
1449 profile_probability::very_unlikely ());
1450 emit_jump (done_label
);
1454 delete_insns_since (last
);
1455 icode
= CODE_FOR_nothing
;
1459 if (icode
== CODE_FOR_nothing
)
1461 struct separate_ops ops
;
1462 int prec
= GET_MODE_PRECISION (mode
);
1463 scalar_int_mode hmode
;
1465 ops
.op0
= make_tree (type
, op0
);
1466 ops
.op1
= make_tree (type
, op1
);
1467 ops
.op2
= NULL_TREE
;
1469 if (GET_MODE_2XWIDER_MODE (mode
).exists (&wmode
)
1470 && targetm
.scalar_mode_supported_p (wmode
))
1472 ops
.code
= WIDEN_MULT_EXPR
;
1474 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode
), uns
);
1476 res
= expand_expr_real_2 (&ops
, NULL_RTX
, wmode
, EXPAND_NORMAL
);
1477 rtx hipart
= expand_shift (RSHIFT_EXPR
, wmode
, res
, prec
,
1479 hipart
= convert_modes (mode
, wmode
, hipart
, uns
);
1480 res
= convert_modes (mode
, wmode
, res
, uns
);
1482 /* For the unsigned multiplication, there was overflow if
1483 HIPART is non-zero. */
1484 do_compare_rtx_and_jump (hipart
, const0_rtx
, EQ
, true, mode
,
1485 NULL_RTX
, NULL
, done_label
,
1486 profile_probability::very_likely ());
1489 rtx signbit
= expand_shift (RSHIFT_EXPR
, mode
, res
, prec
- 1,
1491 /* RES is low half of the double width result, HIPART
1492 the high half. There was overflow if
1493 HIPART is different from RES < 0 ? -1 : 0. */
1494 do_compare_rtx_and_jump (signbit
, hipart
, EQ
, true, mode
,
1495 NULL_RTX
, NULL
, done_label
,
1496 profile_probability::very_likely ());
1499 else if (int_mode_for_size (prec
/ 2, 1).exists (&hmode
)
1500 && 2 * GET_MODE_PRECISION (hmode
) == prec
)
1502 rtx_code_label
*large_op0
= gen_label_rtx ();
1503 rtx_code_label
*small_op0_large_op1
= gen_label_rtx ();
1504 rtx_code_label
*one_small_one_large
= gen_label_rtx ();
1505 rtx_code_label
*both_ops_large
= gen_label_rtx ();
1506 rtx_code_label
*after_hipart_neg
= uns
? NULL
: gen_label_rtx ();
1507 rtx_code_label
*after_lopart_neg
= uns
? NULL
: gen_label_rtx ();
1508 rtx_code_label
*do_overflow
= gen_label_rtx ();
1509 rtx_code_label
*hipart_different
= uns
? NULL
: gen_label_rtx ();
1511 unsigned int hprec
= GET_MODE_PRECISION (hmode
);
1512 rtx hipart0
= expand_shift (RSHIFT_EXPR
, mode
, op0
, hprec
,
1514 hipart0
= convert_modes (hmode
, mode
, hipart0
, uns
);
1515 rtx lopart0
= convert_modes (hmode
, mode
, op0
, uns
);
1516 rtx signbit0
= const0_rtx
;
1518 signbit0
= expand_shift (RSHIFT_EXPR
, hmode
, lopart0
, hprec
- 1,
1520 rtx hipart1
= expand_shift (RSHIFT_EXPR
, mode
, op1
, hprec
,
1522 hipart1
= convert_modes (hmode
, mode
, hipart1
, uns
);
1523 rtx lopart1
= convert_modes (hmode
, mode
, op1
, uns
);
1524 rtx signbit1
= const0_rtx
;
1526 signbit1
= expand_shift (RSHIFT_EXPR
, hmode
, lopart1
, hprec
- 1,
1529 res
= gen_reg_rtx (mode
);
1531 /* True if op0 resp. op1 are known to be in the range of
1533 bool op0_small_p
= false;
1534 bool op1_small_p
= false;
1535 /* True if op0 resp. op1 are known to have all zeros or all ones
1536 in the upper half of bits, but are not known to be
1538 bool op0_medium_p
= false;
1539 bool op1_medium_p
= false;
1540 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1541 nonnegative, 1 if unknown. */
1547 else if (pos_neg0
== 2)
1551 else if (pos_neg1
== 2)
1554 unsigned int mprec0
= prec
;
1555 if (arg0
!= error_mark_node
)
1556 mprec0
= get_min_precision (arg0
, sign
);
1557 if (mprec0
<= hprec
)
1559 else if (!uns
&& mprec0
<= hprec
+ 1)
1560 op0_medium_p
= true;
1561 unsigned int mprec1
= prec
;
1562 if (arg1
!= error_mark_node
)
1563 mprec1
= get_min_precision (arg1
, sign
);
1564 if (mprec1
<= hprec
)
1566 else if (!uns
&& mprec1
<= hprec
+ 1)
1567 op1_medium_p
= true;
1569 int smaller_sign
= 1;
1570 int larger_sign
= 1;
1573 smaller_sign
= op0_sign
;
1574 larger_sign
= op1_sign
;
1576 else if (op1_small_p
)
1578 smaller_sign
= op1_sign
;
1579 larger_sign
= op0_sign
;
1581 else if (op0_sign
== op1_sign
)
1583 smaller_sign
= op0_sign
;
1584 larger_sign
= op0_sign
;
1588 do_compare_rtx_and_jump (signbit0
, hipart0
, NE
, true, hmode
,
1589 NULL_RTX
, NULL
, large_op0
,
1590 profile_probability::unlikely ());
1593 do_compare_rtx_and_jump (signbit1
, hipart1
, NE
, true, hmode
,
1594 NULL_RTX
, NULL
, small_op0_large_op1
,
1595 profile_probability::unlikely ());
1597 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1598 hmode to mode, the multiplication will never overflow. We can
1599 do just one hmode x hmode => mode widening multiplication. */
1600 rtx lopart0s
= lopart0
, lopart1s
= lopart1
;
1601 if (GET_CODE (lopart0
) == SUBREG
)
1603 lopart0s
= shallow_copy_rtx (lopart0
);
1604 SUBREG_PROMOTED_VAR_P (lopart0s
) = 1;
1605 SUBREG_PROMOTED_SET (lopart0s
, uns
? SRP_UNSIGNED
: SRP_SIGNED
);
1607 if (GET_CODE (lopart1
) == SUBREG
)
1609 lopart1s
= shallow_copy_rtx (lopart1
);
1610 SUBREG_PROMOTED_VAR_P (lopart1s
) = 1;
1611 SUBREG_PROMOTED_SET (lopart1s
, uns
? SRP_UNSIGNED
: SRP_SIGNED
);
1613 tree halfstype
= build_nonstandard_integer_type (hprec
, uns
);
1614 ops
.op0
= make_tree (halfstype
, lopart0s
);
1615 ops
.op1
= make_tree (halfstype
, lopart1s
);
1616 ops
.code
= WIDEN_MULT_EXPR
;
1619 = expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1620 emit_move_insn (res
, thisres
);
1621 emit_jump (done_label
);
1623 emit_label (small_op0_large_op1
);
1625 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1626 but op1 is not, just swap the arguments and handle it as op1
1627 sign/zero extended, op0 not. */
1628 rtx larger
= gen_reg_rtx (mode
);
1629 rtx hipart
= gen_reg_rtx (hmode
);
1630 rtx lopart
= gen_reg_rtx (hmode
);
1631 emit_move_insn (larger
, op1
);
1632 emit_move_insn (hipart
, hipart1
);
1633 emit_move_insn (lopart
, lopart0
);
1634 emit_jump (one_small_one_large
);
1636 emit_label (large_op0
);
1639 do_compare_rtx_and_jump (signbit1
, hipart1
, NE
, true, hmode
,
1640 NULL_RTX
, NULL
, both_ops_large
,
1641 profile_probability::unlikely ());
1643 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1644 but op0 is not, prepare larger, hipart and lopart pseudos and
1645 handle it together with small_op0_large_op1. */
1646 emit_move_insn (larger
, op0
);
1647 emit_move_insn (hipart
, hipart0
);
1648 emit_move_insn (lopart
, lopart1
);
1650 emit_label (one_small_one_large
);
1652 /* lopart is the low part of the operand that is sign extended
1653 to mode, larger is the other operand, hipart is the
1654 high part of larger and lopart0 and lopart1 are the low parts
1656 We perform lopart0 * lopart1 and lopart * hipart widening
1658 tree halfutype
= build_nonstandard_integer_type (hprec
, 1);
1659 ops
.op0
= make_tree (halfutype
, lopart0
);
1660 ops
.op1
= make_tree (halfutype
, lopart1
);
1662 = expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1664 ops
.op0
= make_tree (halfutype
, lopart
);
1665 ops
.op1
= make_tree (halfutype
, hipart
);
1666 rtx loxhi
= gen_reg_rtx (mode
);
1667 rtx tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1668 emit_move_insn (loxhi
, tem
);
1672 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1673 if (larger_sign
== 0)
1674 emit_jump (after_hipart_neg
);
1675 else if (larger_sign
!= -1)
1676 do_compare_rtx_and_jump (hipart
, const0_rtx
, GE
, false, hmode
,
1677 NULL_RTX
, NULL
, after_hipart_neg
,
1678 profile_probability::even ());
1680 tem
= convert_modes (mode
, hmode
, lopart
, 1);
1681 tem
= expand_shift (LSHIFT_EXPR
, mode
, tem
, hprec
, NULL_RTX
, 1);
1682 tem
= expand_simple_binop (mode
, MINUS
, loxhi
, tem
, NULL_RTX
,
1684 emit_move_insn (loxhi
, tem
);
1686 emit_label (after_hipart_neg
);
1688 /* if (lopart < 0) loxhi -= larger; */
1689 if (smaller_sign
== 0)
1690 emit_jump (after_lopart_neg
);
1691 else if (smaller_sign
!= -1)
1692 do_compare_rtx_and_jump (lopart
, const0_rtx
, GE
, false, hmode
,
1693 NULL_RTX
, NULL
, after_lopart_neg
,
1694 profile_probability::even ());
1696 tem
= expand_simple_binop (mode
, MINUS
, loxhi
, larger
, NULL_RTX
,
1698 emit_move_insn (loxhi
, tem
);
1700 emit_label (after_lopart_neg
);
1703 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1704 tem
= expand_shift (RSHIFT_EXPR
, mode
, lo0xlo1
, hprec
, NULL_RTX
, 1);
1705 tem
= expand_simple_binop (mode
, PLUS
, loxhi
, tem
, NULL_RTX
,
1707 emit_move_insn (loxhi
, tem
);
1709 /* if (loxhi >> (bitsize / 2)
1710 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
1711 if (loxhi >> (bitsize / 2) == 0 (if uns). */
1712 rtx hipartloxhi
= expand_shift (RSHIFT_EXPR
, mode
, loxhi
, hprec
,
1714 hipartloxhi
= convert_modes (hmode
, mode
, hipartloxhi
, 0);
1715 rtx signbitloxhi
= const0_rtx
;
1717 signbitloxhi
= expand_shift (RSHIFT_EXPR
, hmode
,
1718 convert_modes (hmode
, mode
,
1720 hprec
- 1, NULL_RTX
, 0);
1722 do_compare_rtx_and_jump (signbitloxhi
, hipartloxhi
, NE
, true, hmode
,
1723 NULL_RTX
, NULL
, do_overflow
,
1724 profile_probability::very_unlikely ());
1726 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1727 rtx loxhishifted
= expand_shift (LSHIFT_EXPR
, mode
, loxhi
, hprec
,
1729 tem
= convert_modes (mode
, hmode
,
1730 convert_modes (hmode
, mode
, lo0xlo1
, 1), 1);
1732 tem
= expand_simple_binop (mode
, IOR
, loxhishifted
, tem
, res
,
1735 emit_move_insn (res
, tem
);
1736 emit_jump (done_label
);
1738 emit_label (both_ops_large
);
1740 /* If both operands are large (not sign (!uns) or zero (uns)
1741 extended from hmode), then perform the full multiplication
1742 which will be the result of the operation.
1743 The only cases which don't overflow are for signed multiplication
1744 some cases where both hipart0 and highpart1 are 0 or -1.
1745 For unsigned multiplication when high parts are both non-zero
1746 this overflows always. */
1747 ops
.code
= MULT_EXPR
;
1748 ops
.op0
= make_tree (type
, op0
);
1749 ops
.op1
= make_tree (type
, op1
);
1750 tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1751 emit_move_insn (res
, tem
);
1757 tem
= expand_simple_binop (hmode
, PLUS
, hipart0
, const1_rtx
,
1758 NULL_RTX
, 1, OPTAB_DIRECT
);
1759 do_compare_rtx_and_jump (tem
, const1_rtx
, GTU
, true, hmode
,
1760 NULL_RTX
, NULL
, do_error
,
1761 profile_probability::very_unlikely ());
1766 tem
= expand_simple_binop (hmode
, PLUS
, hipart1
, const1_rtx
,
1767 NULL_RTX
, 1, OPTAB_DIRECT
);
1768 do_compare_rtx_and_jump (tem
, const1_rtx
, GTU
, true, hmode
,
1769 NULL_RTX
, NULL
, do_error
,
1770 profile_probability::very_unlikely ());
1773 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1774 the same, overflow happened if res is negative, if they are
1775 different, overflow happened if res is positive. */
1776 if (op0_sign
!= 1 && op1_sign
!= 1 && op0_sign
!= op1_sign
)
1777 emit_jump (hipart_different
);
1778 else if (op0_sign
== 1 || op1_sign
== 1)
1779 do_compare_rtx_and_jump (hipart0
, hipart1
, NE
, true, hmode
,
1780 NULL_RTX
, NULL
, hipart_different
,
1781 profile_probability::even ());
1783 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
,
1784 NULL_RTX
, NULL
, do_error
,
1785 profile_probability::very_unlikely ());
1786 emit_jump (done_label
);
1788 emit_label (hipart_different
);
1790 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
,
1791 NULL_RTX
, NULL
, do_error
,
1792 profile_probability::very_unlikely ());
1793 emit_jump (done_label
);
1796 emit_label (do_overflow
);
1798 /* Overflow, do full multiplication and fallthru into do_error. */
1799 ops
.op0
= make_tree (type
, op0
);
1800 ops
.op1
= make_tree (type
, op1
);
1801 tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1802 emit_move_insn (res
, tem
);
1806 gcc_assert (!is_ubsan
);
1807 ops
.code
= MULT_EXPR
;
1809 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1810 emit_jump (done_label
);
1815 emit_label (do_error
);
1818 /* Expand the ubsan builtin call. */
1820 fn
= ubsan_build_overflow_builtin (MULT_EXPR
, loc
, TREE_TYPE (arg0
),
1824 do_pending_stack_adjust ();
1827 expand_arith_set_overflow (lhs
, target
);
1830 emit_label (done_label
);
1833 if (uns0_p
&& uns1_p
&& !unsr_p
)
1835 rtx_code_label
*all_done_label
= gen_label_rtx ();
1836 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1837 NULL
, all_done_label
, profile_probability::very_likely ());
1838 expand_arith_set_overflow (lhs
, target
);
1839 emit_label (all_done_label
);
1843 if (!uns0_p
&& uns1_p
&& !unsr_p
&& pos_neg1
== 3)
1845 rtx_code_label
*all_done_label
= gen_label_rtx ();
1846 rtx_code_label
*set_noovf
= gen_label_rtx ();
1847 do_compare_rtx_and_jump (op1
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1848 NULL
, all_done_label
, profile_probability::very_likely ());
1849 expand_arith_set_overflow (lhs
, target
);
1850 do_compare_rtx_and_jump (op0
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1851 NULL
, set_noovf
, profile_probability::very_likely ());
1852 do_compare_rtx_and_jump (op0
, constm1_rtx
, NE
, true, mode
, NULL_RTX
,
1853 NULL
, all_done_label
, profile_probability::very_unlikely ());
1854 do_compare_rtx_and_jump (op1
, res
, NE
, true, mode
, NULL_RTX
, NULL
,
1855 all_done_label
, profile_probability::very_unlikely ());
1856 emit_label (set_noovf
);
1857 write_complex_part (target
, const0_rtx
, true);
1858 emit_label (all_done_label
);
1864 expand_ubsan_result_store (target
, res
);
1866 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
/* Expand UBSAN_CHECK_* internal function if it has vector operands.  */

static void
expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
                              tree arg0, tree arg1)
{
  int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
  rtx_code_label *loop_lab = NULL;
  rtx cntvar = NULL_RTX;
  tree cntv = NULL_TREE;
  tree eltype = TREE_TYPE (TREE_TYPE (arg0));
  tree sz = TYPE_SIZE (eltype);
  tree data = NULL_TREE;
  tree resv = NULL_TREE;
  rtx lhsr = NULL_RTX;
  rtx resvr = NULL_RTX;
1890 lhsr
= expand_expr (lhs
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
1891 if (!VECTOR_MODE_P (GET_MODE (lhsr
))
1892 || (op
= optab_for_tree_code (code
, TREE_TYPE (arg0
),
1893 optab_default
)) == unknown_optab
1894 || (optab_handler (op
, TYPE_MODE (TREE_TYPE (arg0
)))
1895 == CODE_FOR_nothing
))
1898 resv
= make_tree (TREE_TYPE (lhs
), lhsr
);
1901 resvr
= assign_temp (TREE_TYPE (lhs
), 1, 1);
1902 resv
= make_tree (TREE_TYPE (lhs
), resvr
);
1908 do_pending_stack_adjust ();
1909 loop_lab
= gen_label_rtx ();
1910 cntvar
= gen_reg_rtx (TYPE_MODE (sizetype
));
1911 cntv
= make_tree (sizetype
, cntvar
);
1912 emit_move_insn (cntvar
, const0_rtx
);
1913 emit_label (loop_lab
);
1915 if (TREE_CODE (arg0
) != VECTOR_CST
)
1917 rtx arg0r
= expand_normal (arg0
);
1918 arg0
= make_tree (TREE_TYPE (arg0
), arg0r
);
1920 if (TREE_CODE (arg1
) != VECTOR_CST
)
1922 rtx arg1r
= expand_normal (arg1
);
1923 arg1
= make_tree (TREE_TYPE (arg1
), arg1r
);
1925 for (int i
= 0; i
< (cnt
> 4 ? 1 : cnt
); i
++)
1927 tree op0
, op1
, res
= NULL_TREE
;
1930 tree atype
= build_array_type_nelts (eltype
, cnt
);
1931 op0
= uniform_vector_p (arg0
);
1932 if (op0
== NULL_TREE
)
1934 op0
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, atype
, arg0
);
1935 op0
= build4_loc (loc
, ARRAY_REF
, eltype
, op0
, cntv
,
1936 NULL_TREE
, NULL_TREE
);
1938 op1
= uniform_vector_p (arg1
);
1939 if (op1
== NULL_TREE
)
1941 op1
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, atype
, arg1
);
1942 op1
= build4_loc (loc
, ARRAY_REF
, eltype
, op1
, cntv
,
1943 NULL_TREE
, NULL_TREE
);
1947 res
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, atype
, resv
);
1948 res
= build4_loc (loc
, ARRAY_REF
, eltype
, res
, cntv
,
1949 NULL_TREE
, NULL_TREE
);
1954 tree bitpos
= bitsize_int (tree_to_uhwi (sz
) * i
);
1955 op0
= fold_build3_loc (loc
, BIT_FIELD_REF
, eltype
, arg0
, sz
, bitpos
);
1956 op1
= fold_build3_loc (loc
, BIT_FIELD_REF
, eltype
, arg1
, sz
, bitpos
);
1958 res
= fold_build3_loc (loc
, BIT_FIELD_REF
, eltype
, resv
, sz
,
1964 expand_addsub_overflow (loc
, PLUS_EXPR
, res
, op0
, op1
,
1965 false, false, false, true, &data
);
1968 if (cnt
> 4 ? integer_zerop (arg0
) : integer_zerop (op0
))
1969 expand_neg_overflow (loc
, res
, op1
, true, &data
);
1971 expand_addsub_overflow (loc
, MINUS_EXPR
, res
, op0
, op1
,
1972 false, false, false, true, &data
);
1975 expand_mul_overflow (loc
, res
, op0
, op1
, false, false, false,
1984 struct separate_ops ops
;
1985 ops
.code
= PLUS_EXPR
;
1986 ops
.type
= TREE_TYPE (cntv
);
1988 ops
.op1
= build_int_cst (TREE_TYPE (cntv
), 1);
1989 ops
.op2
= NULL_TREE
;
1991 rtx ret
= expand_expr_real_2 (&ops
, cntvar
, TYPE_MODE (sizetype
),
1994 emit_move_insn (cntvar
, ret
);
1995 do_compare_rtx_and_jump (cntvar
, GEN_INT (cnt
), NE
, false,
1996 TYPE_MODE (sizetype
), NULL_RTX
, NULL
, loop_lab
,
1997 profile_probability::very_likely ());
1999 if (lhs
&& resv
== NULL_TREE
)
2001 struct separate_ops ops
;
2003 ops
.type
= TREE_TYPE (arg0
);
2006 ops
.op2
= NULL_TREE
;
2008 rtx ret
= expand_expr_real_2 (&ops
, lhsr
, TYPE_MODE (TREE_TYPE (arg0
)),
2011 emit_move_insn (lhsr
, ret
);
2014 emit_move_insn (lhsr
, resvr
);
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
  else
    expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
  else if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true, NULL);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
  else
    expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
                         NULL);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
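  /* For example, multiplying two signed 16-bit operands needs at most
     16 + 16 = 32 bits of signed precision (the largest-magnitude product,
     -32768 * -32768 = 2^30, still fits), so with a signed 32-bit result
     the operation can never overflow.  */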
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

      /* For operations with low precision, if target doesn't have them, start
         with precres widening right away, otherwise do it only if the most
         simple cases can't be used.  */
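      /* For example, if the target reports a minimum arithmetic precision of
         32, an 8-bit * 8-bit -> 8-bit check skips the direct expansion here
         and is retried below with a 32-bit type.  */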
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
        ;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
                && prec1 <= precres)
               || ((!uns0_p || !uns1_p) && !unsr_p
                   && prec0 + uns0_p <= precres
                   && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false, NULL);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                      unsr_p, unsr_p, false, NULL);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                   unsr_p, unsr_p, false, NULL);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
          int p = MAX (min_precision, precop);
          scalar_int_mode m = smallest_int_mode_for_size (p);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false, NULL);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false, NULL);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          scalar_int_mode m = smallest_int_mode_for_size (p);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}

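/* The {ADD,SUB,MUL}_OVERFLOW internal functions implement
   __builtin_add_overflow and friends; each produces a complex value whose
   real part is the (possibly truncated) result and whose imaginary part is
   the overflow flag, as set up by the write_complex_part calls above.  */
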
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand MASK_LOAD call STMT using optab OPTAB.  */
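/* For both MASK_LOAD and MASK_STORE, argument 0 is the base pointer,
   argument 1 the alignment (compared against TYPE_ALIGN below) and
   argument 2 the vector mask; MASK_STORE passes the value to store as
   argument 3.  */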
static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}

/* Expand MASK_STORE call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, reg, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}

static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}

static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
                         EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy
   function should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand the IFN_UNIQUE function according to its first argument.  */
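/* The first argument encodes an ifn_unique_kind.  These calls act as markers
   that the optimizers may not duplicate; here they become either the target's
   "unique" marker insn or its oacc_fork/oacc_join patterns.  */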
static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
        pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
        {
          tree lhs = gimple_call_lhs (stmt);
          rtx target = const0_rtx;

          if (lhs)
            target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

          rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
          rtx axis = expand_normal (gimple_call_arg (stmt, 2));

          if (kind == IFN_UNIQUE_OACC_FORK)
            pattern = targetm.gen_oacc_fork (target, data_dep, axis);
          else
            pattern = targetm.gen_oacc_join (target, data_dep, axis);
        }
      else
        gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}

/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    emit_move_insn (target, GEN_INT (1));
}

/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_TILE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
                  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}

/* Expand atomic bit test and set.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and complement.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and reset.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic compare and exchange.  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}

/* Expand LAUNDER to assignment, lhs = arg0.  */

static void
expand_LAUNDER (internal_fn, gcall *call)
{
  tree lhs = gimple_call_lhs (call);

  if (!lhs)
    return;

  expand_assignment (lhs, gimple_call_arg (call, 0), false);
}

/* Expand DIVMOD() using:
 a) optab handler for udivmod/sdivmod if it is available.
 b) If optab_handler doesn't exist, generate call to
    target-specific divmod libfunc.  */
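/* For example, ARM EABI targets typically take path (b): __aeabi_idivmod
   and __aeabi_uidivmod return the quotient and remainder from a single
   library call, reached via targetm.expand_divmod_libfunc.  */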
static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check if optab_handler exists for divmod_optab for given mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate call to divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
                                   &quotient, &remainder);
  else
    gcc_unreachable ();

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
                       make_tree (TREE_TYPE (arg0), quotient),
                       make_tree (TREE_TYPE (arg1), remainder)),
               target, VOIDmode, EXPAND_NORMAL);
}

/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
                        unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
        create_convert_operand_from (&ops[i + 1], rhs_rtx,
                                     TYPE_MODE (rhs_type),
                                     TYPE_UNSIGNED (rhs_type));
      else
        create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
         result to that type.  This is useful for things that return an
         int regardless of the size of the input.  If the instruction result
         is smaller than required, assume that it is signed.

         If the return value has a nonintegral type, its mode must match
         the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
        {
          /* If this is a scalar in a register that is stored in a wider
             mode than the declared mode, compute the result into its
             declared mode and then convert to the wider mode.  */
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
          convert_move (SUBREG_REG (lhs_rtx), tmp,
                        SUBREG_PROMOTED_SIGN (lhs_rtx));
        }
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
        emit_move_insn (lhs_rtx, ops[0].value);
      else
        {
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          convert_move (lhs_rtx, ops[0].value, 0);
        }
    }
}

/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)

/* RETURN_TYPE and ARGS are a return type and argument list that are
   in principle compatible with FN (which satisfies direct_internal_fn_p).
   Return the types that should be used to determine whether the
   target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
  tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
  return tree_pair (type0, type1);
}

/* CALL is a call whose return type and arguments are in principle
   compatible with FN (which satisfies direct_internal_fn_p).  Return the
   types that should be used to determine whether the target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, gcall *call)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree op0 = (info.type0 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type0));
  tree op1 = (info.type1 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type1));
  return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
}

/* Return true if OPTAB is supported for TYPES (whose modes should be
   the same) when the optimization type is OPT_TYPE.  Used for simple
   direct optabs.  */

static bool
direct_optab_supported_p (direct_optab optab, tree_pair types,
                          optimization_type opt_type)
{
  machine_mode mode = TYPE_MODE (types.first);
  gcc_checking_assert (mode == TYPE_MODE (types.second));
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}

/* Return true if load/store lanes optab OPTAB is supported for
   array type TYPES.first when the optimization type is OPT_TYPE.  */

static bool
multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
                                optimization_type opt_type)
{
  gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
  machine_mode imode = TYPE_MODE (types.first);
  machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
  return (convert_optab_handler (optab, imode, vmode, opt_type)
          != CODE_FOR_nothing);
}

#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p

/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
                                optimization_type opt_type)
{
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
                                                opt_type);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  return false;
}

/* Return true if FN is supported for type TYPE when the optimization
   type is OPT_TYPE.  The caller knows that the "type0" and "type1"
   fields of FN's direct_internal_fn_info structure are the same.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree type,
                                optimization_type opt_type)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  gcc_checking_assert (info.type0 == info.type1);
  return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}

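/* A caller usually probes support before emitting a call to a direct
   internal function, along the lines of (IFN_XYZ is a placeholder for
   some direct internal function and VECTYPE for its type):

     if (direct_internal_fn_supported_p (IFN_XYZ, vectype,
					 OPTIMIZE_FOR_SPEED))
       ... emit a call to IFN_XYZ ...  */
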
/* Return true if IFN_SET_EDOM is supported.  */

bool
set_edom_supported_p (void)
{
#ifdef TARGET_EDOM
  return true;
#else
  return false;
#endif
}

#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void \
  expand_##CODE (internal_fn fn, gcall *stmt) \
  { \
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
  }
#include "internal-fn.def"
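/* As an illustration (FOO is a made-up entry, not one from internal-fn.def):

     DEF_INTERNAL_OPTAB_FN (FOO, ECF_CONST, foo, binary)

   expands here into

     static void
     expand_FOO (internal_fn fn, gcall *stmt)
     {
       expand_binary_optab_fn (fn, stmt, foo_optab);
     }

   i.e. a thin wrapper that ends up in expand_direct_optab_fn with
   nargs == 2.  */
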
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (internal_fn fn, gcall *stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};

/* Expand STMT as though it were a call to internal function FN.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}

static void
expand_PHI (internal_fn, gcall *)
{
  gcc_unreachable ();
}