/* Internal functions.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "optabs-tree.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}

/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}

/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_USE_SIMT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Allocate per-lane storage and begin non-uniform execution region.  */

static void
expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
{
  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = gen_reg_rtx (Pmode);
  rtx size = expand_normal (gimple_call_arg (stmt, 0));
  rtx align = expand_normal (gimple_call_arg (stmt, 1));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, Pmode);
  create_input_operand (&ops[1], size, Pmode);
  create_input_operand (&ops[2], align, Pmode);
  gcc_assert (targetm.have_omp_simt_enter ());
  expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
}

/* Deallocate per-lane storage and leave non-uniform execution region.  */

static void
expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
{
  gcc_checking_assert (!gimple_call_lhs (stmt));
  rtx arg = expand_normal (gimple_call_arg (stmt, 0));
  struct expand_operand ops[1];
  create_input_operand (&ops[0], arg, Pmode);
  gcc_assert (targetm.have_omp_simt_exit ());
  expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
}

/* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
   without SIMT execution this should be expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (targetm.have_omp_simt_lane ());
  emit_insn (targetm.gen_omp_simt_lane (target));
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Lane index of the first SIMT lane that supplies a non-zero argument.
   This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
   lane that executed the last iteration for handling OpenMP lastprivate.  */

static void
expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_last_lane ());
  expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
}

/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */

static void
expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], ctr, mode);
  gcc_assert (targetm.have_omp_simt_ordered ());
  expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
}

/* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
   any lane supplies a non-zero argument.  */

static void
expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_vote_any ());
  expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
}

/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
   is destination lane index XOR given offset.  */

static void
expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_bfly ());
  expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
}

/* Exchange between SIMT lanes according to given source lane index.  */

static void
expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_idx ());
  expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_PTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON_USE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the lower pass.  */

static void
expand_FALLTHROUGH (internal_fn, gcall *call)
{
  error_at (gimple_location (call),
	    "invalid use of attribute %<fallthrough%>");
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
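
/* For example, assuming range information records [0, 1000] for a signed
   32-bit SSA name X, get_min_precision (X, SIGNED) returns 11 (ten value
   bits plus a sign bit) and get_min_precision (X, UNSIGNED) returns 10;
   the expand_*_overflow helpers below use such bounds to prove that a
   narrower operation cannot overflow.  */
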
/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
	{
	  rtx mask
	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
				    tgtmode);
	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
				      true, OPTAB_LIB_WIDEN);
	}
      else
	{
	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
			       NULL_RTX, 1);
	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
			       NULL_RTX, 0);
	}
      do_compare_rtx_and_jump (res, lres,
			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}

/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan, tree *datap)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
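
  /* A worked example of the s1 + s2 -> sr rule above, assuming 8-bit
     operands: for s1 = 100, s2 = 50 the unsigned sum is 150, so
     res = (S) 150 = -106; s2 >= 0 and res < s1, therefore ovf is set.
     For s1 = 100, s2 = -50, res = 50 and overflow would require res > s1,
     so ovf stays clear.  */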
719 if (code
== PLUS_EXPR
&& uns0_p
&& !uns1_p
)
721 /* PLUS_EXPR is commutative, if operand signedness differs,
722 canonicalize to the first operand being signed and second
723 unsigned to simplify following code. */
724 std::swap (op0
, op1
);
725 std::swap (arg0
, arg1
);
731 if (uns0_p
&& uns1_p
&& unsr_p
)
733 insn_code icode
= optab_handler (code
== PLUS_EXPR
? uaddv4_optab
734 : usubv4_optab
, mode
);
735 if (icode
!= CODE_FOR_nothing
)
737 struct expand_operand ops
[4];
738 rtx_insn
*last
= get_last_insn ();
740 res
= gen_reg_rtx (mode
);
741 create_output_operand (&ops
[0], res
, mode
);
742 create_input_operand (&ops
[1], op0
, mode
);
743 create_input_operand (&ops
[2], op1
, mode
);
744 create_fixed_operand (&ops
[3], do_error
);
745 if (maybe_expand_insn (icode
, 4, ops
))
747 last
= get_last_insn ();
748 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
750 && any_condjump_p (last
)
751 && !find_reg_note (last
, REG_BR_PROB
, 0))
752 add_reg_br_prob_note (last
,
753 profile_probability::very_unlikely ());
754 emit_jump (done_label
);
758 delete_insns_since (last
);
761 /* Compute the operation. On RTL level, the addition is always
763 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
764 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
766 /* For PLUS_EXPR, the operation is commutative, so we can pick
767 operand to compare against. For prec <= BITS_PER_WORD, I think
768 preferring REG operand is better over CONST_INT, because
769 the CONST_INT might enlarge the instruction or CSE would need
770 to figure out we'd already loaded it into a register before.
771 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
772 as then the multi-word comparison can be perhaps simplified. */
773 if (code
== PLUS_EXPR
774 && (prec
<= BITS_PER_WORD
775 ? (CONST_SCALAR_INT_P (op0
) && REG_P (op1
))
776 : CONST_SCALAR_INT_P (op1
)))
778 do_compare_rtx_and_jump (res
, tem
, code
== PLUS_EXPR
? GEU
: LEU
,
779 true, mode
, NULL_RTX
, NULL
, done_label
,
780 profile_probability::very_likely ());
785 if (!uns0_p
&& uns1_p
&& !unsr_p
)
787 /* Compute the operation. On RTL level, the addition is always
789 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
790 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
791 rtx tem
= expand_binop (mode
, add_optab
,
792 code
== PLUS_EXPR
? res
: op0
, sgn
,
793 NULL_RTX
, false, OPTAB_LIB_WIDEN
);
794 do_compare_rtx_and_jump (tem
, op1
, GEU
, true, mode
, NULL_RTX
, NULL
,
795 done_label
, profile_probability::very_likely ());
800 if (code
== PLUS_EXPR
&& !uns0_p
&& uns1_p
&& unsr_p
)
802 op1
= expand_binop (mode
, add_optab
, op1
, sgn
, NULL_RTX
, false,
804 /* As we've changed op1, we have to avoid using the value range
805 for the original argument. */
806 arg1
= error_mark_node
;
812 if (code
== MINUS_EXPR
&& uns0_p
&& !uns1_p
&& unsr_p
)
814 op0
= expand_binop (mode
, add_optab
, op0
, sgn
, NULL_RTX
, false,
816 /* As we've changed op0, we have to avoid using the value range
817 for the original argument. */
818 arg0
= error_mark_node
;
824 if (code
== MINUS_EXPR
&& !uns0_p
&& uns1_p
&& unsr_p
)
826 /* Compute the operation. On RTL level, the addition is always
828 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
830 int pos_neg
= get_range_pos_neg (arg0
);
832 /* If ARG0 is known to be always negative, this is always overflow. */
833 emit_jump (do_error
);
834 else if (pos_neg
== 3)
835 /* If ARG0 is not known to be always positive, check at runtime. */
836 do_compare_rtx_and_jump (op0
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
837 NULL
, do_error
, profile_probability::very_unlikely ());
838 do_compare_rtx_and_jump (op1
, op0
, LEU
, true, mode
, NULL_RTX
, NULL
,
839 done_label
, profile_probability::very_likely ());
844 if (code
== MINUS_EXPR
&& uns0_p
&& !uns1_p
&& !unsr_p
)
846 /* Compute the operation. On RTL level, the addition is always
848 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
850 rtx tem
= expand_binop (mode
, add_optab
, op1
, sgn
, NULL_RTX
, false,
852 do_compare_rtx_and_jump (op0
, tem
, LTU
, true, mode
, NULL_RTX
, NULL
,
853 done_label
, profile_probability::very_likely ());
858 if (code
== PLUS_EXPR
&& uns0_p
&& uns1_p
&& !unsr_p
)
860 /* Compute the operation. On RTL level, the addition is always
862 res
= expand_binop (mode
, add_optab
, op0
, op1
, NULL_RTX
, false,
864 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
865 NULL
, do_error
, profile_probability::very_unlikely ());
867 /* The operation is commutative, so we can pick operand to compare
868 against. For prec <= BITS_PER_WORD, I think preferring REG operand
869 is better over CONST_INT, because the CONST_INT might enlarge the
870 instruction or CSE would need to figure out we'd already loaded it
871 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
872 might be more beneficial, as then the multi-word comparison can be
873 perhaps simplified. */
874 if (prec
<= BITS_PER_WORD
875 ? (CONST_SCALAR_INT_P (op1
) && REG_P (op0
))
876 : CONST_SCALAR_INT_P (op0
))
878 do_compare_rtx_and_jump (res
, tem
, GEU
, true, mode
, NULL_RTX
, NULL
,
879 done_label
, profile_probability::very_likely ());
884 if (!uns0_p
&& !uns1_p
&& unsr_p
)
886 /* Compute the operation. On RTL level, the addition is always
888 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
889 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
890 int pos_neg
= get_range_pos_neg (arg1
);
891 if (code
== PLUS_EXPR
)
893 int pos_neg0
= get_range_pos_neg (arg0
);
894 if (pos_neg0
!= 3 && pos_neg
== 3)
896 std::swap (op0
, op1
);
903 tem
= expand_binop (mode
, ((pos_neg
== 1) ^ (code
== MINUS_EXPR
))
904 ? and_optab
: ior_optab
,
905 op0
, res
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
906 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL
,
907 NULL
, done_label
, profile_probability::very_likely ());
911 rtx_code_label
*do_ior_label
= gen_label_rtx ();
912 do_compare_rtx_and_jump (op1
, const0_rtx
,
913 code
== MINUS_EXPR
? GE
: LT
, false, mode
,
914 NULL_RTX
, NULL
, do_ior_label
,
915 profile_probability::even ());
916 tem
= expand_binop (mode
, and_optab
, op0
, res
, NULL_RTX
, false,
918 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
919 NULL
, done_label
, profile_probability::very_likely ());
920 emit_jump (do_error
);
921 emit_label (do_ior_label
);
922 tem
= expand_binop (mode
, ior_optab
, op0
, res
, NULL_RTX
, false,
924 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
925 NULL
, done_label
, profile_probability::very_likely ());
931 if (code
== MINUS_EXPR
&& uns0_p
&& uns1_p
&& !unsr_p
)
933 /* Compute the operation. On RTL level, the addition is always
935 res
= expand_binop (mode
, sub_optab
, op0
, op1
, NULL_RTX
, false,
937 rtx_code_label
*op0_geu_op1
= gen_label_rtx ();
938 do_compare_rtx_and_jump (op0
, op1
, GEU
, true, mode
, NULL_RTX
, NULL
,
939 op0_geu_op1
, profile_probability::even ());
940 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
, NULL_RTX
,
941 NULL
, done_label
, profile_probability::very_likely ());
942 emit_jump (do_error
);
943 emit_label (op0_geu_op1
);
944 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
945 NULL
, done_label
, profile_probability::very_likely ());
949 gcc_assert (!uns0_p
&& !uns1_p
&& !unsr_p
);
954 insn_code icode
= optab_handler (code
== PLUS_EXPR
? addv4_optab
955 : subv4_optab
, mode
);
956 if (icode
!= CODE_FOR_nothing
)
958 struct expand_operand ops
[4];
959 rtx_insn
*last
= get_last_insn ();
961 res
= gen_reg_rtx (mode
);
962 create_output_operand (&ops
[0], res
, mode
);
963 create_input_operand (&ops
[1], op0
, mode
);
964 create_input_operand (&ops
[2], op1
, mode
);
965 create_fixed_operand (&ops
[3], do_error
);
966 if (maybe_expand_insn (icode
, 4, ops
))
968 last
= get_last_insn ();
969 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
971 && any_condjump_p (last
)
972 && !find_reg_note (last
, REG_BR_PROB
, 0))
973 add_reg_br_prob_note (last
,
974 profile_probability::very_unlikely ());
975 emit_jump (done_label
);
979 delete_insns_since (last
);
982 /* Compute the operation. On RTL level, the addition is always
984 res
= expand_binop (mode
, code
== PLUS_EXPR
? add_optab
: sub_optab
,
985 op0
, op1
, NULL_RTX
, false, OPTAB_LIB_WIDEN
);
987 /* If we can prove that one of the arguments (for MINUS_EXPR only
988 the second operand, as subtraction is not commutative) is always
989 non-negative or always negative, we can do just one comparison
990 and conditional jump. */
991 int pos_neg
= get_range_pos_neg (arg1
);
992 if (code
== PLUS_EXPR
)
994 int pos_neg0
= get_range_pos_neg (arg0
);
995 if (pos_neg0
!= 3 && pos_neg
== 3)
997 std::swap (op0
, op1
);
1002 /* Addition overflows if and only if the two operands have the same sign,
1003 and the result has the opposite sign. Subtraction overflows if and
1004 only if the two operands have opposite sign, and the subtrahend has
1005 the same sign as the result. Here 0 is counted as positive. */
1008 /* Compute op0 ^ op1 (operands have opposite sign). */
1009 rtx op_xor
= expand_binop (mode
, xor_optab
, op0
, op1
, NULL_RTX
, false,
1012 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1013 rtx res_xor
= expand_binop (mode
, xor_optab
, res
, op1
, NULL_RTX
, false,
1017 if (code
== PLUS_EXPR
)
1019 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1020 tem
= expand_unop (mode
, one_cmpl_optab
, op_xor
, NULL_RTX
, false);
1021 tem
= expand_binop (mode
, and_optab
, res_xor
, tem
, NULL_RTX
, false,
1026 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1027 tem
= expand_unop (mode
, one_cmpl_optab
, res_xor
, NULL_RTX
, false);
1028 tem
= expand_binop (mode
, and_optab
, op_xor
, tem
, NULL_RTX
, false,
1032 /* No overflow if the result has bit sign cleared. */
1033 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1034 NULL
, done_label
, profile_probability::very_likely ());
1037 /* Compare the result of the operation with the first operand.
1038 No overflow for addition if second operand is positive and result
1039 is larger or second operand is negative and result is smaller.
1040 Likewise for subtraction with sign of second operand flipped. */
1042 do_compare_rtx_and_jump (res
, op0
,
1043 (pos_neg
== 1) ^ (code
== MINUS_EXPR
) ? GE
: LE
,
1044 false, mode
, NULL_RTX
, NULL
, done_label
,
1045 profile_probability::very_likely ());
1049 emit_label (do_error
);
1052 /* Expand the ubsan builtin call. */
1054 fn
= ubsan_build_overflow_builtin (code
, loc
, TREE_TYPE (arg0
),
1058 do_pending_stack_adjust ();
1061 expand_arith_set_overflow (lhs
, target
);
1064 emit_label (done_label
);
1069 expand_ubsan_result_store (target
, res
);
1073 res
= expand_binop (mode
, add_optab
, res
, sgn
, NULL_RTX
, false,
1076 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
1081 /* Add negate overflow checking to the statement STMT. */
1084 expand_neg_overflow (location_t loc
, tree lhs
, tree arg1
, bool is_ubsan
,
1089 rtx_code_label
*done_label
, *do_error
;
1090 rtx target
= NULL_RTX
;
1092 done_label
= gen_label_rtx ();
1093 do_error
= gen_label_rtx ();
1095 do_pending_stack_adjust ();
1096 op1
= expand_normal (arg1
);
1098 machine_mode mode
= TYPE_MODE (TREE_TYPE (arg1
));
1101 target
= expand_expr (lhs
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
1103 write_complex_part (target
, const0_rtx
, true);
1106 enum insn_code icode
= optab_handler (negv3_optab
, mode
);
1107 if (icode
!= CODE_FOR_nothing
)
1109 struct expand_operand ops
[3];
1110 rtx_insn
*last
= get_last_insn ();
1112 res
= gen_reg_rtx (mode
);
1113 create_output_operand (&ops
[0], res
, mode
);
1114 create_input_operand (&ops
[1], op1
, mode
);
1115 create_fixed_operand (&ops
[2], do_error
);
1116 if (maybe_expand_insn (icode
, 3, ops
))
1118 last
= get_last_insn ();
1119 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
1121 && any_condjump_p (last
)
1122 && !find_reg_note (last
, REG_BR_PROB
, 0))
1123 add_reg_br_prob_note (last
,
1124 profile_probability::very_unlikely ());
1125 emit_jump (done_label
);
1129 delete_insns_since (last
);
1130 icode
= CODE_FOR_nothing
;
1134 if (icode
== CODE_FOR_nothing
)
1136 /* Compute the operation. On RTL level, the addition is always
1138 res
= expand_unop (mode
, neg_optab
, op1
, NULL_RTX
, false);
1140 /* Compare the operand with the most negative value. */
1141 rtx minv
= expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1
)));
1142 do_compare_rtx_and_jump (op1
, minv
, NE
, true, mode
, NULL_RTX
, NULL
,
1143 done_label
, profile_probability::very_likely ());
1146 emit_label (do_error
);
1149 /* Expand the ubsan builtin call. */
1151 fn
= ubsan_build_overflow_builtin (NEGATE_EXPR
, loc
, TREE_TYPE (arg1
),
1152 arg1
, NULL_TREE
, datap
);
1155 do_pending_stack_adjust ();
1158 expand_arith_set_overflow (lhs
, target
);
1161 emit_label (done_label
);
1166 expand_ubsan_result_store (target
, res
);
1168 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
1172 /* Add mul overflow checking to the statement STMT. */
1175 expand_mul_overflow (location_t loc
, tree lhs
, tree arg0
, tree arg1
,
1176 bool unsr_p
, bool uns0_p
, bool uns1_p
, bool is_ubsan
,
1181 rtx_code_label
*done_label
, *do_error
;
1182 rtx target
= NULL_RTX
;
1184 enum insn_code icode
;
1186 done_label
= gen_label_rtx ();
1187 do_error
= gen_label_rtx ();
1189 do_pending_stack_adjust ();
1190 op0
= expand_normal (arg0
);
1191 op1
= expand_normal (arg1
);
1193 machine_mode mode
= TYPE_MODE (TREE_TYPE (arg0
));
1197 target
= expand_expr (lhs
, NULL_RTX
, VOIDmode
, EXPAND_WRITE
);
1199 write_complex_part (target
, const0_rtx
, true);
1203 gcc_assert (!unsr_p
&& !uns0_p
&& !uns1_p
);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
	      : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
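
  /* A worked example of the s1 * s2 -> sr rule above, assuming 8-bit
     operands: for s1 = 20, s2 = 10 the unsigned product is 200, so
     res = (S) 200 = -56 and main_ovf (false) reports the signed overflow;
     for s1 = 10, s2 = 10 the product 100 fits in the signed range and no
     overflow is reported.  */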
1242 if (uns0_p
&& !uns1_p
)
1244 /* Multiplication is commutative, if operand signedness differs,
1245 canonicalize to the first operand being signed and second
1246 unsigned to simplify following code. */
1247 std::swap (op0
, op1
);
1248 std::swap (arg0
, arg1
);
1253 int pos_neg0
= get_range_pos_neg (arg0
);
1254 int pos_neg1
= get_range_pos_neg (arg1
);
1257 if (!uns0_p
&& uns1_p
&& unsr_p
)
1262 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1265 /* If s1 is negative, avoid the main code, just multiply and
1266 signal overflow if op1 is not 0. */
1267 struct separate_ops ops
;
1268 ops
.code
= MULT_EXPR
;
1269 ops
.type
= TREE_TYPE (arg1
);
1270 ops
.op0
= make_tree (ops
.type
, op0
);
1271 ops
.op1
= make_tree (ops
.type
, op1
);
1272 ops
.op2
= NULL_TREE
;
1274 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1275 do_compare_rtx_and_jump (op1
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1276 NULL
, done_label
, profile_probability::very_likely ());
1277 goto do_error_label
;
1279 rtx_code_label
*do_main_label
;
1280 do_main_label
= gen_label_rtx ();
1281 do_compare_rtx_and_jump (op0
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1282 NULL
, do_main_label
, profile_probability::very_likely ());
1283 do_compare_rtx_and_jump (op1
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1284 NULL
, do_main_label
, profile_probability::very_likely ());
1285 expand_arith_set_overflow (lhs
, target
);
1286 emit_label (do_main_label
);
1294 if (uns0_p
&& uns1_p
&& !unsr_p
)
1297 /* Rest of handling of this case after res is computed. */
1302 if (!uns0_p
&& uns1_p
&& !unsr_p
)
1309 /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
1310 avoid the main code, just multiply and signal overflow
1311 unless 0 * u2 or -1 * ((U) Smin). */
1312 struct separate_ops ops
;
1313 ops
.code
= MULT_EXPR
;
1314 ops
.type
= TREE_TYPE (arg1
);
1315 ops
.op0
= make_tree (ops
.type
, op0
);
1316 ops
.op1
= make_tree (ops
.type
, op1
);
1317 ops
.op2
= NULL_TREE
;
1319 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1320 do_compare_rtx_and_jump (op0
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1321 NULL
, done_label
, profile_probability::very_likely ());
1322 do_compare_rtx_and_jump (op0
, constm1_rtx
, NE
, true, mode
, NULL_RTX
,
1323 NULL
, do_error
, profile_probability::very_unlikely ());
1325 prec
= GET_MODE_PRECISION (mode
);
1327 sgn
= immed_wide_int_const (wi::min_value (prec
, SIGNED
), mode
);
1328 do_compare_rtx_and_jump (op1
, sgn
, EQ
, true, mode
, NULL_RTX
,
1329 NULL
, done_label
, profile_probability::very_likely ());
1330 goto do_error_label
;
1332 /* Rest of handling of this case after res is computed. */
1340 if (!uns0_p
&& !uns1_p
&& unsr_p
)
1343 switch (pos_neg0
| pos_neg1
)
1345 case 1: /* Both operands known to be non-negative. */
1347 case 2: /* Both operands known to be negative. */
1348 op0
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, false);
1349 op1
= expand_unop (mode
, neg_optab
, op1
, NULL_RTX
, false);
1350 /* Avoid looking at arg0/arg1 ranges, as we've changed
1352 arg0
= error_mark_node
;
1353 arg1
= error_mark_node
;
1356 if ((pos_neg0
^ pos_neg1
) == 3)
1358 /* If one operand is known to be negative and the other
1359 non-negative, this overflows always, unless the non-negative
1360 one is 0. Just do normal multiply and set overflow
1361 unless one of the operands is 0. */
1362 struct separate_ops ops
;
1363 ops
.code
= MULT_EXPR
;
1365 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode
),
1367 ops
.op0
= make_tree (ops
.type
, op0
);
1368 ops
.op1
= make_tree (ops
.type
, op1
);
1369 ops
.op2
= NULL_TREE
;
1371 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1372 tem
= expand_binop (mode
, and_optab
, op0
, op1
, NULL_RTX
, false,
1374 do_compare_rtx_and_jump (tem
, const0_rtx
, EQ
, true, mode
,
1375 NULL_RTX
, NULL
, done_label
,
1376 profile_probability::very_likely ());
1377 goto do_error_label
;
1379 /* The general case, do all the needed comparisons at runtime. */
1380 rtx_code_label
*do_main_label
, *after_negate_label
;
1382 rop0
= gen_reg_rtx (mode
);
1383 rop1
= gen_reg_rtx (mode
);
1384 emit_move_insn (rop0
, op0
);
1385 emit_move_insn (rop1
, op1
);
1388 do_main_label
= gen_label_rtx ();
1389 after_negate_label
= gen_label_rtx ();
1390 tem
= expand_binop (mode
, and_optab
, op0
, op1
, NULL_RTX
, false,
1392 do_compare_rtx_and_jump (tem
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1393 NULL
, after_negate_label
, profile_probability::very_likely ());
1394 /* Both arguments negative here, negate them and continue with
1395 normal unsigned overflow checking multiplication. */
1396 emit_move_insn (op0
, expand_unop (mode
, neg_optab
, op0
,
1398 emit_move_insn (op1
, expand_unop (mode
, neg_optab
, op1
,
1400 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1402 arg0
= error_mark_node
;
1403 arg1
= error_mark_node
;
1404 emit_jump (do_main_label
);
1405 emit_label (after_negate_label
);
1406 tem2
= expand_binop (mode
, xor_optab
, op0
, op1
, NULL_RTX
, false,
1408 do_compare_rtx_and_jump (tem2
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1409 NULL
, do_main_label
, profile_probability::very_likely ());
1410 /* One argument is negative here, the other positive. This
1411 overflows always, unless one of the arguments is 0. But
1412 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1413 is, thus we can keep do_main code oring in overflow as is. */
1414 do_compare_rtx_and_jump (tem
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1415 NULL
, do_main_label
, profile_probability::very_likely ());
1416 expand_arith_set_overflow (lhs
, target
);
1417 emit_label (do_main_label
);
1425 type
= build_nonstandard_integer_type (GET_MODE_PRECISION (mode
), uns
);
1426 sign
= uns
? UNSIGNED
: SIGNED
;
1427 icode
= optab_handler (uns
? umulv4_optab
: mulv4_optab
, mode
);
1428 if (icode
!= CODE_FOR_nothing
)
1430 struct expand_operand ops
[4];
1431 rtx_insn
*last
= get_last_insn ();
1433 res
= gen_reg_rtx (mode
);
1434 create_output_operand (&ops
[0], res
, mode
);
1435 create_input_operand (&ops
[1], op0
, mode
);
1436 create_input_operand (&ops
[2], op1
, mode
);
1437 create_fixed_operand (&ops
[3], do_error
);
1438 if (maybe_expand_insn (icode
, 4, ops
))
1440 last
= get_last_insn ();
1441 if (profile_status_for_fn (cfun
) != PROFILE_ABSENT
1443 && any_condjump_p (last
)
1444 && !find_reg_note (last
, REG_BR_PROB
, 0))
1445 add_reg_br_prob_note (last
,
1446 profile_probability::very_unlikely ());
1447 emit_jump (done_label
);
1451 delete_insns_since (last
);
1452 icode
= CODE_FOR_nothing
;
1456 if (icode
== CODE_FOR_nothing
)
1458 struct separate_ops ops
;
1459 int prec
= GET_MODE_PRECISION (mode
);
1460 machine_mode hmode
= mode_for_size (prec
/ 2, MODE_INT
, 1);
1461 ops
.op0
= make_tree (type
, op0
);
1462 ops
.op1
= make_tree (type
, op1
);
1463 ops
.op2
= NULL_TREE
;
1465 if (GET_MODE_2XWIDER_MODE (mode
) != VOIDmode
1466 && targetm
.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode
)))
1468 machine_mode wmode
= GET_MODE_2XWIDER_MODE (mode
);
1469 ops
.code
= WIDEN_MULT_EXPR
;
1471 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode
), uns
);
1473 res
= expand_expr_real_2 (&ops
, NULL_RTX
, wmode
, EXPAND_NORMAL
);
1474 rtx hipart
= expand_shift (RSHIFT_EXPR
, wmode
, res
, prec
,
1476 hipart
= convert_modes (mode
, wmode
, hipart
, uns
);
1477 res
= convert_modes (mode
, wmode
, res
, uns
);
1479 /* For the unsigned multiplication, there was overflow if
1480 HIPART is non-zero. */
1481 do_compare_rtx_and_jump (hipart
, const0_rtx
, EQ
, true, mode
,
1482 NULL_RTX
, NULL
, done_label
,
1483 profile_probability::very_likely ());
1486 rtx signbit
= expand_shift (RSHIFT_EXPR
, mode
, res
, prec
- 1,
1488 /* RES is low half of the double width result, HIPART
1489 the high half. There was overflow if
1490 HIPART is different from RES < 0 ? -1 : 0. */
1491 do_compare_rtx_and_jump (signbit
, hipart
, EQ
, true, mode
,
1492 NULL_RTX
, NULL
, done_label
,
1493 profile_probability::very_likely ());
1496 else if (hmode
!= BLKmode
&& 2 * GET_MODE_PRECISION (hmode
) == prec
)
1498 rtx_code_label
*large_op0
= gen_label_rtx ();
1499 rtx_code_label
*small_op0_large_op1
= gen_label_rtx ();
1500 rtx_code_label
*one_small_one_large
= gen_label_rtx ();
1501 rtx_code_label
*both_ops_large
= gen_label_rtx ();
1502 rtx_code_label
*after_hipart_neg
= uns
? NULL
: gen_label_rtx ();
1503 rtx_code_label
*after_lopart_neg
= uns
? NULL
: gen_label_rtx ();
1504 rtx_code_label
*do_overflow
= gen_label_rtx ();
1505 rtx_code_label
*hipart_different
= uns
? NULL
: gen_label_rtx ();
1507 unsigned int hprec
= GET_MODE_PRECISION (hmode
);
1508 rtx hipart0
= expand_shift (RSHIFT_EXPR
, mode
, op0
, hprec
,
1510 hipart0
= convert_modes (hmode
, mode
, hipart0
, uns
);
1511 rtx lopart0
= convert_modes (hmode
, mode
, op0
, uns
);
1512 rtx signbit0
= const0_rtx
;
1514 signbit0
= expand_shift (RSHIFT_EXPR
, hmode
, lopart0
, hprec
- 1,
1516 rtx hipart1
= expand_shift (RSHIFT_EXPR
, mode
, op1
, hprec
,
1518 hipart1
= convert_modes (hmode
, mode
, hipart1
, uns
);
1519 rtx lopart1
= convert_modes (hmode
, mode
, op1
, uns
);
1520 rtx signbit1
= const0_rtx
;
1522 signbit1
= expand_shift (RSHIFT_EXPR
, hmode
, lopart1
, hprec
- 1,
1525 res
= gen_reg_rtx (mode
);
1527 /* True if op0 resp. op1 are known to be in the range of
1529 bool op0_small_p
= false;
1530 bool op1_small_p
= false;
1531 /* True if op0 resp. op1 are known to have all zeros or all ones
1532 in the upper half of bits, but are not known to be
1534 bool op0_medium_p
= false;
1535 bool op1_medium_p
= false;
1536 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1537 nonnegative, 1 if unknown. */
1543 else if (pos_neg0
== 2)
1547 else if (pos_neg1
== 2)
1550 unsigned int mprec0
= prec
;
1551 if (arg0
!= error_mark_node
)
1552 mprec0
= get_min_precision (arg0
, sign
);
1553 if (mprec0
<= hprec
)
1555 else if (!uns
&& mprec0
<= hprec
+ 1)
1556 op0_medium_p
= true;
1557 unsigned int mprec1
= prec
;
1558 if (arg1
!= error_mark_node
)
1559 mprec1
= get_min_precision (arg1
, sign
);
1560 if (mprec1
<= hprec
)
1562 else if (!uns
&& mprec1
<= hprec
+ 1)
1563 op1_medium_p
= true;
1565 int smaller_sign
= 1;
1566 int larger_sign
= 1;
1569 smaller_sign
= op0_sign
;
1570 larger_sign
= op1_sign
;
1572 else if (op1_small_p
)
1574 smaller_sign
= op1_sign
;
1575 larger_sign
= op0_sign
;
1577 else if (op0_sign
== op1_sign
)
1579 smaller_sign
= op0_sign
;
1580 larger_sign
= op0_sign
;
1584 do_compare_rtx_and_jump (signbit0
, hipart0
, NE
, true, hmode
,
1585 NULL_RTX
, NULL
, large_op0
,
1586 profile_probability::unlikely ());
1589 do_compare_rtx_and_jump (signbit1
, hipart1
, NE
, true, hmode
,
1590 NULL_RTX
, NULL
, small_op0_large_op1
,
1591 profile_probability::unlikely ());
1593 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1594 hmode to mode, the multiplication will never overflow. We can
1595 do just one hmode x hmode => mode widening multiplication. */
1596 rtx lopart0s
= lopart0
, lopart1s
= lopart1
;
1597 if (GET_CODE (lopart0
) == SUBREG
)
1599 lopart0s
= shallow_copy_rtx (lopart0
);
1600 SUBREG_PROMOTED_VAR_P (lopart0s
) = 1;
1601 SUBREG_PROMOTED_SET (lopart0s
, uns
? SRP_UNSIGNED
: SRP_SIGNED
);
1603 if (GET_CODE (lopart1
) == SUBREG
)
1605 lopart1s
= shallow_copy_rtx (lopart1
);
1606 SUBREG_PROMOTED_VAR_P (lopart1s
) = 1;
1607 SUBREG_PROMOTED_SET (lopart1s
, uns
? SRP_UNSIGNED
: SRP_SIGNED
);
1609 tree halfstype
= build_nonstandard_integer_type (hprec
, uns
);
1610 ops
.op0
= make_tree (halfstype
, lopart0s
);
1611 ops
.op1
= make_tree (halfstype
, lopart1s
);
1612 ops
.code
= WIDEN_MULT_EXPR
;
1615 = expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1616 emit_move_insn (res
, thisres
);
1617 emit_jump (done_label
);
1619 emit_label (small_op0_large_op1
);
1621 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1622 but op1 is not, just swap the arguments and handle it as op1
1623 sign/zero extended, op0 not. */
1624 rtx larger
= gen_reg_rtx (mode
);
1625 rtx hipart
= gen_reg_rtx (hmode
);
1626 rtx lopart
= gen_reg_rtx (hmode
);
1627 emit_move_insn (larger
, op1
);
1628 emit_move_insn (hipart
, hipart1
);
1629 emit_move_insn (lopart
, lopart0
);
1630 emit_jump (one_small_one_large
);
1632 emit_label (large_op0
);
1635 do_compare_rtx_and_jump (signbit1
, hipart1
, NE
, true, hmode
,
1636 NULL_RTX
, NULL
, both_ops_large
,
1637 profile_probability::unlikely ());
1639 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1640 but op0 is not, prepare larger, hipart and lopart pseudos and
1641 handle it together with small_op0_large_op1. */
1642 emit_move_insn (larger
, op0
);
1643 emit_move_insn (hipart
, hipart0
);
1644 emit_move_insn (lopart
, lopart1
);
1646 emit_label (one_small_one_large
);
1648 /* lopart is the low part of the operand that is sign extended
1649 to mode, larger is the other operand, hipart is the
1650 high part of larger and lopart0 and lopart1 are the low parts
1652 We perform lopart0 * lopart1 and lopart * hipart widening
1654 tree halfutype
= build_nonstandard_integer_type (hprec
, 1);
1655 ops
.op0
= make_tree (halfutype
, lopart0
);
1656 ops
.op1
= make_tree (halfutype
, lopart1
);
1658 = expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1660 ops
.op0
= make_tree (halfutype
, lopart
);
1661 ops
.op1
= make_tree (halfutype
, hipart
);
1662 rtx loxhi
= gen_reg_rtx (mode
);
1663 rtx tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1664 emit_move_insn (loxhi
, tem
);
1668 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1669 if (larger_sign
== 0)
1670 emit_jump (after_hipart_neg
);
1671 else if (larger_sign
!= -1)
1672 do_compare_rtx_and_jump (hipart
, const0_rtx
, GE
, false, hmode
,
1673 NULL_RTX
, NULL
, after_hipart_neg
,
1674 profile_probability::even ());
1676 tem
= convert_modes (mode
, hmode
, lopart
, 1);
1677 tem
= expand_shift (LSHIFT_EXPR
, mode
, tem
, hprec
, NULL_RTX
, 1);
1678 tem
= expand_simple_binop (mode
, MINUS
, loxhi
, tem
, NULL_RTX
,
1680 emit_move_insn (loxhi
, tem
);
1682 emit_label (after_hipart_neg
);
1684 /* if (lopart < 0) loxhi -= larger; */
1685 if (smaller_sign
== 0)
1686 emit_jump (after_lopart_neg
);
1687 else if (smaller_sign
!= -1)
1688 do_compare_rtx_and_jump (lopart
, const0_rtx
, GE
, false, hmode
,
1689 NULL_RTX
, NULL
, after_lopart_neg
,
1690 profile_probability::even ());
1692 tem
= expand_simple_binop (mode
, MINUS
, loxhi
, larger
, NULL_RTX
,
1694 emit_move_insn (loxhi
, tem
);
1696 emit_label (after_lopart_neg
);
1699 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1700 tem
= expand_shift (RSHIFT_EXPR
, mode
, lo0xlo1
, hprec
, NULL_RTX
, 1);
1701 tem
= expand_simple_binop (mode
, PLUS
, loxhi
, tem
, NULL_RTX
,
1703 emit_move_insn (loxhi
, tem
);
1705 /* if (loxhi >> (bitsize / 2)
1706 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
1707 if (loxhi >> (bitsize / 2) == 0 (if uns). */
1708 rtx hipartloxhi
= expand_shift (RSHIFT_EXPR
, mode
, loxhi
, hprec
,
1710 hipartloxhi
= convert_modes (hmode
, mode
, hipartloxhi
, 0);
1711 rtx signbitloxhi
= const0_rtx
;
1713 signbitloxhi
= expand_shift (RSHIFT_EXPR
, hmode
,
1714 convert_modes (hmode
, mode
,
1716 hprec
- 1, NULL_RTX
, 0);
1718 do_compare_rtx_and_jump (signbitloxhi
, hipartloxhi
, NE
, true, hmode
,
1719 NULL_RTX
, NULL
, do_overflow
,
1720 profile_probability::very_unlikely ());
1722 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1723 rtx loxhishifted
= expand_shift (LSHIFT_EXPR
, mode
, loxhi
, hprec
,
1725 tem
= convert_modes (mode
, hmode
,
1726 convert_modes (hmode
, mode
, lo0xlo1
, 1), 1);
1728 tem
= expand_simple_binop (mode
, IOR
, loxhishifted
, tem
, res
,
1731 emit_move_insn (res
, tem
);
1732 emit_jump (done_label
);
1734 emit_label (both_ops_large
);
1736 /* If both operands are large (not sign (!uns) or zero (uns)
1737 extended from hmode), then perform the full multiplication
1738 which will be the result of the operation.
1739 The only cases which don't overflow are for signed multiplication
1740 some cases where both hipart0 and highpart1 are 0 or -1.
1741 For unsigned multiplication when high parts are both non-zero
1742 this overflows always. */
1743 ops
.code
= MULT_EXPR
;
1744 ops
.op0
= make_tree (type
, op0
);
1745 ops
.op1
= make_tree (type
, op1
);
1746 tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1747 emit_move_insn (res
, tem
);
1753 tem
= expand_simple_binop (hmode
, PLUS
, hipart0
, const1_rtx
,
1754 NULL_RTX
, 1, OPTAB_DIRECT
);
1755 do_compare_rtx_and_jump (tem
, const1_rtx
, GTU
, true, hmode
,
1756 NULL_RTX
, NULL
, do_error
,
1757 profile_probability::very_unlikely ());
1762 tem
= expand_simple_binop (hmode
, PLUS
, hipart1
, const1_rtx
,
1763 NULL_RTX
, 1, OPTAB_DIRECT
);
1764 do_compare_rtx_and_jump (tem
, const1_rtx
, GTU
, true, hmode
,
1765 NULL_RTX
, NULL
, do_error
,
1766 profile_probability::very_unlikely ());
1769 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1770 the same, overflow happened if res is negative, if they are
1771 different, overflow happened if res is positive. */
1772 if (op0_sign
!= 1 && op1_sign
!= 1 && op0_sign
!= op1_sign
)
1773 emit_jump (hipart_different
);
1774 else if (op0_sign
== 1 || op1_sign
== 1)
1775 do_compare_rtx_and_jump (hipart0
, hipart1
, NE
, true, hmode
,
1776 NULL_RTX
, NULL
, hipart_different
,
1777 profile_probability::even ());
1779 do_compare_rtx_and_jump (res
, const0_rtx
, LT
, false, mode
,
1780 NULL_RTX
, NULL
, do_error
,
1781 profile_probability::very_unlikely ());
1782 emit_jump (done_label
);
1784 emit_label (hipart_different
);
1786 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
,
1787 NULL_RTX
, NULL
, do_error
,
1788 profile_probability::very_unlikely ());
1789 emit_jump (done_label
);
1792 emit_label (do_overflow
);
1794 /* Overflow, do full multiplication and fallthru into do_error. */
1795 ops
.op0
= make_tree (type
, op0
);
1796 ops
.op1
= make_tree (type
, op1
);
1797 tem
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1798 emit_move_insn (res
, tem
);
1802 gcc_assert (!is_ubsan
);
1803 ops
.code
= MULT_EXPR
;
1805 res
= expand_expr_real_2 (&ops
, NULL_RTX
, mode
, EXPAND_NORMAL
);
1806 emit_jump (done_label
);
1811 emit_label (do_error
);
1814 /* Expand the ubsan builtin call. */
1816 fn
= ubsan_build_overflow_builtin (MULT_EXPR
, loc
, TREE_TYPE (arg0
),
1820 do_pending_stack_adjust ();
1823 expand_arith_set_overflow (lhs
, target
);
1826 emit_label (done_label
);
1829 if (uns0_p
&& uns1_p
&& !unsr_p
)
1831 rtx_code_label
*all_done_label
= gen_label_rtx ();
1832 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1833 NULL
, all_done_label
, profile_probability::very_likely ());
1834 expand_arith_set_overflow (lhs
, target
);
1835 emit_label (all_done_label
);
1839 if (!uns0_p
&& uns1_p
&& !unsr_p
&& pos_neg1
== 3)
1841 rtx_code_label
*all_done_label
= gen_label_rtx ();
1842 rtx_code_label
*set_noovf
= gen_label_rtx ();
1843 do_compare_rtx_and_jump (op1
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1844 NULL
, all_done_label
, profile_probability::very_likely ());
1845 expand_arith_set_overflow (lhs
, target
);
1846 do_compare_rtx_and_jump (op0
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
1847 NULL
, set_noovf
, profile_probability::very_likely ());
1848 do_compare_rtx_and_jump (op0
, constm1_rtx
, NE
, true, mode
, NULL_RTX
,
1849 NULL
, all_done_label
, profile_probability::very_unlikely ());
1850 do_compare_rtx_and_jump (op1
, res
, NE
, true, mode
, NULL_RTX
, NULL
,
1851 all_done_label
, profile_probability::very_unlikely ());
1852 emit_label (set_noovf
);
1853 write_complex_part (target
, const0_rtx
, true);
1854 emit_label (all_done_label
);
1860 expand_ubsan_result_store (target
, res
);
1862 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
1866 /* Expand UBSAN_CHECK_* internal function if it has vector operands. */
static void
expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
                              tree arg0, tree arg1)
{
  int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
  rtx_code_label *loop_lab = NULL;
  rtx cntvar = NULL_RTX;
  tree cntv = NULL_TREE;
  tree eltype = TREE_TYPE (TREE_TYPE (arg0));
  tree sz = TYPE_SIZE (eltype);
  tree data = NULL_TREE;
  tree resv = NULL_TREE;
  rtx lhsr = NULL_RTX;
  rtx resvr = NULL_RTX;

  if (lhs)
    {
      optab op;
      lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!VECTOR_MODE_P (GET_MODE (lhsr))
          || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
                                        optab_default)) == unknown_optab
          || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
              == CODE_FOR_nothing))
        {
          if (MEM_P (lhsr))
            resv = make_tree (TREE_TYPE (lhs), lhsr);
          else
            {
              resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
              resv = make_tree (TREE_TYPE (lhs), resvr);
            }
        }
    }
  if (cnt > 4)
    {
      do_pending_stack_adjust ();
      loop_lab = gen_label_rtx ();
      cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
      cntv = make_tree (sizetype, cntvar);
      emit_move_insn (cntvar, const0_rtx);
      emit_label (loop_lab);
    }
  if (TREE_CODE (arg0) != VECTOR_CST)
    {
      rtx arg0r = expand_normal (arg0);
      arg0 = make_tree (TREE_TYPE (arg0), arg0r);
    }
  if (TREE_CODE (arg1) != VECTOR_CST)
    {
      rtx arg1r = expand_normal (arg1);
      arg1 = make_tree (TREE_TYPE (arg1), arg1r);
    }
  for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
    {
      tree op0, op1, res = NULL_TREE;
      if (cnt > 4)
        {
          tree atype = build_array_type_nelts (eltype, cnt);
          op0 = uniform_vector_p (arg0);
          if (op0 == NULL_TREE)
            {
              op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
              op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
                                NULL_TREE, NULL_TREE);
            }
          op1 = uniform_vector_p (arg1);
          if (op1 == NULL_TREE)
            {
              op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
              op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
                                NULL_TREE, NULL_TREE);
            }
          if (resv)
            {
              res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
              res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
                                NULL_TREE, NULL_TREE);
            }
        }
      else
        {
          tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
          op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
          op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
          if (resv)
            res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
                                   bitpos);
        }
      switch (code)
        {
        case PLUS_EXPR:
          expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
                                  false, false, false, true, &data);
          break;
        case MINUS_EXPR:
          if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
            expand_neg_overflow (loc, res, op1, true, &data);
          else
            expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
                                    false, false, false, true, &data);
          break;
        case MULT_EXPR:
          expand_mul_overflow (loc, res, op0, op1, false, false, false,
                               true, &data);
          break;
        default:
          gcc_unreachable ();
        }
    }
  if (cnt > 4)
    {
      struct separate_ops ops;
      ops.code = PLUS_EXPR;
      ops.type = TREE_TYPE (cntv);
      ops.op0 = cntv;
      ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
                                    EXPAND_NORMAL);
      if (ret != cntvar)
        emit_move_insn (cntvar, ret);
      do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
                               TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
                               profile_probability::very_likely ());
    }
  if (lhs && resv == NULL_TREE)
    {
      struct separate_ops ops;
      ops.code = code;
      ops.type = TREE_TYPE (arg0);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
                                    EXPAND_NORMAL);
      if (ret != lhsr)
        emit_move_insn (lhsr, ret);
    }
  else if (resvr)
    emit_move_insn (lhsr, resvr);
}

/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
  else
    expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
  else if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true, NULL);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
  else
    expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
                         NULL);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
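  /* For example, multiplying two 8-bit signed operands needs
     prec0 + prec1 = 16 bits of signed precision (-128 * -128 = 16384),
     while adding them needs only MAX (prec0, prec1) + 1 = 9 bits
     (-128 + -128 = -256).  */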
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

      /* For operations with low precision, if target doesn't have them, start
         with precres widening right away, otherwise do it only if the most
         simple cases can't be used.  */
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
        ;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
                && prec1 <= precres)
               || ((!uns0_p || !uns1_p) && !unsr_p
                   && prec0 + uns0_p <= precres
                   && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false, NULL);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                      unsr_p, unsr_p, false, NULL);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                   unsr_p, unsr_p, false, NULL);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
          int p = MAX (min_precision, precop);
          machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false, NULL);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false, NULL);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}

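/* The {ADD,SUB,MUL}_OVERFLOW internal functions produce a complex value:
   the real part holds the (possibly wrapped) arithmetic result and the
   imaginary part is the overflow flag.  A GIMPLE sequence roughly like
   (with illustrative SSA names)

       _5 = ADD_OVERFLOW (a_1, b_2);
       sum_6 = REALPART_EXPR <_5>;
       ovf_7 = IMAGPART_EXPR <_5>;

   is therefore expanded by clearing the imaginary part up front with
   write_complex_part and setting it on the overflow paths.  */
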
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand MASK_LOAD call STMT using optab OPTAB.  */
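/* A MASK_LOAD call carries the base pointer as argument 0, the alignment
   (compared against TYPE_ALIGN below, hence in bits) as argument 1 and the
   mask vector as argument 2; MASK_STORE further down additionally passes
   the value to store as argument 3.  The expansion builds a suitably
   aligned MEM_REF of the vector type and emits the instruction selected by
   the convert optab handler for the (vector mode, mask mode) pair.  */
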
static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}

/* Expand MASK_STORE call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, reg, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
                                      TYPE_MODE (TREE_TYPE (maskt))),
               3, ops);
}

static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}

static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
        pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
        {
          tree lhs = gimple_call_lhs (stmt);
          rtx target = const0_rtx;

          if (lhs)
            target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

          rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
          rtx axis = expand_normal (gimple_call_arg (stmt, 2));

          if (kind == IFN_UNIQUE_OACC_FORK)
            pattern = targetm.gen_oacc_fork (target, data_dep, axis);
          else
            pattern = targetm.gen_oacc_join (target, data_dep, axis);
        }
      else
        gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}

/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    emit_move_insn (target, GEN_INT (1));
}

/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_TILE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
                  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}

/* Expand atomic bit test and set.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and complement.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and reset.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic compare and exchange.  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}

/* Expand LAUNDER to assignment, lhs = arg0.  */

static void
expand_LAUNDER (internal_fn, gcall *call)
{
  tree lhs = gimple_call_lhs (call);

  if (!lhs)
    return;

  expand_assignment (lhs, gimple_call_arg (call, 0), false);
}

/* Expand DIVMOD() using:
 a) optab handler for udivmod/sdivmod if it is available.
 b) If optab_handler doesn't exist, generate call to
    target-specific divmod libfunc.  */
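/* The result of DIVMOD is a complex value whose real part is the quotient
   and whose imaginary part is the remainder, so a GIMPLE sequence roughly
   like (with illustrative SSA names)

       _3 = DIVMOD (x_1, y_2);
       q_4 = REALPART_EXPR <_3>;
       r_5 = IMAGPART_EXPR <_3>;

   computes both x_1 / y_2 and x_1 % y_2 from a single division.  */
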
static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check if optab_handler exists for divmod_optab for given mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate call to divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
                                   &quotient, &remainder);
  else
    gcc_unreachable ();

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
                       make_tree (TREE_TYPE (arg0), quotient),
                       make_tree (TREE_TYPE (arg1), remainder)),
               target, VOIDmode, EXPAND_NORMAL);
}

/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
                        unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
        create_convert_operand_from (&ops[i + 1], rhs_rtx,
                                     TYPE_MODE (rhs_type),
                                     TYPE_UNSIGNED (rhs_type));
      else
        create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
         result to that type.  This is useful for things that return an
         int regardless of the size of the input.  If the instruction result
         is smaller than required, assume that it is signed.

         If the return value has a nonintegral type, its mode must match
         the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
        {
          /* If this is a scalar in a register that is stored in a wider
             mode than the declared mode, compute the result into its
             declared mode and then convert to the wider mode.  */
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
          convert_move (SUBREG_REG (lhs_rtx), tmp,
                        SUBREG_PROMOTED_SIGN (lhs_rtx));
        }
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
        emit_move_insn (lhs_rtx, ops[0].value);
      else
        {
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          convert_move (lhs_rtx, ops[0].value, 0);
        }
    }
}

/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)

/* RETURN_TYPE and ARGS are a return type and argument list that are
   in principle compatible with FN (which satisfies direct_internal_fn_p).
   Return the types that should be used to determine whether the
   target supports FN.  */
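/* For example, an entry whose type0 field is negative takes type0 from the
   call's return type (or lhs), while a nonnegative value selects the type
   of the corresponding call argument; a masked load thus pairs the loaded
   vector type from the lhs with the type of its mask operand.  */
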
tree_pair
direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
  tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
  return tree_pair (type0, type1);
}

/* CALL is a call whose return type and arguments are in principle
   compatible with FN (which satisfies direct_internal_fn_p).  Return the
   types that should be used to determine whether the target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, gcall *call)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree op0 = (info.type0 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type0));
  tree op1 = (info.type1 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type1));
  return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
}

/* Return true if OPTAB is supported for TYPES (whose modes should be
   the same) when the optimization type is OPT_TYPE.  Used for simple
   direct optabs.  */

static bool
direct_optab_supported_p (direct_optab optab, tree_pair types,
                          optimization_type opt_type)
{
  machine_mode mode = TYPE_MODE (types.first);
  gcc_checking_assert (mode == TYPE_MODE (types.second));
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}

/* Return true if load/store lanes optab OPTAB is supported for
   array type TYPES.first when the optimization type is OPT_TYPE.  */

static bool
multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
                                optimization_type opt_type)
{
  gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
  machine_mode imode = TYPE_MODE (types.first);
  machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
  return (convert_optab_handler (optab, imode, vmode, opt_type)
          != CODE_FOR_nothing);
}

#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p

/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
                                optimization_type opt_type)
{
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
						 opt_type);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}

/* Return true if FN is supported for type TYPE when the optimization
   type is OPT_TYPE.  The caller knows that the "type0" and "type1"
   fields of FN's direct_internal_fn_info structure are the same.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree type,
                                optimization_type opt_type)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  gcc_checking_assert (info.type0 == info.type1);
  return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}

/* Return true if IFN_SET_EDOM is supported.  */

bool
set_edom_supported_p (void)
{
#ifdef TARGET_EDOM
  return true;
#else
  return false;
#endif
}

#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void						\
  expand_##CODE (internal_fn fn, gcall *stmt)		\
  {							\
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab);	\
  }
#include "internal-fn.def"

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call. */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};

/* Expand STMT as though it were a call to internal function FN.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}

void
expand_PHI (internal_fn, gcall *)
{
  gcc_unreachable ();
}