/* Copyright (C) 2011-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "optabs-tree.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
/* Return the internal function called NAME, or IFN_LAST if there's
   no such function.  */
lookup_internal_fn (const char *name)
  typedef hash_map<nofree_string_hash, internal_fn> name_to_fn_map_type;
  static name_to_fn_map_type *name_to_fn_map;
      name_to_fn_map = new name_to_fn_map_type (IFN_LAST);
      for (unsigned int i = 0; i < IFN_LAST; ++i)
        name_to_fn_map->put (internal_fn_name (internal_fn (i)),
  internal_fn *entry = name_to_fn_map->get (name);
  return entry ? *entry : IFN_LAST;
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_load_lanes_direct { -1, -1, false }
#define gather_load_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define mask_store_lanes_direct { 0, 0, false }
#define scatter_store_direct { 3, 3, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }
#define ternary_direct { 0, 0, true }
#define cond_unary_direct { 1, 1, true }
#define cond_binary_direct { 1, 1, true }
#define cond_ternary_direct { 1, 1, true }
#define while_direct { 0, 2, false }
#define fold_extract_direct { 2, 2, false }
#define fold_left_direct { 1, 1, false }
#define mask_fold_left_direct { 1, 1, false }
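
/* Note (added commentary, not in the original file): each triple above
   initializes a direct_internal_fn_info as declared in internal-fn.h.
   The first two fields select the call argument whose type determines
   the optab mode(s), with -1 meaning the return type and -2 meaning the
   function has no directly-mapped optab; the final flag says whether the
   function can be used on vector types.  */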
const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
                                     UNSIGNED_OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));
  return convert_optab_handler (optab, imode, vmode);
/* Expand LOAD_LANES call STMT using optab OPTAB.  */

expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
  class expand_operand ops[2];
  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);
  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
/* Expand STORE_LANES call STMT using optab OPTAB.  */

expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
  class expand_operand ops[2];
  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);
  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));
  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
expand_ANNOTATE (internal_fn, gcall *)

/* This should get expanded in omp_device_lower pass.  */

expand_GOMP_USE_SIMT (internal_fn, gcall *)

/* This should get expanded in omp_device_lower pass.  */

expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
/* Allocate per-lane storage and begin non-uniform execution region.  */

expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
    target = gen_reg_rtx (Pmode);
  rtx size = expand_normal (gimple_call_arg (stmt, 0));
  rtx align = expand_normal (gimple_call_arg (stmt, 1));
  class expand_operand ops[3];
  create_output_operand (&ops[0], target, Pmode);
  create_input_operand (&ops[1], size, Pmode);
  create_input_operand (&ops[2], align, Pmode);
  gcc_assert (targetm.have_omp_simt_enter ());
  expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
/* Deallocate per-lane storage and leave non-uniform execution region.  */

expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
  gcc_checking_assert (!gimple_call_lhs (stmt));
  rtx arg = expand_normal (gimple_call_arg (stmt, 0));
  class expand_operand ops[1];
  create_input_operand (&ops[0], arg, Pmode);
  gcc_assert (targetm.have_omp_simt_exit ());
  expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
/* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
   without SIMT execution this should be expanded in omp_device_lower pass.  */

expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (targetm.have_omp_simt_lane ());
  emit_insn (targetm.gen_omp_simt_lane (target));
/* This should get expanded in omp_device_lower pass.  */

expand_GOMP_SIMT_VF (internal_fn, gcall *)
/* Lane index of the first SIMT lane that supplies a non-zero argument.
   This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
   lane that executed the last iteration for handling OpenMP lastprivate.  */

expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  class expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_last_lane ());
  expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */

expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  class expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], ctr, mode);
  gcc_assert (targetm.have_omp_simt_ordered ());
  expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
/* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
   any lane supplies a non-zero argument.  */

expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  class expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_vote_any ());
  expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
   is destination lane index XOR given offset.  */

expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  class expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_bfly ());
  expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
/* Exchange between SIMT lanes according to given source lane index.  */

expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
  tree lhs = gimple_call_lhs (stmt);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  class expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_idx ());
  expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
/* This should get expanded in adjust_simduid_builtins.  */

expand_GOMP_SIMD_LANE (internal_fn, gcall *)

/* This should get expanded in adjust_simduid_builtins.  */

expand_GOMP_SIMD_VF (internal_fn, gcall *)

/* This should get expanded in adjust_simduid_builtins.  */

expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)

/* This should get expanded in adjust_simduid_builtins.  */

expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)

/* This should get expanded in adjust_simduid_builtins.  */

expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
/* This should get expanded in the sanopt pass.  */

expand_UBSAN_NULL (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_UBSAN_BOUNDS (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_UBSAN_VPTR (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_UBSAN_PTR (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_ASAN_CHECK (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_ASAN_MARK (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_ASAN_POISON (internal_fn, gcall *)

/* This should get expanded in the sanopt pass.  */

expand_ASAN_POISON_USE (internal_fn, gcall *)

/* This should get expanded in the tsan pass.  */

expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
/* This should get expanded in the lower pass.  */

expand_FALLTHROUGH (internal_fn, gcall *call)
  error_at (gimple_location (call),
            "invalid use of attribute %<fallthrough%>");
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */
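
/* Illustration (added commentary, not in the original file): if range
   information proves ARG lies in [0, 100], wi::min_precision yields 7 bits
   for UNSIGNED but 8 bits for SIGNED, since a signed representation must
   also keep a sign bit.  */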
get_min_precision (tree arg, signop sign)
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        p = wi::min_precision (wi::to_wide (arg), sign);
      return MIN (p, prec);
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        return prec + (orig_sign != sign);
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                return prec + (orig_sign != sign);
      return prec + (orig_sign != sign);
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      prec = MIN (prec, p1);
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
  return prec + (orig_sign != sign);
/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

expand_arith_set_overflow (tree lhs, rtx target)
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
    write_complex_part (target, const1_rtx, true);
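
/* Added note (not in the original file): a signed 1-bit bitfield type can
   only hold the values 0 and -1, so "true" has to be stored as constm1_rtx
   there; every other result type stores plain const1_rtx.  */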
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

expand_arith_overflow_result_store (tree lhs, rtx target,
                                    scalar_int_mode mode, rtx res)
  scalar_int_mode tgtmode
    = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
            = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
          lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
                                      true, OPTAB_LIB_WIDEN);
          lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
          lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
      do_compare_rtx_and_jump (res, lres,
                               EQ, true, tgtmode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
  write_complex_part (target, lres, false);
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

expand_ubsan_result_store (rtx target, rtx res)
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
    emit_move_insn (target, res);
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan, tree *datap)
  rtx res, target = NULL_RTX;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
        ovf = s1 < 0 || u2 > (U) s1
        ovf = u1 >= ((U) s2 ^ sgn)
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
        ovf = (U) res < u2 || res < 0
        ovf = u1 >= u2 ? res < 0 : res >= 0
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
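
  /* Worked example (added commentary, not in the original comment): with
     8-bit operands, s1 = 100 and s2 = 50 give res = (S) ((U) 100 + (U) 50)
     = (S) 150 = -106.  Because s2 >= 0 and res < s1, the signed-addition
     rule above flags overflow, matching the fact that 150 > 127.  */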
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
  if (uns0_p && uns1_p && unsr_p)
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
                                       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
          class expand_operand ops[4];
          rtx_insn *last = get_last_insn ();
          res = gen_reg_rtx (mode);
          create_output_operand (&ops[0], res, mode);
          create_input_operand (&ops[1], op0, mode);
          create_input_operand (&ops[2], op1, mode);
          create_fixed_operand (&ops[3], do_error);
          if (maybe_expand_insn (icode, 4, ops))
              last = get_last_insn ();
              if (profile_status_for_fn (cfun) != PROFILE_ABSENT
                  && any_condjump_p (last)
                  && !find_reg_note (last, REG_BR_PROB, 0))
                add_reg_br_prob_note (last,
                                      profile_probability::very_unlikely ());
              emit_jump (done_label);
            delete_insns_since (last);
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
  if (!uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      int pos_neg = get_range_pos_neg (arg0);
      /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, profile_probability::very_unlikely ());
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, profile_probability::very_unlikely ());
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
  if (!uns0_p && !uns1_p && unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
              std::swap (op0, op1);
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                              ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, profile_probability::very_likely ());
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label,
                                   profile_probability::even ());
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, profile_probability::very_likely ());
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, profile_probability::very_likely ());
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, profile_probability::even ());
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, profile_probability::very_likely ());
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, profile_probability::very_likely ());
  gcc_assert (!uns0_p && !uns1_p && !unsr_p);
      insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
                                       : subv4_optab, mode);
      if (icode != CODE_FOR_nothing)
          class expand_operand ops[4];
          rtx_insn *last = get_last_insn ();
          res = gen_reg_rtx (mode);
          create_output_operand (&ops[0], res, mode);
          create_input_operand (&ops[1], op0, mode);
          create_input_operand (&ops[2], op1, mode);
          create_fixed_operand (&ops[3], do_error);
          if (maybe_expand_insn (icode, 4, ops))
              last = get_last_insn ();
              if (profile_status_for_fn (cfun) != PROFILE_ABSENT
                  && any_condjump_p (last)
                  && !find_reg_note (last, REG_BR_PROB, 0))
                add_reg_br_prob_note (last,
                                      profile_probability::very_unlikely ());
              emit_jump (done_label);
              goto do_error_label;
            delete_insns_since (last);
  /* Compute the operation.  On RTL level, the addition is always
     unsigned.  */
  res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                      op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
  /* If we can prove that one of the arguments (for MINUS_EXPR only
     the second operand, as subtraction is not commutative) is always
     non-negative or always negative, we can do just one comparison
     and conditional jump.  */
  int pos_neg = get_range_pos_neg (arg1);
  if (code == PLUS_EXPR)
      int pos_neg0 = get_range_pos_neg (arg0);
      if (pos_neg0 != 3 && pos_neg == 3)
          std::swap (op0, op1);
  /* Addition overflows if and only if the two operands have the same sign,
     and the result has the opposite sign.  Subtraction overflows if and
     only if the two operands have opposite sign, and the subtrahend has
     the same sign as the result.  Here 0 is counted as positive.  */
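
  /* Added example (not in the original comment): for 8-bit op0 = 100 and
     op1 = 50 the addition gives res = -106.  op0 ^ op1 has a clear sign bit
     (same-sign operands) while res ^ op1 has it set, so
     (res ^ op1) & ~(op0 ^ op1) is negative and the code below reports
     overflow.  */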
      /* Compute op0 ^ op1 (operands have opposite sign).  */
      rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
      /* Compute res ^ op1 (result and 2nd operand have opposite sign).  */
      rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
      if (code == PLUS_EXPR)
          /* Compute (res ^ op1) & ~(op0 ^ op1).  */
          tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
          tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
          /* Compute (op0 ^ op1) & ~(res ^ op1).  */
          tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
          tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
      /* No overflow if the result has bit sign cleared.  */
      do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, profile_probability::very_likely ());
      /* Compare the result of the operation with the first operand.
         No overflow for addition if second operand is positive and result
         is larger or second operand is negative and result is smaller.
         Likewise for subtraction with sign of second operand flipped.  */
      do_compare_rtx_and_jump (res, op0,
                               (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
                               false, mode, NULL_RTX, NULL, done_label,
                               profile_probability::very_likely ());
  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
      do_pending_stack_adjust ();
    expand_arith_set_overflow (lhs, target);
  emit_label (done_label);
        expand_ubsan_result_store (target, res);
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
        expand_arith_overflow_result_store (lhs, target, mode, res);
/* Add negate overflow checking to the statement STMT.  */

expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);
  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_reg_br_prob_note (last,
                                  profile_probability::very_unlikely ());
          emit_jump (done_label);
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
  if (icode == CODE_FOR_nothing)
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, profile_probability::very_likely ());
  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE, datap);
      do_pending_stack_adjust ();
    expand_arith_set_overflow (lhs, target);
  emit_label (done_label);
        expand_ubsan_result_store (target, res);
        expand_arith_overflow_result_store (lhs, target, mode, res);
/* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
   mode MODE can be expanded without using a libcall.  */

can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
                                rtx op0, rtx op1, bool uns)
  if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
      != CODE_FOR_nothing)
  if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
      != CODE_FOR_nothing)
  rtx_insn *last = get_last_insn ();
  if (CONSTANT_P (op0))
    op0 = convert_modes (wmode, mode, op0, uns);
    op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
  if (CONSTANT_P (op1))
    op1 = convert_modes (wmode, mode, op1, uns);
    op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
  rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
  delete_insns_since (last);
  return ret != NULL_RTX;
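
/* Added note (not in the original file): when neither widening-multiply
   optab matches directly, the code above performs a trial expansion with
   expand_mult on scratch operands and then deletes the generated insns;
   only the fact of whether the expansion succeeded without needing a
   libcall is kept.  */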
/* Add mul overflow checking to the statement STMT.  */

expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  enum insn_code icode;
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
        write_complex_part (target, const0_rtx, true);
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
        ovf = main_ovf (true)
        ovf = (s1 < 0 && u2) || main_ovf (true)
        ovf = res < 0 || main_ovf (true)
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
                          : (s1 != 0 && (s1 != -1 || u2 != (U) res))
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
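
  /* Worked example (added commentary, not in the original comment): for
     8-bit u1 = u2 = 12 and a signed result, u1 * u2 = 144 fits the unsigned
     range, but (S) 144 = -112 is negative, so the
     "ovf = res < 0 || main_ovf (true)" rule correctly reports overflow.  */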
  if (uns0_p && !uns1_p)
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);
  if (!uns0_p && uns1_p && unsr_p)
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, profile_probability::very_likely ());
          goto do_error_label;
          rtx_code_label *do_main_label;
          do_main_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, profile_probability::very_likely ());
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, profile_probability::very_likely ());
          expand_arith_set_overflow (lhs, target);
          emit_label (do_main_label);
  if (uns0_p && uns1_p && !unsr_p)
      /* Rest of handling of this case after res is computed.  */
  if (!uns0_p && uns1_p && !unsr_p)
          /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
             avoid the main code, just multiply and signal overflow
             unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, profile_probability::very_likely ());
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL, do_error, profile_probability::very_unlikely ());
          prec = GET_MODE_PRECISION (mode);
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, profile_probability::very_likely ());
          goto do_error_label;
      /* Rest of handling of this case after res is computed.  */
  if (!uns0_p && !uns1_p && unsr_p)
      switch (pos_neg0 | pos_neg1)
        case 1: /* Both operands known to be non-negative.  */
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the values.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          if ((pos_neg0 ^ pos_neg1) == 3)
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       profile_probability::very_likely ());
              goto do_error_label;
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, after_negate_label, profile_probability::very_likely ());
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             the values.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, profile_probability::very_likely ());
          /* One argument is negative here, the other positive.  This
             overflows always, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, thus we can keep do_main code oring in overflow as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, profile_probability::very_likely ());
          expand_arith_set_overflow (lhs, target);
          emit_label (do_main_label);
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
      && (integer_pow2p (arg0) || integer_pow2p (arg1))
      && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
      /* Optimize unsigned multiplication by power of 2 constant
         using 2 shifts, one for result, one to extract the shifted
         out bits to see if they are all zero.
         Don't do this if optimizing for size and we have umulv4_optab,
         in that case assume multiplication will be shorter.
         This is heuristics based on the single target that provides
         umulv4 right now (i?86/x86_64), if further targets add it, this
         might need to be revisited.
         Cases where both operands are constant should be folded already
         during GIMPLE, and cases where one operand is constant but not
         power of 2 are questionable, either the WIDEN_MULT_EXPR case
         below can be done without multiplication, just by shifts and adds,
         or we'd need to divide the result (and hope it actually doesn't
         really divide nor multiply) and compare the result of the division
         with the original operand.  */
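
      /* Added example (not in the original comment): multiplying a 32-bit
         unsigned value by 8 becomes "res = op << 3"; the bits shifted out,
         "op >> (32 - 3)", are checked against zero below, and any non-zero
         bit there means the product did not fit, so overflow is signalled.  */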
      if (integer_pow2p (arg0))
          std::swap (opn0, opn1);
          std::swap (argn0, argn1);
      int cnt = tree_log2 (argn1);
      if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
          rtx upper = const0_rtx;
          res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
            upper = expand_shift (RSHIFT_EXPR, mode, opn0,
                                  GET_MODE_PRECISION (mode) - cnt,
          do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
                                   NULL_RTX, NULL, done_label,
                                   profile_probability::very_likely ());
          goto do_error_label;
  if (icode != CODE_FOR_nothing)
      class expand_operand ops[4];
      rtx_insn *last = get_last_insn ();
      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_reg_br_prob_note (last,
                                  profile_probability::very_unlikely ());
          emit_jump (done_label);
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
  if (icode == CODE_FOR_nothing)
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      scalar_int_mode hmode, wmode;
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      /* Optimize unsigned overflow check where we don't use the
         multiplication result, just whether overflow happened.
         If we can do MULT_HIGHPART_EXPR, that followed by
         comparison of the result against zero is cheapest.
         We'll still compute res, but it should be DCEd later.  */
          && !(uns0_p && uns1_p && !unsr_p)
          && can_mult_highpart_p (mode, uns) == 1
          && single_imm_use (lhs, &use, &use_stmt)
          && is_gimple_assign (use_stmt)
          && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
      if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
          && targetm.scalar_mode_supported_p (wmode)
          && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
          ops.code = WIDEN_MULT_EXPR;
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
          hipart = convert_modes (mode, wmode, hipart, uns);
          res = convert_modes (mode, wmode, res, uns);
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     profile_probability::very_likely ());
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       profile_probability::very_likely ());
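              /* Worked example (added commentary, not in the original file):
                 for 8-bit operands -5 * 7 the 16-bit widened product is
                 0xFFDD; RES = 0xDD = -35 is negative and HIPART = 0xFF = -1
                 equals RES < 0 ? -1 : 0, so no overflow is reported.  */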
      else if (can_mult_highpart_p (mode, uns) == 1)
          ops.code = MULT_HIGHPART_EXPR;
          rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
          ops.code = MULT_EXPR;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     profile_probability::very_likely ());
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       profile_probability::very_likely ());
      else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
               && 2 * GET_MODE_PRECISION (hmode) == prec)
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
          hipart0 = convert_modes (hmode, mode, hipart0, uns);
          rtx lopart0 = convert_modes (hmode, mode, op0, uns);
          rtx signbit0 = const0_rtx;
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
          hipart1 = convert_modes (hmode, mode, hipart1, uns);
          rtx lopart1 = convert_modes (hmode, mode, op1, uns);
          rtx signbit1 = const0_rtx;
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
          res = gen_reg_rtx (mode);
          /* True if op0 resp. op1 are known to be in the range of
             halves.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op{0,1}_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          else if (pos_neg0 == 2)
          else if (pos_neg1 == 2)
          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;
          int smaller_sign = 1;
          int larger_sign = 1;
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
          else if (op1_small_p)
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
          else if (op0_sign == op1_sign)
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
                                     profile_probability::unlikely ());
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,
                                     profile_probability::unlikely ());
          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
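
          /* Added note (not in the original file): when both operands are
             just sign/zero extensions of hprec-bit values, their product
             always fits in the prec = 2 * hprec bit result (for instance,
             two signed operands of magnitude at most 2^(hprec-1) multiply
             to a magnitude of at most 2^(prec-2)), so the single widening
             multiply below needs no overflow check.  */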
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0);
          ops.op1 = make_tree (halfstype, lopart1);
          ops.code = WIDEN_MULT_EXPR;
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);
          emit_label (small_op0_large_op1);
          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);
          emit_label (large_op0);
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,
                                     profile_probability::unlikely ());
          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);
          emit_label (one_small_one_large);
          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
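
          /* Added note (not in the original file): writing larger as
             hipart * 2^hprec + lopart(larger), the product is
             (lopart * hipart) << hprec  +  lopart0 * lopart1, which is what
             the two widening multiplications and the shifts below compute
             limb by limb, with sign corrections applied afterwards.  */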
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_hipart_neg,
                                         profile_probability::even ());
              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
              emit_move_insn (loxhi, tem);
              emit_label (after_hipart_neg);
              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_lopart_neg,
                                         profile_probability::even ());
              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
              emit_move_insn (loxhi, tem);
              emit_label (after_lopart_neg);
          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
          emit_move_insn (loxhi, tem);
          /* if (loxhi >> (bitsize / 2)
             == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
             if (loxhi >> (bitsize / 2) == 0 (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
          hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
          rtx signbitloxhi = const0_rtx;
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         convert_modes (hmode, mode,
                                         hprec - 1, NULL_RTX, 0);
          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   profile_probability::very_unlikely ());
          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
          tem = convert_modes (mode, hmode,
                               convert_modes (hmode, mode, lo0xlo1, 1), 1);
          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
          emit_move_insn (res, tem);
          emit_jump (done_label);
          emit_label (both_ops_large);
          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and highpart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
                  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                             NULL_RTX, 1, OPTAB_WIDEN);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           profile_probability::very_unlikely ());
                  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                             NULL_RTX, 1, OPTAB_WIDEN);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           profile_probability::very_unlikely ());
              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is non-positive, if they
                 are different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL, hipart_different,
                                         profile_probability::even ());
              do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       profile_probability::very_unlikely ());
              emit_jump (done_label);
              emit_label (hipart_different);
              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       profile_probability::very_unlikely ());
              emit_jump (done_label);
          emit_label (do_overflow);
          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
      else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
               && targetm.scalar_mode_supported_p (wmode))
        /* Even emitting a libcall is better than not detecting overflow
           at all.  */
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);
  emit_label (do_error);
      /* Expand the ubsan builtin call.  */
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
      do_pending_stack_adjust ();
    expand_arith_set_overflow (lhs, target);
  emit_label (done_label);
1982 if (uns0_p
&& uns1_p
&& !unsr_p
)
1984 rtx_code_label
*all_done_label
= gen_label_rtx ();
1985 do_compare_rtx_and_jump (res
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1986 NULL
, all_done_label
, profile_probability::very_likely ());
1987 expand_arith_set_overflow (lhs
, target
);
1988 emit_label (all_done_label
);
1992 if (!uns0_p
&& uns1_p
&& !unsr_p
&& pos_neg1
== 3)
1994 rtx_code_label
*all_done_label
= gen_label_rtx ();
1995 rtx_code_label
*set_noovf
= gen_label_rtx ();
1996 do_compare_rtx_and_jump (op1
, const0_rtx
, GE
, false, mode
, NULL_RTX
,
1997 NULL
, all_done_label
, profile_probability::very_likely ());
1998 expand_arith_set_overflow (lhs
, target
);
1999 do_compare_rtx_and_jump (op0
, const0_rtx
, EQ
, true, mode
, NULL_RTX
,
2000 NULL
, set_noovf
, profile_probability::very_likely ());
2001 do_compare_rtx_and_jump (op0
, constm1_rtx
, NE
, true, mode
, NULL_RTX
,
2002 NULL
, all_done_label
, profile_probability::very_unlikely ());
2003 do_compare_rtx_and_jump (op1
, res
, NE
, true, mode
, NULL_RTX
, NULL
,
2004 all_done_label
, profile_probability::very_unlikely ());
2005 emit_label (set_noovf
);
2006 write_complex_part (target
, const0_rtx
, true);
2007 emit_label (all_done_label
);
2013 expand_ubsan_result_store (target
, res
);
2015 expand_arith_overflow_result_store (lhs
, target
, mode
, res
);
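/* Illustrative sketch (an assumption for exposition, not code from this
   file): at the source level, the check expanded above corresponds roughly
   to what a widening multiplication does for __builtin_mul_overflow:

     bool
     mul_overflows (int op0, int op1, int *res)
     {
       long long wide = (long long) op0 * op1;
       *res = (int) wide;
       return wide != *res;
     }

   expand_mul_overflow emits the same test in RTL, either via a genuinely
   wider mode when the target supports one or, as in the branch just
   expanded, by splitting each operand into hmode halves.  */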
/* Expand UBSAN_CHECK_* internal function if it has vector operands.  */

static void
expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
			      tree arg0, tree arg1)
{
  poly_uint64 cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
  rtx_code_label *loop_lab = NULL;
  rtx cntvar = NULL_RTX;
  tree cntv = NULL_TREE;
  tree eltype = TREE_TYPE (TREE_TYPE (arg0));
  tree sz = TYPE_SIZE (eltype);
  tree data = NULL_TREE;
  tree resv = NULL_TREE;
  rtx lhsr = NULL_RTX;
  rtx resvr = NULL_RTX;
  unsigned HOST_WIDE_INT const_cnt = 0;
  bool use_loop_p = (!cnt.is_constant (&const_cnt) || const_cnt > 4);

  if (lhs)
    {
      optab op;
      lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!VECTOR_MODE_P (GET_MODE (lhsr))
	  || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
					optab_default)) == unknown_optab
	  || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
	      == CODE_FOR_nothing))
	{
	  if (MEM_P (lhsr))
	    resv = make_tree (TREE_TYPE (lhs), lhsr);
	  else
	    {
	      resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
	      resv = make_tree (TREE_TYPE (lhs), resvr);
	    }
	}
    }
  if (use_loop_p)
    {
      do_pending_stack_adjust ();
      loop_lab = gen_label_rtx ();
      cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
      cntv = make_tree (sizetype, cntvar);
      emit_move_insn (cntvar, const0_rtx);
      emit_label (loop_lab);
    }
  if (TREE_CODE (arg0) != VECTOR_CST)
    {
      rtx arg0r = expand_normal (arg0);
      arg0 = make_tree (TREE_TYPE (arg0), arg0r);
    }
  if (TREE_CODE (arg1) != VECTOR_CST)
    {
      rtx arg1r = expand_normal (arg1);
      arg1 = make_tree (TREE_TYPE (arg1), arg1r);
    }
  for (unsigned int i = 0; i < (use_loop_p ? 1 : const_cnt); i++)
    {
      tree op0, op1, res = NULL_TREE;
      if (use_loop_p)
	{
	  tree atype = build_array_type_nelts (eltype, cnt);
	  op0 = uniform_vector_p (arg0);
	  if (op0 == NULL_TREE)
	    {
	      op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
	      op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
				NULL_TREE, NULL_TREE);
	    }
	  op1 = uniform_vector_p (arg1);
	  if (op1 == NULL_TREE)
	    {
	      op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
	      op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
				NULL_TREE, NULL_TREE);
	    }
	  if (resv)
	    {
	      res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
	      res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
				NULL_TREE, NULL_TREE);
	    }
	}
      else
	{
	  tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
	  op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
	  op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
	  if (resv)
	    res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
				   bitpos);
	}
      switch (code)
	{
	case PLUS_EXPR:
	  expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
				  false, false, false, true, &data);
	  break;
	case MINUS_EXPR:
	  if (use_loop_p ? integer_zerop (arg0) : integer_zerop (op0))
	    expand_neg_overflow (loc, res, op1, true, &data);
	  else
	    expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
				    false, false, false, true, &data);
	  break;
	case MULT_EXPR:
	  expand_mul_overflow (loc, res, op0, op1, false, false, false,
			       true, &data);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  if (use_loop_p)
    {
      struct separate_ops ops;
      ops.code = PLUS_EXPR;
      ops.type = TREE_TYPE (cntv);
      ops.op0 = cntv;
      ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
				    EXPAND_NORMAL);
      if (ret != cntvar)
	emit_move_insn (cntvar, ret);
      rtx cntrtx = gen_int_mode (cnt, TYPE_MODE (sizetype));
      do_compare_rtx_and_jump (cntvar, cntrtx, NE, false,
			       TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
			       profile_probability::very_likely ());
    }
  if (lhs && resv == NULL_TREE)
    {
      struct separate_ops ops;
      ops.code = code;
      ops.type = TREE_TYPE (arg0);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
				    EXPAND_NORMAL);
      if (ret != lhsr)
	emit_move_insn (lhsr, ret);
    }
  else if (resvr)
    emit_move_insn (lhsr, resvr);
}
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
  else
    expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true, NULL);
}
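/* Illustrative note (an assumed example, not taken from this file): with
   -fsanitize=signed-integer-overflow the instrumentation replaces a signed
   addition such as

     int f (int a, int b) { return a + b; }

   by an internal call in GIMPLE, roughly

     _3 = .UBSAN_CHECK_ADD (a_1, b_2);

   and the expander above emits the overflow test together with a call to
   the ubsan runtime on the overflowing path.  */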
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
  else if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true, NULL);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true, NULL);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
    expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
  else
    expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
			 NULL);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
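  /* For instance (an illustrative example, not from the original sources):
     for a MULT_EXPR with one 32-bit signed and one 32-bit unsigned operand,
     precop below becomes prec0 + prec1 + 1 = 65 bits of signed precision,
     while a PLUS_EXPR on two 32-bit signed operands only needs
     MAX (prec0, prec1) + 1 = 33 bits.  */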
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}

      /* For operations with low precision, if target doesn't have them, start
	 with precres widening right away, otherwise do it only if the most
	 simple cases can't be used.  */
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
	;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
		&& prec1 <= precres)
	       || ((!uns0_p || !uns1_p) && !unsr_p
		   && prec0 + uns0_p <= precres
		   && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		{
		  expand_neg_overflow (loc, lhs, arg1, false, NULL);
		  return;
		}
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				      unsr_p, unsr_p, false, NULL);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				   unsr_p, unsr_p, false, NULL);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
	  int p = MAX (min_precision, precop);
	  scalar_int_mode m = smallest_int_mode_for_size (p);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false, NULL);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false, NULL);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  scalar_int_mode m = smallest_int_mode_for_size (p);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Return a memory reference of type TYPE for argument INDEX of STMT.
   Use argument INDEX + 1 to derive the second (TBAA) operand.  */

static tree
expand_call_mem_ref (tree type, gcall *stmt, int index)
{
  tree addr = gimple_call_arg (stmt, index);
  tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
  unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);

  tree tmp = addr;
  if (TREE_CODE (tmp) == SSA_NAME)
    {
      gimple *def = SSA_NAME_DEF_STMT (tmp);
      if (gimple_assign_single_p (def))
	tmp = gimple_assign_rhs1 (def);
    }

  if (TREE_CODE (tmp) == ADDR_EXPR)
    {
      tree mem = TREE_OPERAND (tmp, 0);
      if (TREE_CODE (mem) == TARGET_MEM_REF
	  && types_compatible_p (TREE_TYPE (mem), type))
	{
	  tree offset = TMR_OFFSET (mem);
	  if (type != TREE_TYPE (mem)
	      || alias_ptr_type != TREE_TYPE (offset)
	      || !integer_zerop (offset))
	    {
	      mem = copy_node (mem);
	      TMR_OFFSET (mem) = wide_int_to_tree (alias_ptr_type,
						   wi::to_poly_wide (offset));
	      TREE_TYPE (mem) = type;
	    }
	  return mem;
	}
    }

  return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
}
/* Expand MASK_LOAD{,_LANES} call STMT using optab OPTAB.  */

static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  class expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;
  insn_code icode;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = expand_call_mem_ref (type, stmt, 0);

  if (optab == vec_mask_load_lanes_optab)
    icode = get_multi_vector_move (type, optab);
  else
    icode = convert_optab_handler (optab, TYPE_MODE (type),
				   TYPE_MODE (TREE_TYPE (maskt)));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (icode, 3, ops);
}

#define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
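/* Illustrative note (an assumed example, not from this file): the
   vectorizer emits masked loads as internal calls of the form

     vect__1 = .MASK_LOAD (_2, 32B, loop_mask_3);

   where argument 0 is the address, argument 1 carries the alignment and
   TBAA information consumed by expand_call_mem_ref, and argument 2 is the
   vector mask that the expander above hands to the target's masked-load
   pattern.  */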
/* Expand MASK_STORE{,_LANES} call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  class expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;
  insn_code icode;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = expand_call_mem_ref (type, stmt, 0);

  if (optab == vec_mask_store_lanes_optab)
    icode = get_multi_vector_move (type, optab);
  else
    icode = convert_optab_handler (optab, TYPE_MODE (type),
				   TYPE_MODE (TREE_TYPE (maskt)));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (icode, 3, ops);
}

#define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}

static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target,
			 VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy
   function should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* IFN_VEC_CONVERT is supposed to be expanded at pass_lower_vector.  So this
   dummy function should never be called.  */

static void
expand_VEC_CONVERT (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
	pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
	{
	  tree lhs = gimple_call_lhs (stmt);
	  rtx target = const0_rtx;

	  if (lhs)
	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));

	  if (kind == IFN_UNIQUE_OACC_FORK)
	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
	  else
	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
	}
      break;
    }

  if (pattern)
    emit_insn (pattern);
}
/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    emit_move_insn (target, GEN_INT (1));
}

/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by oacc_device_lower pass.  */

static void
expand_GOACC_TILE (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
		  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
}
/* Expand atomic bit test and set.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and complement.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and reset.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic compare and exchange.  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}
/* Expand LAUNDER to assignment, lhs = arg0.  */

static void
expand_LAUNDER (internal_fn, gcall *call)
{
  tree lhs = gimple_call_lhs (call);

  if (!lhs)
    return;

  expand_assignment (lhs, gimple_call_arg (call, 0), false);
}
/* Expand {MASK_,}SCATTER_STORE{S,U} call CALL using optab OPTAB.  */

static void
expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
{
  internal_fn ifn = gimple_call_internal_fn (stmt);
  int rhs_index = internal_fn_stored_value_index (ifn);
  int mask_index = internal_fn_mask_index (ifn);
  tree base = gimple_call_arg (stmt, 0);
  tree offset = gimple_call_arg (stmt, 1);
  tree scale = gimple_call_arg (stmt, 2);
  tree rhs = gimple_call_arg (stmt, rhs_index);

  rtx base_rtx = expand_normal (base);
  rtx offset_rtx = expand_normal (offset);
  HOST_WIDE_INT scale_int = tree_to_shwi (scale);
  rtx rhs_rtx = expand_normal (rhs);

  class expand_operand ops[6];
  int i = 0;
  create_address_operand (&ops[i++], base_rtx);
  create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
  create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
  create_integer_operand (&ops[i++], scale_int);
  create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
  if (mask_index >= 0)
    {
      tree mask = gimple_call_arg (stmt, mask_index);
      rtx mask_rtx = expand_normal (mask);
      create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
    }

  insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)));
  expand_insn (icode, i, ops);
}
/* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB.  */

static void
expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
{
  tree lhs = gimple_call_lhs (stmt);
  tree base = gimple_call_arg (stmt, 0);
  tree offset = gimple_call_arg (stmt, 1);
  tree scale = gimple_call_arg (stmt, 2);

  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx base_rtx = expand_normal (base);
  rtx offset_rtx = expand_normal (offset);
  HOST_WIDE_INT scale_int = tree_to_shwi (scale);

  int i = 0;
  class expand_operand ops[6];
  create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
  create_address_operand (&ops[i++], base_rtx);
  create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
  create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
  create_integer_operand (&ops[i++], scale_int);
  if (optab == mask_gather_load_optab)
    {
      tree mask = gimple_call_arg (stmt, 3);
      rtx mask_rtx = expand_normal (mask);
      create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
    }
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)));
  expand_insn (icode, i, ops);
}
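/* Illustrative note (an assumed example, not from this file): a masked
   gather load appears in GIMPLE dumps as

     vect__1 = .MASK_GATHER_LOAD (base_2, offsets_3, 4, loop_mask_4);

   i.e. a scalar base address, a vector of offsets, the scale applied after
   the offsets have been extended to address width, and the mask operand
   that the code above adds when OPTAB is mask_gather_load_optab.  */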
/* Expand DIVMOD () using:
   a) optab handler for udivmod/sdivmod if it is available.
   b) If optab_handler doesn't exist, generate call to
      target-specific divmod libfunc.  */

static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check if optab_handler exists for divmod_optab for given mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate call to divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
				   &quotient, &remainder);

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
		       make_tree (TREE_TYPE (arg0), quotient),
		       make_tree (TREE_TYPE (arg1), remainder)),
	       target, VOIDmode, EXPAND_NORMAL);
}
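/* Illustrative note (an assumed example, not from this file): the divmod
   transformation combines a division and a modulo with the same operands,
   e.g.

     q_3 = a_1 / b_2;
     r_4 = a_1 % b_2;

   into a single complex-valued internal call

     _5 = .DIVMOD (a_1, b_2);
     q_3 = REALPART_EXPR <_5>;
     r_4 = IMAGPART_EXPR <_5>;

   which the expander above lowers to one divmod instruction or one divmod
   libcall instead of two separate operations.  */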
static void
expand_NOP (internal_fn, gcall *)
{
  /* Nothing.  But it shouldn't really prevail.  */
}
/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
			unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
  gcc_assert (icode != CODE_FOR_nothing);

  tree lhs = gimple_call_lhs (stmt);
  rtx lhs_rtx = NULL_RTX;
  if (lhs)
    lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  /* Do not assign directly to a promoted subreg, since there is no
     guarantee that the instruction will leave the upper bits of the
     register in the state required by SUBREG_PROMOTED_SIGN.  */
  rtx dest = lhs_rtx;
  if (dest && GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
    dest = NULL_RTX;

  create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
	create_convert_operand_from (&ops[i + 1], rhs_rtx,
				     TYPE_MODE (rhs_type),
				     TYPE_UNSIGNED (rhs_type));
      else
	create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (lhs_rtx && !rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
	 result to that type.  This is useful for things that return an
	 int regardless of the size of the input.  If the instruction result
	 is smaller than required, assume that it is signed.

	 If the return value has a nonintegral type, its mode must match
	 the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
	{
	  /* If this is a scalar in a register that is stored in a wider
	     mode than the declared mode, compute the result into its
	     declared mode and then convert to the wider mode.  */
	  gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
	  rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
	  convert_move (SUBREG_REG (lhs_rtx), tmp,
			SUBREG_PROMOTED_SIGN (lhs_rtx));
	}
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
	emit_move_insn (lhs_rtx, ops[0].value);
      else
	{
	  gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
	  convert_move (lhs_rtx, ops[0].value, 0);
	}
    }
}
/* Expand WHILE_ULT call STMT using optab OPTAB.  */

static void
expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  expand_operand ops[3];
  tree rhs_type[2];

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, TYPE_MODE (lhs_type));

  for (unsigned int i = 0; i < 2; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      rhs_type[i] = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type[i]));
    }

  insn_code icode = convert_optab_handler (optab, TYPE_MODE (rhs_type[0]),
					   TYPE_MODE (lhs_type));

  expand_insn (icode, 3, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    emit_move_insn (lhs_rtx, ops[0].value);
}
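/* Illustrative note (an assumption for exposition, not from this file):
   WHILE_ULT computes a mask whose element I is (arg0 + I < arg1), i.e. the
   scalar equivalent of

     for (unsigned int i = 0; i < n_elements; ++i)
       mask[i] = (arg0 + i) < arg1;

   Fully-masked vector loops use it to guard the final, partial iteration:
   the result is all true while a whole vector of elements remains and only
   partially true for the tail.  */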
/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)

#define expand_ternary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 3)

#define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 3)

#define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 4)

#define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 5)

#define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 3)

#define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)

#define expand_mask_fold_left_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3007 /* RETURN_TYPE and ARGS are a return type and argument list that are
3008 in principle compatible with FN (which satisfies direct_internal_fn_p).
3009 Return the types that should be used to determine whether the
3010 target supports FN. */
3013 direct_internal_fn_types (internal_fn fn
, tree return_type
, tree
*args
)
3015 const direct_internal_fn_info
&info
= direct_internal_fn (fn
);
3016 tree type0
= (info
.type0
< 0 ? return_type
: TREE_TYPE (args
[info
.type0
]));
3017 tree type1
= (info
.type1
< 0 ? return_type
: TREE_TYPE (args
[info
.type1
]));
3018 return tree_pair (type0
, type1
);
3021 /* CALL is a call whose return type and arguments are in principle
3022 compatible with FN (which satisfies direct_internal_fn_p). Return the
3023 types that should be used to determine whether the target supports FN. */
3026 direct_internal_fn_types (internal_fn fn
, gcall
*call
)
3028 const direct_internal_fn_info
&info
= direct_internal_fn (fn
);
3029 tree op0
= (info
.type0
< 0
3030 ? gimple_call_lhs (call
)
3031 : gimple_call_arg (call
, info
.type0
));
3032 tree op1
= (info
.type1
< 0
3033 ? gimple_call_lhs (call
)
3034 : gimple_call_arg (call
, info
.type1
));
3035 return tree_pair (TREE_TYPE (op0
), TREE_TYPE (op1
));
3038 /* Return true if OPTAB is supported for TYPES (whose modes should be
3039 the same) when the optimization type is OPT_TYPE. Used for simple
3043 direct_optab_supported_p (direct_optab optab
, tree_pair types
,
3044 optimization_type opt_type
)
3046 machine_mode mode
= TYPE_MODE (types
.first
);
3047 gcc_checking_assert (mode
== TYPE_MODE (types
.second
));
3048 return direct_optab_handler (optab
, mode
, opt_type
) != CODE_FOR_nothing
;
3051 /* Return true if OPTAB is supported for TYPES, where the first type
3052 is the destination and the second type is the source. Used for
3056 convert_optab_supported_p (convert_optab optab
, tree_pair types
,
3057 optimization_type opt_type
)
3059 return (convert_optab_handler (optab
, TYPE_MODE (types
.first
),
3060 TYPE_MODE (types
.second
), opt_type
)
3061 != CODE_FOR_nothing
);
3064 /* Return true if load/store lanes optab OPTAB is supported for
3065 array type TYPES.first when the optimization type is OPT_TYPE. */
3068 multi_vector_optab_supported_p (convert_optab optab
, tree_pair types
,
3069 optimization_type opt_type
)
3071 gcc_assert (TREE_CODE (types
.first
) == ARRAY_TYPE
);
3072 machine_mode imode
= TYPE_MODE (types
.first
);
3073 machine_mode vmode
= TYPE_MODE (TREE_TYPE (types
.first
));
3074 return (convert_optab_handler (optab
, imode
, vmode
, opt_type
)
3075 != CODE_FOR_nothing
);
#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_ternary_optab_supported_p direct_optab_supported_p
#define direct_cond_unary_optab_supported_p direct_optab_supported_p
#define direct_cond_binary_optab_supported_p direct_optab_supported_p
#define direct_cond_ternary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_gather_load_optab_supported_p direct_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_scatter_store_optab_supported_p direct_optab_supported_p
#define direct_while_optab_supported_p convert_optab_supported_p
#define direct_fold_extract_optab_supported_p direct_optab_supported_p
#define direct_fold_left_optab_supported_p direct_optab_supported_p
#define direct_mask_fold_left_optab_supported_p direct_optab_supported_p
3097 /* Return the optab used by internal function FN. */
3100 direct_internal_fn_optab (internal_fn fn
, tree_pair types
)
3104 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3105 case IFN_##CODE: break;
3106 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3107 case IFN_##CODE: return OPTAB##_optab;
3108 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3109 UNSIGNED_OPTAB, TYPE) \
3110 case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR) \
3111 ? UNSIGNED_OPTAB ## _optab \
3112 : SIGNED_OPTAB ## _optab);
3113 #include "internal-fn.def"
3121 /* Return the optab used by internal function FN. */
3124 direct_internal_fn_optab (internal_fn fn
)
3128 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3129 case IFN_##CODE: break;
3130 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3131 case IFN_##CODE: return OPTAB##_optab;
3132 #include "internal-fn.def"
3140 /* Return true if FN is supported for the types in TYPES when the
3141 optimization type is OPT_TYPE. The types are those associated with
3142 the "type0" and "type1" fields of FN's direct_internal_fn_info
3146 direct_internal_fn_supported_p (internal_fn fn
, tree_pair types
,
3147 optimization_type opt_type
)
3151 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3152 case IFN_##CODE: break;
3153 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3155 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
3157 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3158 UNSIGNED_OPTAB, TYPE) \
3161 optab which_optab = (TYPE_UNSIGNED (types.SELECTOR) \
3162 ? UNSIGNED_OPTAB ## _optab \
3163 : SIGNED_OPTAB ## _optab); \
3164 return direct_##TYPE##_optab_supported_p (which_optab, types, \
3167 #include "internal-fn.def"
3175 /* Return true if FN is supported for type TYPE when the optimization
3176 type is OPT_TYPE. The caller knows that the "type0" and "type1"
3177 fields of FN's direct_internal_fn_info structure are the same. */
3180 direct_internal_fn_supported_p (internal_fn fn
, tree type
,
3181 optimization_type opt_type
)
3183 const direct_internal_fn_info
&info
= direct_internal_fn (fn
);
3184 gcc_checking_assert (info
.type0
== info
.type1
);
3185 return direct_internal_fn_supported_p (fn
, tree_pair (type
, type
), opt_type
);
3188 /* Return true if the STMT is supported when the optimization type is OPT_TYPE,
3189 given that STMT is a call to a direct internal function. */
3192 direct_internal_fn_supported_p (gcall
*stmt
, optimization_type opt_type
)
3194 internal_fn fn
= gimple_call_internal_fn (stmt
);
3195 tree_pair types
= direct_internal_fn_types (fn
, stmt
);
3196 return direct_internal_fn_supported_p (fn
, types
, opt_type
);
3199 /* If FN is commutative in two consecutive arguments, return the
3200 index of the first, otherwise return -1. */
3203 first_commutative_argument (internal_fn fn
)
3237 /* Return true if IFN_SET_EDOM is supported. */
3240 set_edom_supported_p (void)
3249 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3251 expand_##CODE (internal_fn fn, gcall *stmt) \
3253 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
3255 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3256 UNSIGNED_OPTAB, TYPE) \
3258 expand_##CODE (internal_fn fn, gcall *stmt) \
3260 tree_pair types = direct_internal_fn_types (fn, stmt); \
3261 optab which_optab = direct_internal_fn_optab (fn, types); \
3262 expand_##TYPE##_optab_fn (fn, stmt, which_optab); \
3264 #include "internal-fn.def"
3266 /* Routines to expand each internal function, indexed by function number.
3267 Each routine has the prototype:
3269 expand_<NAME> (gcall *stmt)
3271 where STMT is the statement that performs the call. */
3272 static void (*const internal_fn_expanders
[]) (internal_fn
, gcall
*) = {
3273 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
3274 #include "internal-fn.def"
3278 /* Invoke T(CODE, IFN) for each conditional function IFN that maps to a
3280 #define FOR_EACH_CODE_MAPPING(T) \
3281 T (PLUS_EXPR, IFN_COND_ADD) \
3282 T (MINUS_EXPR, IFN_COND_SUB) \
3283 T (MULT_EXPR, IFN_COND_MUL) \
3284 T (TRUNC_DIV_EXPR, IFN_COND_DIV) \
3285 T (TRUNC_MOD_EXPR, IFN_COND_MOD) \
3286 T (RDIV_EXPR, IFN_COND_RDIV) \
3287 T (MIN_EXPR, IFN_COND_MIN) \
3288 T (MAX_EXPR, IFN_COND_MAX) \
3289 T (BIT_AND_EXPR, IFN_COND_AND) \
3290 T (BIT_IOR_EXPR, IFN_COND_IOR) \
3291 T (BIT_XOR_EXPR, IFN_COND_XOR) \
3292 T (LSHIFT_EXPR, IFN_COND_SHL) \
3293 T (RSHIFT_EXPR, IFN_COND_SHR)
3295 /* Return a function that only performs CODE when a certain condition is met
3296 and that uses a given fallback value otherwise. For example, if CODE is
3297 a binary operation associated with conditional function FN:
3299 LHS = FN (COND, A, B, ELSE)
3301 is equivalent to the C expression:
3303 LHS = COND ? A CODE B : ELSE;
3305 operating elementwise if the operands are vectors.
3307 Return IFN_LAST if no such function exists. */
3310 get_conditional_internal_fn (tree_code code
)
3314 #define CASE(CODE, IFN) case CODE: return IFN;
3315 FOR_EACH_CODE_MAPPING(CASE
)
3322 /* If IFN implements the conditional form of a tree code, return that
3323 tree code, otherwise return ERROR_MARK. */
3326 conditional_internal_fn_code (internal_fn ifn
)
3330 #define CASE(CODE, IFN) case IFN: return CODE;
3331 FOR_EACH_CODE_MAPPING(CASE
)
3338 /* Invoke T(IFN) for each internal function IFN that also has an
3340 #define FOR_EACH_COND_FN_PAIR(T) \
3346 /* Return a function that only performs internal function FN when a
3347 certain condition is met and that uses a given fallback value otherwise.
3348 In other words, the returned function FN' is such that:
3350 LHS = FN' (COND, A1, ... An, ELSE)
3352 is equivalent to the C expression:
3354 LHS = COND ? FN (A1, ..., An) : ELSE;
3356 operating elementwise if the operands are vectors.
3358 Return IFN_LAST if no such function exists. */
3361 get_conditional_internal_fn (internal_fn fn
)
3365 #define CASE(NAME) case IFN_##NAME: return IFN_COND_##NAME;
3366 FOR_EACH_COND_FN_PAIR(CASE
)
3373 /* If IFN implements the conditional form of an unconditional internal
3374 function, return that unconditional function, otherwise return IFN_LAST. */
3377 get_unconditional_internal_fn (internal_fn ifn
)
3381 #define CASE(NAME) case IFN_COND_##NAME: return IFN_##NAME;
3382 FOR_EACH_COND_FN_PAIR(CASE
)
3389 /* Return true if STMT can be interpreted as a conditional tree code
3390 operation of the form:
3392 LHS = COND ? OP (RHS1, ...) : ELSE;
3394 operating elementwise if the operands are vectors. This includes
3395 the case of an all-true COND, so that the operation always happens.
3397 When returning true, set:
3399 - *COND_OUT to the condition COND, or to NULL_TREE if the condition
3400 is known to be all-true
3401 - *CODE_OUT to the tree code
3402 - OPS[I] to operand I of *CODE_OUT
3403 - *ELSE_OUT to the fallback value ELSE, or to NULL_TREE if the
3404 condition is known to be all true. */
3407 can_interpret_as_conditional_op_p (gimple
*stmt
, tree
*cond_out
,
3408 tree_code
*code_out
,
3409 tree (&ops
)[3], tree
*else_out
)
3411 if (gassign
*assign
= dyn_cast
<gassign
*> (stmt
))
3413 *cond_out
= NULL_TREE
;
3414 *code_out
= gimple_assign_rhs_code (assign
);
3415 ops
[0] = gimple_assign_rhs1 (assign
);
3416 ops
[1] = gimple_assign_rhs2 (assign
);
3417 ops
[2] = gimple_assign_rhs3 (assign
);
3418 *else_out
= NULL_TREE
;
3421 if (gcall
*call
= dyn_cast
<gcall
*> (stmt
))
3422 if (gimple_call_internal_p (call
))
3424 internal_fn ifn
= gimple_call_internal_fn (call
);
3425 tree_code code
= conditional_internal_fn_code (ifn
);
3426 if (code
!= ERROR_MARK
)
3428 *cond_out
= gimple_call_arg (call
, 0);
3430 unsigned int nops
= gimple_call_num_args (call
) - 2;
3431 for (unsigned int i
= 0; i
< 3; ++i
)
3432 ops
[i
] = i
< nops
? gimple_call_arg (call
, i
+ 1) : NULL_TREE
;
3433 *else_out
= gimple_call_arg (call
, nops
+ 1);
3434 if (integer_truep (*cond_out
))
3436 *cond_out
= NULL_TREE
;
3437 *else_out
= NULL_TREE
;
3445 /* Return true if IFN is some form of load from memory. */
3448 internal_load_fn_p (internal_fn fn
)
3453 case IFN_LOAD_LANES
:
3454 case IFN_MASK_LOAD_LANES
:
3455 case IFN_GATHER_LOAD
:
3456 case IFN_MASK_GATHER_LOAD
:
3464 /* Return true if IFN is some form of store to memory. */
3467 internal_store_fn_p (internal_fn fn
)
3471 case IFN_MASK_STORE
:
3472 case IFN_STORE_LANES
:
3473 case IFN_MASK_STORE_LANES
:
3474 case IFN_SCATTER_STORE
:
3475 case IFN_MASK_SCATTER_STORE
:
3483 /* Return true if IFN is some form of gather load or scatter store. */
3486 internal_gather_scatter_fn_p (internal_fn fn
)
3490 case IFN_GATHER_LOAD
:
3491 case IFN_MASK_GATHER_LOAD
:
3492 case IFN_SCATTER_STORE
:
3493 case IFN_MASK_SCATTER_STORE
:
3501 /* If FN takes a vector mask argument, return the index of that argument,
3502 otherwise return -1. */
3505 internal_fn_mask_index (internal_fn fn
)
3510 case IFN_MASK_LOAD_LANES
:
3511 case IFN_MASK_STORE
:
3512 case IFN_MASK_STORE_LANES
:
3515 case IFN_MASK_GATHER_LOAD
:
3518 case IFN_MASK_SCATTER_STORE
:
3522 return (conditional_internal_fn_code (fn
) != ERROR_MARK
3523 || get_unconditional_internal_fn (fn
) != IFN_LAST
? 0 : -1);
3527 /* If FN takes a value that should be stored to memory, return the index
3528 of that argument, otherwise return -1. */
3531 internal_fn_stored_value_index (internal_fn fn
)
3535 case IFN_MASK_STORE
:
3536 case IFN_SCATTER_STORE
:
3537 case IFN_MASK_SCATTER_STORE
:
3545 /* Return true if the target supports gather load or scatter store function
3546 IFN. For loads, VECTOR_TYPE is the vector type of the load result,
3547 while for stores it is the vector type of the stored data argument.
3548 MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
3549 or stored. OFFSET_SIGN is the sign of the offset argument, which is
3550 only relevant when the offset is narrower than an address. SCALE is
3551 the amount by which the offset should be multiplied *after* it has
3552 been extended to address width. */
3555 internal_gather_scatter_fn_supported_p (internal_fn ifn
, tree vector_type
,
3556 tree memory_element_type
,
3557 signop offset_sign
, int scale
)
3559 if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type
)),
3560 TYPE_SIZE (memory_element_type
)))
3562 optab optab
= direct_internal_fn_optab (ifn
);
3563 insn_code icode
= direct_optab_handler (optab
, TYPE_MODE (vector_type
));
3564 int output_ops
= internal_load_fn_p (ifn
) ? 1 : 0;
3565 return (icode
!= CODE_FOR_nothing
3566 && insn_operand_matches (icode
, 2 + output_ops
,
3567 GEN_INT (offset_sign
== UNSIGNED
))
3568 && insn_operand_matches (icode
, 3 + output_ops
,
3572 /* Expand STMT as though it were a call to internal function FN. */
3575 expand_internal_call (internal_fn fn
, gcall
*stmt
)
3577 internal_fn_expanders
[fn
] (fn
, stmt
);
3580 /* Expand STMT, which is a call to internal function FN. */
3583 expand_internal_call (gcall
*stmt
)
3585 expand_internal_call (gimple_call_internal_fn (stmt
), stmt
);
3588 /* If TYPE is a vector type, return true if IFN is a direct internal
3589 function that is supported for that type. If TYPE is a scalar type,
3590 return true if IFN is a direct internal function that is supported for
3591 the target's preferred vector version of TYPE. */
3594 vectorized_internal_fn_supported_p (internal_fn ifn
, tree type
)
3597 if (!VECTOR_TYPE_P (type
) && is_a
<scalar_mode
> (TYPE_MODE (type
), &smode
))
3599 machine_mode vmode
= targetm
.vectorize
.preferred_simd_mode (smode
);
3600 if (VECTOR_MODE_P (vmode
))
3601 type
= build_vector_type_for_mode (type
, vmode
);
3604 return (VECTOR_MODE_P (TYPE_MODE (type
))
3605 && direct_internal_fn_supported_p (ifn
, type
, OPTIMIZE_FOR_SPEED
));
3609 expand_PHI (internal_fn
, gcall
*)