/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

/* Initialize internal function tables.  */
void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}
/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */
static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */
static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */
static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}
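/* Illustrative sketch (not part of the original file): the same
   positive/negative classification get_range_pos_neg derives from value
   range information, written for a plain int range [LO, HI].  The name
   range_pos_neg_example and its parameters are hypothetical.  */
static int
range_pos_neg_example (int lo, int hi)
{
  if (lo >= 0)
    return 1;	/* Known to be always non-negative.  */
  if (hi < 0)
    return 2;	/* Known to be always negative.  */
  return 3;	/* May be positive or negative.  */
}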
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
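/* Illustrative sketch (not part of the original file): the "minimum
   precision" notion used above, for a plain unsigned constant.  The name
   min_precision_example is hypothetical; it counts how many low bits are
   needed to represent VAL exactly when treated as unsigned.  */
static int
min_precision_example (unsigned int val)
{
  int prec = 0;
  while (val)
    {
      prec++;
      val >>= 1;
    }
  /* Even zero needs one bit in this convention.  */
  return prec ? prec : 1;
}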
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur  */
  if (uns0_p && uns1_p && unsr_p)
    {
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
				       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  struct expand_operand ops[4];
	  rtx_insn *last = get_last_insn ();

	  res = gen_reg_rtx (mode);
	  create_output_operand (&ops[0], res, mode);
	  create_input_operand (&ops[1], op0, mode);
	  create_input_operand (&ops[2], op1, mode);
	  create_fixed_operand (&ops[3], do_error);
	  if (maybe_expand_insn (icode, 4, ops))
	    {
	      last = get_last_insn ();
	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		  && JUMP_P (last)
		  && any_condjump_p (last)
		  && !find_reg_note (last, REG_BR_PROB, 0))
		add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	      goto do_error_label;
	    }

	  delete_insns_since (last);
	}
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur  */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur  */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr  */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      std::swap (op0, op1);
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL, do_ior_label,
				   PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
			       op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr  */
 do_signed:
  {
    insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
				     : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
      {
	struct expand_operand ops[4];
	rtx_insn *last = get_last_insn ();

	res = gen_reg_rtx (mode);
	create_output_operand (&ops[0], res, mode);
	create_input_operand (&ops[1], op0, mode);
	create_input_operand (&ops[2], op1, mode);
	create_fixed_operand (&ops[3], do_error);
	if (maybe_expand_insn (icode, 4, ops))
	  {
	    last = get_last_insn ();
	    if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		&& JUMP_P (last)
		&& any_condjump_p (last)
		&& !find_reg_note (last, REG_BR_PROB, 0))
	      add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	    emit_jump (done_label);
	    goto do_error_label;
	  }

	delete_insns_since (last);
      }
    rtx_code_label *sub_check = gen_label_rtx ();
    int pos_neg = 3;

    /* Compute the operation.  On RTL level, the addition is always
       unsigned.  */
    res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

    /* If we can prove one of the arguments (for MINUS_EXPR only
       the second operand, as subtraction is not commutative) is always
       non-negative or always negative, we can do just one comparison
       and conditional jump instead of 2 at runtime, 3 present in the
       emitted code.  If one of the arguments is CONST_INT, all we
       need is to make sure it is op1, then the first
       do_compare_rtx_and_jump will be just folded.  Otherwise try
       to use range info if available.  */
    if (code == PLUS_EXPR && CONST_INT_P (op0))
      std::swap (op0, op1);
    else if (CONST_INT_P (op1))
      ;
    else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
      {
	pos_neg = get_range_pos_neg (arg0);
	if (pos_neg != 3)
	  std::swap (op0, op1);
      }
    if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
      pos_neg = get_range_pos_neg (arg1);

    /* If the op1 is negative, we have to use a different check.  */
    if (pos_neg == 3)
      do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, sub_check, PROB_EVEN);

    /* Compare the result of the operation with one of the operands.  */
    if (pos_neg & 1)
      do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
			       false, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);

    /* If we get here, we have to print the error.  */
    if (pos_neg == 3)
      {
	emit_jump (do_error);
	emit_label (sub_check);
      }

    /* We have k = a + b for b < 0 here.  k <= a must hold.  */
    if (pos_neg & 2)
      do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
			       false, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
  }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}
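/* Illustrative sketch (not part of the original file): the s1 + s2 -> sr
   rule from the table above, written directly in C.  The sum is computed
   in unsigned arithmetic (which cannot trap) and overflow is detected by
   comparing the result against the first operand, mirroring the RTL that
   expand_addsub_overflow emits.  signed_add_overflows_example is a
   hypothetical name.  */
static int
signed_add_overflows_example (int s1, int s2, int *res)
{
  unsigned int ur = (unsigned int) s1 + (unsigned int) s2;
  *res = (int) ur;
  /* ovf = s2 < 0 ? res > s1 : res < s1  */
  return s2 < 0 ? *res > s1 : *res < s1;
}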
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur  */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* s1 * s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the values.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the values.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }
  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL, done_label,
				     PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halves.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL, large_op0,
				     PROB_UNLIKELY);

	  if (!uns && !op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);
	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, both_ops_large,
				     PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
	     if (loxhi >> (bitsize / 2) == 0 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
				   NULL_RTX, NULL, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);
	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and highpart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
					 NULL_RTX, NULL, hipart_different,
					 PROB_EVEN);

	      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);
  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
			       NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
			       all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
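/* Illustrative sketch (not part of the original file): the widening
   multiplication strategy used above (the WIDEN_MULT_EXPR path) for 32-bit
   signed operands, using a 64-bit product.  Overflow happened iff the wide
   product no longer fits in the narrow type, i.e. the high half differs
   from the sign-extension of the low half.  signed_mul_overflows_example
   is a hypothetical name.  */
static int
signed_mul_overflows_example (int s1, int s2, int *res)
{
  long long wide = (long long) s1 * (long long) s2;
  *res = (int) wide;
  return wide != (long long) *res;
}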
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;
  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}
      /* For sub-word operations, if target doesn't have them, start
	 with precres widening right away, otherwise do it only
	 if the most simple cases can't be used.  */
      if (WORD_REGISTER_OPERATIONS
	  && orig_precres == precres
	  && precres < BITS_PER_WORD)
	;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
		&& prec1 <= precres)
	       || ((!uns0_p || !uns1_p) && !unsr_p
		   && prec0 + uns0_p <= precres
		   && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  if (code == MINUS_EXPR
	      && integer_zerop (arg0) && !unsr_p)
	    expand_neg_overflow (loc, lhs, arg1, false);
	  else if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				    unsr_p, unsr_p, unsr_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1,
				 unsr_p, unsr_p, unsr_p, false);
	  return;
	}
      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#if WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}
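/* Illustrative usage sketch (not part of the original file): the internal
   functions expanded above implement the __builtin_{add,sub,mul}_overflow
   family; a call such as the one below is turned into IFN_ADD_OVERFLOW,
   whose complex result carries both the value and the overflow flag.
   add_overflow_usage_example is a hypothetical name.  */
static int
add_overflow_usage_example (int a, int b, int *sum)
{
  /* Returns nonzero iff a + b overflowed int.  */
  return __builtin_add_overflow (a, b, sum);
}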
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Expand MASK_LOAD call STMT using optab OPTAB.  */

static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
				      TYPE_MODE (TREE_TYPE (maskt))),
	       3, ops);
}
/* Expand MASK_STORE call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, reg, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
				      TYPE_MODE (TREE_TYPE (maskt))),
	       3, ops);
}
static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}
static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
	pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
	{
	  tree lhs = gimple_call_lhs (stmt);
	  rtx target = const0_rtx;

	  if (lhs)
	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));

	  if (kind == IFN_UNIQUE_OACC_FORK)
	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
	  else
	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
	}
      else
	gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}
/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    emit_move_insn (target, GEN_INT (1));
}
/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
                             VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    emit_move_insn (target, const0_rtx);
}
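/* For both GOACC_DIM_SIZE and GOACC_DIM_POS the fallback used when the
   target provides no dedicated insn corresponds to single-threaded
   execution: every dimension has size 1 and every position is 0.  */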
/* This is expanded by the oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This is expanded by the oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx
    = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
                  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}
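/* IFN_SET_EDOM gives callers a direct way to store EDOM into errno on an
   error path; it is only usable when TARGET_EDOM is defined (see
   set_edom_supported_p below).  */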
/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
                        unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
        create_convert_operand_from (&ops[i + 1], rhs_rtx,
                                     TYPE_MODE (rhs_type),
                                     TYPE_UNSIGNED (rhs_type));
      else
        create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
         result to that type.  This is useful for things that return an
         int regardless of the size of the input.  If the instruction result
         is smaller than required, assume that it is signed.

         If the return value has a nonintegral type, its mode must match
         the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
        {
          /* If this is a scalar in a register that is stored in a wider
             mode than the declared mode, compute the result into its
             declared mode and then convert to the wider mode.  */
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
          convert_move (SUBREG_REG (lhs_rtx), tmp,
                        SUBREG_PROMOTED_SIGN (lhs_rtx));
        }
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
        emit_move_insn (lhs_rtx, ops[0].value);
      else
        {
          gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
          convert_move (lhs_rtx, ops[0].value, 0);
        }
    }
}
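/* To summarize the convention above: operand 0 is the output and operands
   1..NARGS are the inputs, taken in call-argument order.  Integral inputs
   go through create_convert_operand_from so that the expander may widen or
   narrow them to whatever mode the selected insn actually requires.  */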
/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)
/* RETURN_TYPE and ARGS are a return type and argument list that are
   in principle compatible with FN (which satisfies direct_internal_fn_p).
   Return the types that should be used to determine whether the
   target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
  tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
  return tree_pair (type0, type1);
}
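/* As the accessors above show, a negative type0/type1 index means "use the
   return type" while a non-negative index means "use the type of that call
   argument"; e.g. type0 == -1 with type1 == 2 (purely illustrative values)
   would pair the return type with the type of the third argument.  */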
/* CALL is a call whose return type and arguments are in principle
   compatible with FN (which satisfies direct_internal_fn_p).  Return the
   types that should be used to determine whether the target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, gcall *call)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree op0 = (info.type0 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type0));
  tree op1 = (info.type1 < 0
              ? gimple_call_lhs (call)
              : gimple_call_arg (call, info.type1));
  return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
}
/* Return true if OPTAB is supported for TYPES (whose modes should be
   the same) when the optimization type is OPT_TYPE.  Used for simple
   direct optabs.  */

static bool
direct_optab_supported_p (direct_optab optab, tree_pair types,
                          optimization_type opt_type)
{
  machine_mode mode = TYPE_MODE (types.first);
  gcc_checking_assert (mode == TYPE_MODE (types.second));
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}
/* Return true if load/store lanes optab OPTAB is supported for
   array type TYPES.first when the optimization type is OPT_TYPE.  */

static bool
multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
                                optimization_type opt_type)
{
  gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
  machine_mode imode = TYPE_MODE (types.first);
  machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
  return (convert_optab_handler (optab, imode, vmode, opt_type)
          != CODE_FOR_nothing);
}
#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
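/* These #defines map the TYPE field of each DEF_INTERNAL_OPTAB_FN entry onto
   the support predicate used by direct_internal_fn_supported_p below, via
   token pasting (direct_<TYPE>_optab_supported_p).  */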
/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
                                optimization_type opt_type)
{
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
                                                opt_type);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}
/* Return true if FN is supported for type TYPE when the optimization
   type is OPT_TYPE.  The caller knows that the "type0" and "type1"
   fields of FN's direct_internal_fn_info structure are the same.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree type,
                                optimization_type opt_type)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  gcc_checking_assert (info.type0 == info.type1);
  return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}
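/* A typical query from an optimization pass might look roughly like

     if (direct_internal_fn_supported_p (ifn, TREE_TYPE (lhs),
                                         OPTIMIZE_FOR_SPEED))
       ... emit a call to ifn instead of open-coding the operation ...

   (illustrative only; "ifn" and the surrounding code are placeholders,
   not a reference to a specific caller).  */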
/* Return true if IFN_SET_EDOM is supported.  */

bool
set_edom_supported_p (void)
{
#ifdef TARGET_EDOM
  return true;
#else
  return false;
#endif
}
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void \
  expand_##CODE (internal_fn fn, gcall *stmt) \
  { \
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
  }
#include "internal-fn.def"
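/* After macro expansion, each DEF_INTERNAL_OPTAB_FN entry therefore produces
   a trivial wrapper of the form

     static void
     expand_<CODE> (internal_fn fn, gcall *stmt)
     {
       expand_<TYPE>_optab_fn (fn, stmt, <OPTAB>_optab);
     }

   which is exactly the shape the expander table below expects.  */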
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (internal_fn fn, gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};
/* Expand STMT as though it were a call to internal function FN.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}
/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}