[official-gcc.git] / gcc / internal-fn.c
1 /* Internal functions.
2 Copyright (C) 2011-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "ubsan.h"
43 #include "recog.h"
44 #include "builtins.h"
45 #include "optabs-tree.h"
47 /* The names of each internal function, indexed by function number. */
48 const char *const internal_fn_name_array[] = {
49 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
50 #include "internal-fn.def"
51 "<invalid-fn>"
54 /* The ECF_* flags of each internal function, indexed by function number. */
55 const int internal_fn_flags_array[] = {
56 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
57 #include "internal-fn.def"
61 /* Fnspec of each internal function, indexed by function number. */
62 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
64 void
65 init_internal_fns ()
67 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
68 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
69 build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
70 #include "internal-fn.def"
71 internal_fn_fnspec_array[IFN_LAST] = 0;
74 /* Create static initializers for the information returned by
75 direct_internal_fn. */
76 #define not_direct { -2, -2, false }
77 #define mask_load_direct { -1, 2, false }
78 #define load_lanes_direct { -1, -1, false }
79 #define mask_store_direct { 3, 2, false }
80 #define store_lanes_direct { 0, 0, false }
81 #define unary_direct { 0, 0, true }
82 #define binary_direct { 0, 0, true }
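/* Editorial note, not part of the original source: each triple above is a
   direct_internal_fn_info initializer (see internal-fn.h), roughly
   { type0, type1, vectorizable }.  type0/type1 give the index of the call
   argument whose type determines the optab modes, with -1 standing for the
   return value and -2 for "not a direct function"; for example,
   mask_load_direct { -1, 2, false } keys the modes off the return value and
   argument 2 (the mask).  */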
84 const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
85 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
86 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
87 #include "internal-fn.def"
88 not_direct
91 /* ARRAY_TYPE is an array of vector modes. Return the associated insn
92 for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none. */
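/* Illustrative example (editorial, not in the original source): for an
   IFN_LOAD_LANES call whose result type is an array of four V4SI vectors,
   IMODE is the mode of the whole array object and VMODE is V4SImode, and
   the returned code is the target's vec_load_lanes pattern for that mode
   pair, or CODE_FOR_nothing if the target provides none.  */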
94 static enum insn_code
95 get_multi_vector_move (tree array_type, convert_optab optab)
97 machine_mode imode;
98 machine_mode vmode;
100 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
101 imode = TYPE_MODE (array_type);
102 vmode = TYPE_MODE (TREE_TYPE (array_type));
104 return convert_optab_handler (optab, imode, vmode);
107 /* Expand LOAD_LANES call STMT using optab OPTAB. */
109 static void
110 expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
112 struct expand_operand ops[2];
113 tree type, lhs, rhs;
114 rtx target, mem;
116 lhs = gimple_call_lhs (stmt);
117 rhs = gimple_call_arg (stmt, 0);
118 type = TREE_TYPE (lhs);
120 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
121 mem = expand_normal (rhs);
123 gcc_assert (MEM_P (mem));
124 PUT_MODE (mem, TYPE_MODE (type));
126 create_output_operand (&ops[0], target, TYPE_MODE (type));
127 create_fixed_operand (&ops[1], mem);
128 expand_insn (get_multi_vector_move (type, optab), 2, ops);
131 /* Expand STORE_LANES call STMT using optab OPTAB. */
133 static void
134 expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
136 struct expand_operand ops[2];
137 tree type, lhs, rhs;
138 rtx target, reg;
140 lhs = gimple_call_lhs (stmt);
141 rhs = gimple_call_arg (stmt, 0);
142 type = TREE_TYPE (rhs);
144 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
145 reg = expand_normal (rhs);
147 gcc_assert (MEM_P (target));
148 PUT_MODE (target, TYPE_MODE (type));
150 create_fixed_operand (&ops[0], target);
151 create_input_operand (&ops[1], reg, TYPE_MODE (type));
152 expand_insn (get_multi_vector_move (type, optab), 2, ops);
155 static void
156 expand_ANNOTATE (internal_fn, gcall *)
158 gcc_unreachable ();
161 /* This should get expanded in omp_device_lower pass. */
163 static void
164 expand_GOMP_USE_SIMT (internal_fn, gcall *)
166 gcc_unreachable ();
169 /* Lane index on SIMT targets: thread index in the warp on NVPTX. On targets
170 without SIMT execution this should be expanded in omp_device_lower pass. */
172 static void
173 expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
175 tree lhs = gimple_call_lhs (stmt);
176 if (!lhs)
177 return;
179 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
180 gcc_assert (targetm.have_omp_simt_lane ());
181 emit_insn (targetm.gen_omp_simt_lane (target));
184 /* This should get expanded in omp_device_lower pass. */
186 static void
187 expand_GOMP_SIMT_VF (internal_fn, gcall *)
189 gcc_unreachable ();
192 /* Lane index of the first SIMT lane that supplies a non-zero argument.
193 This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
194 lane that executed the last iteration for handling OpenMP lastprivate. */
196 static void
197 expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
199 tree lhs = gimple_call_lhs (stmt);
200 if (!lhs)
201 return;
203 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
204 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
205 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
206 struct expand_operand ops[2];
207 create_output_operand (&ops[0], target, mode);
208 create_input_operand (&ops[1], cond, mode);
209 gcc_assert (targetm.have_omp_simt_last_lane ());
210 expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
213 /* Non-transparent predicate used in SIMT lowering of OpenMP "ordered". */
215 static void
216 expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
218 tree lhs = gimple_call_lhs (stmt);
219 if (!lhs)
220 return;
222 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
223 rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
224 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
225 struct expand_operand ops[2];
226 create_output_operand (&ops[0], target, mode);
227 create_input_operand (&ops[1], ctr, mode);
228 gcc_assert (targetm.have_omp_simt_ordered ());
229 expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
232 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
233 any lane supplies a non-zero argument. */
235 static void
236 expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
238 tree lhs = gimple_call_lhs (stmt);
239 if (!lhs)
240 return;
242 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
243 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
244 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
245 struct expand_operand ops[2];
246 create_output_operand (&ops[0], target, mode);
247 create_input_operand (&ops[1], cond, mode);
248 gcc_assert (targetm.have_omp_simt_vote_any ());
249 expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
252 /* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
253 is destination lane index XOR given offset. */
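/* Editorial example: with offset 1 the lanes exchange pairwise as
   0<->1, 2<->3, ...; with offset 2 as 0<->2, 1<->3, ...; the usual pattern
   for tree reductions across a warp.  */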
255 static void
256 expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
258 tree lhs = gimple_call_lhs (stmt);
259 if (!lhs)
260 return;
262 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
263 rtx src = expand_normal (gimple_call_arg (stmt, 0));
264 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
265 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
266 struct expand_operand ops[3];
267 create_output_operand (&ops[0], target, mode);
268 create_input_operand (&ops[1], src, mode);
269 create_input_operand (&ops[2], idx, SImode);
270 gcc_assert (targetm.have_omp_simt_xchg_bfly ());
271 expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
274 /* Exchange between SIMT lanes according to given source lane index. */
276 static void
277 expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
279 tree lhs = gimple_call_lhs (stmt);
280 if (!lhs)
281 return;
283 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
284 rtx src = expand_normal (gimple_call_arg (stmt, 0));
285 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
286 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
287 struct expand_operand ops[3];
288 create_output_operand (&ops[0], target, mode);
289 create_input_operand (&ops[1], src, mode);
290 create_input_operand (&ops[2], idx, SImode);
291 gcc_assert (targetm.have_omp_simt_xchg_idx ());
292 expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
295 /* This should get expanded in adjust_simduid_builtins. */
297 static void
298 expand_GOMP_SIMD_LANE (internal_fn, gcall *)
300 gcc_unreachable ();
303 /* This should get expanded in adjust_simduid_builtins. */
305 static void
306 expand_GOMP_SIMD_VF (internal_fn, gcall *)
308 gcc_unreachable ();
311 /* This should get expanded in adjust_simduid_builtins. */
313 static void
314 expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
316 gcc_unreachable ();
319 /* This should get expanded in adjust_simduid_builtins. */
321 static void
322 expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
324 gcc_unreachable ();
327 /* This should get expanded in adjust_simduid_builtins. */
329 static void
330 expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
332 gcc_unreachable ();
335 /* This should get expanded in the sanopt pass. */
337 static void
338 expand_UBSAN_NULL (internal_fn, gcall *)
340 gcc_unreachable ();
343 /* This should get expanded in the sanopt pass. */
345 static void
346 expand_UBSAN_BOUNDS (internal_fn, gcall *)
348 gcc_unreachable ();
351 /* This should get expanded in the sanopt pass. */
353 static void
354 expand_UBSAN_VPTR (internal_fn, gcall *)
356 gcc_unreachable ();
359 /* This should get expanded in the sanopt pass. */
361 static void
362 expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
364 gcc_unreachable ();
367 /* This should get expanded in the sanopt pass. */
369 static void
370 expand_ASAN_CHECK (internal_fn, gcall *)
372 gcc_unreachable ();
375 /* This should get expanded in the sanopt pass. */
377 static void
378 expand_ASAN_MARK (internal_fn, gcall *)
380 gcc_unreachable ();
383 /* This should get expanded in the sanopt pass. */
385 static void
386 expand_ASAN_POISON (internal_fn, gcall *)
388 gcc_unreachable ();
391 /* This should get expanded in the sanopt pass. */
393 static void
394 expand_ASAN_POISON_USE (internal_fn, gcall *)
396 gcc_unreachable ();
399 /* This should get expanded in the tsan pass. */
401 static void
402 expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
404 gcc_unreachable ();
407 /* This should get expanded in the lower pass. */
409 static void
410 expand_FALLTHROUGH (internal_fn, gcall *call)
412 error_at (gimple_location (call),
413 "invalid use of attribute %<fallthrough%>");
416 /* Helper function for expand_addsub_overflow. Return 1
417 if ARG interpreted as signed in its precision is known to be always
418 non-negative, 2 if ARG is known to be always negative, or 3 if ARG may
419 be positive or negative. */
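/* Editorial examples (not in the original source): a value zero-extended
   from unsigned char to int can only be 0..255, so 1 is returned; the
   constant -5 yields 2; an SSA name with no recorded value range yields 3.  */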
421 static int
422 get_range_pos_neg (tree arg)
424 if (arg == error_mark_node)
425 return 3;
427 int prec = TYPE_PRECISION (TREE_TYPE (arg));
428 int cnt = 0;
429 if (TREE_CODE (arg) == INTEGER_CST)
431 wide_int w = wi::sext (arg, prec);
432 if (wi::neg_p (w))
433 return 2;
434 else
435 return 1;
437 while (CONVERT_EXPR_P (arg)
438 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
439 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
441 arg = TREE_OPERAND (arg, 0);
442 /* Narrower value zero extended into wider type
443 will always result in positive values. */
444 if (TYPE_UNSIGNED (TREE_TYPE (arg))
445 && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
446 return 1;
447 prec = TYPE_PRECISION (TREE_TYPE (arg));
448 if (++cnt > 30)
449 return 3;
452 if (TREE_CODE (arg) != SSA_NAME)
453 return 3;
454 wide_int arg_min, arg_max;
455 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
457 gimple *g = SSA_NAME_DEF_STMT (arg);
458 if (is_gimple_assign (g)
459 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
461 tree t = gimple_assign_rhs1 (g);
462 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
463 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
465 if (TYPE_UNSIGNED (TREE_TYPE (t))
466 && TYPE_PRECISION (TREE_TYPE (t)) < prec)
467 return 1;
468 prec = TYPE_PRECISION (TREE_TYPE (t));
469 arg = t;
470 if (++cnt > 30)
471 return 3;
472 continue;
475 return 3;
477 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
479 /* For unsigned values, the "positive" range comes
480 below the "negative" range. */
481 if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
482 return 1;
483 if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
484 return 2;
486 else
488 if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
489 return 1;
490 if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
491 return 2;
493 return 3;
496 /* Return minimum precision needed to represent all values
497 of ARG in SIGNed integral type. */
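/* Editorial example: for a signed int SSA name whose value range is known
   to be [0, 1000], the result is 11 for SIGNED (ten value bits plus a sign
   bit) and 10 for UNSIGNED.  */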
499 static int
500 get_min_precision (tree arg, signop sign)
502 int prec = TYPE_PRECISION (TREE_TYPE (arg));
503 int cnt = 0;
504 signop orig_sign = sign;
505 if (TREE_CODE (arg) == INTEGER_CST)
507 int p;
508 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
510 widest_int w = wi::to_widest (arg);
511 w = wi::ext (w, prec, sign);
512 p = wi::min_precision (w, sign);
514 else
515 p = wi::min_precision (arg, sign);
516 return MIN (p, prec);
518 while (CONVERT_EXPR_P (arg)
519 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
520 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
522 arg = TREE_OPERAND (arg, 0);
523 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
525 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
526 sign = UNSIGNED;
527 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
528 return prec + (orig_sign != sign);
529 prec = TYPE_PRECISION (TREE_TYPE (arg));
531 if (++cnt > 30)
532 return prec + (orig_sign != sign);
534 if (TREE_CODE (arg) != SSA_NAME)
535 return prec + (orig_sign != sign);
536 wide_int arg_min, arg_max;
537 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
539 gimple *g = SSA_NAME_DEF_STMT (arg);
540 if (is_gimple_assign (g)
541 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
543 tree t = gimple_assign_rhs1 (g);
544 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
545 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
547 arg = t;
548 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
550 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
551 sign = UNSIGNED;
552 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
553 return prec + (orig_sign != sign);
554 prec = TYPE_PRECISION (TREE_TYPE (arg));
556 if (++cnt > 30)
557 return prec + (orig_sign != sign);
558 continue;
561 return prec + (orig_sign != sign);
563 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
565 int p1 = wi::min_precision (arg_min, sign);
566 int p2 = wi::min_precision (arg_max, sign);
567 p1 = MAX (p1, p2);
568 prec = MIN (prec, p1);
570 else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
572 int p = wi::min_precision (arg_max, UNSIGNED);
573 prec = MIN (prec, p);
575 return prec + (orig_sign != sign);
578 /* Helper for expand_*_overflow. Set the __imag__ part to true
579 (1 except for signed:1 type, in which case store -1). */
581 static void
582 expand_arith_set_overflow (tree lhs, rtx target)
584 if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
585 && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
586 write_complex_part (target, constm1_rtx, true);
587 else
588 write_complex_part (target, const1_rtx, true);
591 /* Helper for expand_*_overflow. Store RES into the __real__ part
592 of TARGET. If RES has larger MODE than __real__ part of TARGET,
593 set the __imag__ part to 1 if RES doesn't fit into it. Similarly
594 if LHS has smaller precision than its mode. */
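/* Editorial example (illustrative): for __builtin_sub_overflow on two ints
   with a signed char result, the subtraction is done in SImode; the value is
   narrowed to the QImode __real__ part and, if converting it back does not
   reproduce the SImode result, the __imag__ overflow flag is set.  */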
596 static void
597 expand_arith_overflow_result_store (tree lhs, rtx target,
598 machine_mode mode, rtx res)
600 machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
601 rtx lres = res;
602 if (tgtmode != mode)
604 rtx_code_label *done_label = gen_label_rtx ();
605 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
606 lres = convert_modes (tgtmode, mode, res, uns);
607 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
608 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
609 EQ, true, mode, NULL_RTX, NULL, done_label,
610 PROB_VERY_LIKELY);
611 expand_arith_set_overflow (lhs, target);
612 emit_label (done_label);
614 int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
615 int tgtprec = GET_MODE_PRECISION (tgtmode);
616 if (prec < tgtprec)
618 rtx_code_label *done_label = gen_label_rtx ();
619 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
620 res = lres;
621 if (uns)
623 rtx mask
624 = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
625 tgtmode);
626 lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
627 true, OPTAB_LIB_WIDEN);
629 else
631 lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
632 NULL_RTX, 1);
633 lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
634 NULL_RTX, 0);
636 do_compare_rtx_and_jump (res, lres,
637 EQ, true, tgtmode, NULL_RTX, NULL, done_label,
638 PROB_VERY_LIKELY);
639 expand_arith_set_overflow (lhs, target);
640 emit_label (done_label);
642 write_complex_part (target, lres, false);
645 /* Helper for expand_*_overflow. Store RES into TARGET. */
647 static void
648 expand_ubsan_result_store (rtx target, rtx res)
650 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
651 /* If this is a scalar in a register that is stored in a wider mode
652 than the declared mode, compute the result into its declared mode
653 and then convert to the wider mode. Our value is the computed
654 expression. */
655 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
656 else
657 emit_move_insn (target, res);
660 /* Add addition/subtraction overflow checking to the statement STMT.
661 CODE says whether the operation is + or -. */
663 static void
664 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
665 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
666 bool uns1_p, bool is_ubsan, tree *datap)
668 rtx res, target = NULL_RTX;
669 tree fn;
670 rtx_code_label *done_label = gen_label_rtx ();
671 rtx_code_label *do_error = gen_label_rtx ();
672 do_pending_stack_adjust ();
673 rtx op0 = expand_normal (arg0);
674 rtx op1 = expand_normal (arg1);
675 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
676 int prec = GET_MODE_PRECISION (mode);
677 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
678 bool do_xor = false;
680 if (is_ubsan)
681 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
683 if (lhs)
685 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
686 if (!is_ubsan)
687 write_complex_part (target, const0_rtx, true);
690 /* We assume both operands and result have the same precision
691 here (GET_MODE_BITSIZE (mode)), S stands for signed type
692 with that precision, U for unsigned type with that precision,
693 sgn for unsigned most significant bit in that precision.
694 s1 is signed first operand, u1 is unsigned first operand,
695 s2 is signed second operand, u2 is unsigned second operand,
696 sr is signed result, ur is unsigned result and the following
697 rules say how to compute result (which is always result of
698 the operands as if both were unsigned, cast to the right
699 signedness) and how to compute whether operation overflowed.
701 s1 + s2 -> sr
702 res = (S) ((U) s1 + (U) s2)
703 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
704 s1 - s2 -> sr
705 res = (S) ((U) s1 - (U) s2)
706 ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
707 u1 + u2 -> ur
708 res = u1 + u2
709 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
710 u1 - u2 -> ur
711 res = u1 - u2
712 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
713 s1 + u2 -> sr
714 res = (S) ((U) s1 + u2)
715 ovf = ((U) res ^ sgn) < u2
716 s1 + u2 -> ur
717 t1 = (S) (u2 ^ sgn)
718 t2 = s1 + t1
719 res = (U) t2 ^ sgn
720 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
721 s1 - u2 -> sr
722 res = (S) ((U) s1 - u2)
723 ovf = u2 > ((U) s1 ^ sgn)
724 s1 - u2 -> ur
725 res = (U) s1 - u2
726 ovf = s1 < 0 || u2 > (U) s1
727 u1 - s2 -> sr
728 res = u1 - (U) s2
729 ovf = u1 >= ((U) s2 ^ sgn)
730 u1 - s2 -> ur
731 t1 = u1 ^ sgn
732 t2 = t1 - (U) s2
733 res = t2 ^ sgn
734 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
735 s1 + s2 -> ur
736 res = (U) s1 + (U) s2
737 ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
738 u1 + u2 -> sr
739 res = (S) (u1 + u2)
740 ovf = (U) res < u2 || res < 0
741 u1 - u2 -> sr
742 res = (S) (u1 - u2)
743 ovf = u1 >= u2 ? res < 0 : res >= 0
744 s1 - s2 -> ur
745 res = (U) s1 - (U) s2
746 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
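/* Editorial worked example (not in the original source), using 8-bit
   precision: for s1 + s2 -> sr with s1 = 100, s2 = 60,
   res = (S) ((U) 100 + (U) 60) = (S) 160 = -96; since s2 >= 0 and res < s1,
   overflow is reported.  For u1 - u2 -> ur with u1 = 5, u2 = 9, res = 252,
   which is > u1, so overflow is reported.  */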
748 if (code == PLUS_EXPR && uns0_p && !uns1_p)
750 /* PLUS_EXPR is commutative, if operand signedness differs,
751 canonicalize to the first operand being signed and second
752 unsigned to simplify following code. */
753 std::swap (op0, op1);
754 std::swap (arg0, arg1);
755 uns0_p = false;
756 uns1_p = true;
759 /* u1 +- u2 -> ur */
760 if (uns0_p && uns1_p && unsr_p)
762 insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
763 : usubv4_optab, mode);
764 if (icode != CODE_FOR_nothing)
766 struct expand_operand ops[4];
767 rtx_insn *last = get_last_insn ();
769 res = gen_reg_rtx (mode);
770 create_output_operand (&ops[0], res, mode);
771 create_input_operand (&ops[1], op0, mode);
772 create_input_operand (&ops[2], op1, mode);
773 create_fixed_operand (&ops[3], do_error);
774 if (maybe_expand_insn (icode, 4, ops))
776 last = get_last_insn ();
777 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
778 && JUMP_P (last)
779 && any_condjump_p (last)
780 && !find_reg_note (last, REG_BR_PROB, 0))
781 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
782 emit_jump (done_label);
783 goto do_error_label;
786 delete_insns_since (last);
789 /* Compute the operation. On RTL level, the addition or subtraction
790 is always unsigned. */
791 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
792 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
793 rtx tem = op0;
794 /* For PLUS_EXPR, the operation is commutative, so we can pick
795 operand to compare against. For prec <= BITS_PER_WORD, I think
796 preferring REG operand is better over CONST_INT, because
797 the CONST_INT might enlarge the instruction or CSE would need
798 to figure out we'd already loaded it into a register before.
799 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
800 as then the multi-word comparison can be perhaps simplified. */
801 if (code == PLUS_EXPR
802 && (prec <= BITS_PER_WORD
803 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
804 : CONST_SCALAR_INT_P (op1)))
805 tem = op1;
806 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
807 true, mode, NULL_RTX, NULL, done_label,
808 PROB_VERY_LIKELY);
809 goto do_error_label;
812 /* s1 +- u2 -> sr */
813 if (!uns0_p && uns1_p && !unsr_p)
815 /* Compute the operation. On RTL level, the addition or subtraction
816 is always unsigned. */
817 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
818 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
819 rtx tem = expand_binop (mode, add_optab,
820 code == PLUS_EXPR ? res : op0, sgn,
821 NULL_RTX, false, OPTAB_LIB_WIDEN);
822 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
823 done_label, PROB_VERY_LIKELY);
824 goto do_error_label;
827 /* s1 + u2 -> ur */
828 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
830 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
831 OPTAB_LIB_WIDEN);
832 /* As we've changed op1, we have to avoid using the value range
833 for the original argument. */
834 arg1 = error_mark_node;
835 do_xor = true;
836 goto do_signed;
839 /* u1 - s2 -> ur */
840 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
842 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
843 OPTAB_LIB_WIDEN);
844 /* As we've changed op0, we have to avoid using the value range
845 for the original argument. */
846 arg0 = error_mark_node;
847 do_xor = true;
848 goto do_signed;
851 /* s1 - u2 -> ur */
852 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
854 /* Compute the operation. On RTL level, the subtraction is always
855 unsigned. */
856 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
857 OPTAB_LIB_WIDEN);
858 int pos_neg = get_range_pos_neg (arg0);
859 if (pos_neg == 2)
860 /* If ARG0 is known to be always negative, this is always overflow. */
861 emit_jump (do_error);
862 else if (pos_neg == 3)
863 /* If ARG0 is not known to be always positive, check at runtime. */
864 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
865 NULL, do_error, PROB_VERY_UNLIKELY);
866 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
867 done_label, PROB_VERY_LIKELY);
868 goto do_error_label;
871 /* u1 - s2 -> sr */
872 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
874 /* Compute the operation. On RTL level, the subtraction is always
875 unsigned. */
876 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
877 OPTAB_LIB_WIDEN);
878 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
879 OPTAB_LIB_WIDEN);
880 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
881 done_label, PROB_VERY_LIKELY);
882 goto do_error_label;
885 /* u1 + u2 -> sr */
886 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
888 /* Compute the operation. On RTL level, the addition is always
889 unsigned. */
890 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
891 OPTAB_LIB_WIDEN);
892 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
893 NULL, do_error, PROB_VERY_UNLIKELY);
894 rtx tem = op1;
895 /* The operation is commutative, so we can pick operand to compare
896 against. For prec <= BITS_PER_WORD, I think preferring REG operand
897 is better over CONST_INT, because the CONST_INT might enlarge the
898 instruction or CSE would need to figure out we'd already loaded it
899 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
900 might be more beneficial, as then the multi-word comparison can be
901 perhaps simplified. */
902 if (prec <= BITS_PER_WORD
903 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
904 : CONST_SCALAR_INT_P (op0))
905 tem = op0;
906 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
907 done_label, PROB_VERY_LIKELY);
908 goto do_error_label;
911 /* s1 +- s2 -> ur */
912 if (!uns0_p && !uns1_p && unsr_p)
914 /* Compute the operation. On RTL level, the addition or subtraction
915 is always unsigned. */
916 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
917 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
918 int pos_neg = get_range_pos_neg (arg1);
919 if (code == PLUS_EXPR)
921 int pos_neg0 = get_range_pos_neg (arg0);
922 if (pos_neg0 != 3 && pos_neg == 3)
924 std::swap (op0, op1);
925 pos_neg = pos_neg0;
928 rtx tem;
929 if (pos_neg != 3)
931 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
932 ? and_optab : ior_optab,
933 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
934 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
935 NULL, done_label, PROB_VERY_LIKELY);
937 else
939 rtx_code_label *do_ior_label = gen_label_rtx ();
940 do_compare_rtx_and_jump (op1, const0_rtx,
941 code == MINUS_EXPR ? GE : LT, false, mode,
942 NULL_RTX, NULL, do_ior_label,
943 PROB_EVEN);
944 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
945 OPTAB_LIB_WIDEN);
946 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
947 NULL, done_label, PROB_VERY_LIKELY);
948 emit_jump (do_error);
949 emit_label (do_ior_label);
950 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
951 OPTAB_LIB_WIDEN);
952 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
953 NULL, done_label, PROB_VERY_LIKELY);
955 goto do_error_label;
958 /* u1 - u2 -> sr */
959 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
961 /* Compute the operation. On RTL level, the subtraction is always
962 unsigned. */
963 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
964 OPTAB_LIB_WIDEN);
965 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
966 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
967 op0_geu_op1, PROB_EVEN);
968 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
969 NULL, done_label, PROB_VERY_LIKELY);
970 emit_jump (do_error);
971 emit_label (op0_geu_op1);
972 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
973 NULL, done_label, PROB_VERY_LIKELY);
974 goto do_error_label;
977 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
979 /* s1 +- s2 -> sr */
980 do_signed:
982 insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
983 : subv4_optab, mode);
984 if (icode != CODE_FOR_nothing)
986 struct expand_operand ops[4];
987 rtx_insn *last = get_last_insn ();
989 res = gen_reg_rtx (mode);
990 create_output_operand (&ops[0], res, mode);
991 create_input_operand (&ops[1], op0, mode);
992 create_input_operand (&ops[2], op1, mode);
993 create_fixed_operand (&ops[3], do_error);
994 if (maybe_expand_insn (icode, 4, ops))
996 last = get_last_insn ();
997 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
998 && JUMP_P (last)
999 && any_condjump_p (last)
1000 && !find_reg_note (last, REG_BR_PROB, 0))
1001 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
1002 emit_jump (done_label);
1003 goto do_error_label;
1006 delete_insns_since (last);
1009 /* Compute the operation. On RTL level, the addition or subtraction
1010 is always unsigned. */
1011 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1012 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1014 /* If we can prove that one of the arguments (for MINUS_EXPR only
1015 the second operand, as subtraction is not commutative) is always
1016 non-negative or always negative, we can do just one comparison
1017 and conditional jump. */
1018 int pos_neg = get_range_pos_neg (arg1);
1019 if (code == PLUS_EXPR)
1021 int pos_neg0 = get_range_pos_neg (arg0);
1022 if (pos_neg0 != 3 && pos_neg == 3)
1024 std::swap (op0, op1);
1025 pos_neg = pos_neg0;
1029 /* Addition overflows if and only if the two operands have the same sign,
1030 and the result has the opposite sign. Subtraction overflows if and
1031 only if the two operands have opposite sign, and the subtrahend has
1032 the same sign as the result. Here 0 is counted as positive. */
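/* Editorial example: with 8-bit operands, 100 + 100 gives res = -56; both
   operands are non-negative while the result is negative, so the check
   below ((res ^ op1) & ~(op0 ^ op1) for addition) ends up with its sign
   bit set and the overflow path is taken.  */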
1033 if (pos_neg == 3)
1035 /* Compute op0 ^ op1 (operands have opposite sign). */
1036 rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1037 OPTAB_LIB_WIDEN);
1039 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1040 rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1041 OPTAB_LIB_WIDEN);
1043 rtx tem;
1044 if (code == PLUS_EXPR)
1046 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1047 tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1048 tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1049 OPTAB_LIB_WIDEN);
1051 else
1053 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1054 tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1055 tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1056 OPTAB_LIB_WIDEN);
1059 /* No overflow if the result has bit sign cleared. */
1060 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1061 NULL, done_label, PROB_VERY_LIKELY);
1064 /* Compare the result of the operation with the first operand.
1065 No overflow for addition if second operand is positive and result
1066 is larger or second operand is negative and result is smaller.
1067 Likewise for subtraction with sign of second operand flipped. */
1068 else
1069 do_compare_rtx_and_jump (res, op0,
1070 (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1071 false, mode, NULL_RTX, NULL, done_label,
1072 PROB_VERY_LIKELY);
1075 do_error_label:
1076 emit_label (do_error);
1077 if (is_ubsan)
1079 /* Expand the ubsan builtin call. */
1080 push_temp_slots ();
1081 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1082 arg0, arg1, datap);
1083 expand_normal (fn);
1084 pop_temp_slots ();
1085 do_pending_stack_adjust ();
1087 else if (lhs)
1088 expand_arith_set_overflow (lhs, target);
1090 /* We're done. */
1091 emit_label (done_label);
1093 if (lhs)
1095 if (is_ubsan)
1096 expand_ubsan_result_store (target, res);
1097 else
1099 if (do_xor)
1100 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1101 OPTAB_LIB_WIDEN);
1103 expand_arith_overflow_result_store (lhs, target, mode, res);
1108 /* Add negate overflow checking to the statement STMT. */
1110 static void
1111 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
1112 tree *datap)
1114 rtx res, op1;
1115 tree fn;
1116 rtx_code_label *done_label, *do_error;
1117 rtx target = NULL_RTX;
1119 done_label = gen_label_rtx ();
1120 do_error = gen_label_rtx ();
1122 do_pending_stack_adjust ();
1123 op1 = expand_normal (arg1);
1125 machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
1126 if (lhs)
1128 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1129 if (!is_ubsan)
1130 write_complex_part (target, const0_rtx, true);
1133 enum insn_code icode = optab_handler (negv3_optab, mode);
1134 if (icode != CODE_FOR_nothing)
1136 struct expand_operand ops[3];
1137 rtx_insn *last = get_last_insn ();
1139 res = gen_reg_rtx (mode);
1140 create_output_operand (&ops[0], res, mode);
1141 create_input_operand (&ops[1], op1, mode);
1142 create_fixed_operand (&ops[2], do_error);
1143 if (maybe_expand_insn (icode, 3, ops))
1145 last = get_last_insn ();
1146 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1147 && JUMP_P (last)
1148 && any_condjump_p (last)
1149 && !find_reg_note (last, REG_BR_PROB, 0))
1150 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
1151 emit_jump (done_label);
1153 else
1155 delete_insns_since (last);
1156 icode = CODE_FOR_nothing;
1160 if (icode == CODE_FOR_nothing)
1162 /* Compute the operation. On RTL level, the negation always
1163 wraps, without overflow checking. */
1164 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1166 /* Compare the operand with the most negative value. */
1167 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
1168 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
1169 done_label, PROB_VERY_LIKELY);
1172 emit_label (do_error);
1173 if (is_ubsan)
1175 /* Expand the ubsan builtin call. */
1176 push_temp_slots ();
1177 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
1178 arg1, NULL_TREE, datap);
1179 expand_normal (fn);
1180 pop_temp_slots ();
1181 do_pending_stack_adjust ();
1183 else if (lhs)
1184 expand_arith_set_overflow (lhs, target);
1186 /* We're done. */
1187 emit_label (done_label);
1189 if (lhs)
1191 if (is_ubsan)
1192 expand_ubsan_result_store (target, res);
1193 else
1194 expand_arith_overflow_result_store (lhs, target, mode, res);
1198 /* Add mul overflow checking to the statement STMT. */
1200 static void
1201 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1202 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1203 tree *datap)
1205 rtx res, op0, op1;
1206 tree fn, type;
1207 rtx_code_label *done_label, *do_error;
1208 rtx target = NULL_RTX;
1209 signop sign;
1210 enum insn_code icode;
1212 done_label = gen_label_rtx ();
1213 do_error = gen_label_rtx ();
1215 do_pending_stack_adjust ();
1216 op0 = expand_normal (arg0);
1217 op1 = expand_normal (arg1);
1219 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
1220 bool uns = unsr_p;
1221 if (lhs)
1223 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1224 if (!is_ubsan)
1225 write_complex_part (target, const0_rtx, true);
1228 if (is_ubsan)
1229 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1231 /* We assume both operands and result have the same precision
1232 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1233 with that precision, U for unsigned type with that precision,
1234 sgn for unsigned most significant bit in that precision.
1235 s1 is signed first operand, u1 is unsigned first operand,
1236 s2 is signed second operand, u2 is unsigned second operand,
1237 sr is signed result, ur is unsigned result and the following
1238 rules say how to compute result (which is always result of
1239 the operands as if both were unsigned, cast to the right
1240 signedness) and how to compute whether operation overflowed.
1241 main_ovf (false) stands for jump on signed multiplication
1242 overflow or the main algorithm with uns == false.
1243 main_ovf (true) stands for jump on unsigned multiplication
1244 overflow or the main algorithm with uns == true.
1246 s1 * s2 -> sr
1247 res = (S) ((U) s1 * (U) s2)
1248 ovf = main_ovf (false)
1249 u1 * u2 -> ur
1250 res = u1 * u2
1251 ovf = main_ovf (true)
1252 s1 * u2 -> ur
1253 res = (U) s1 * u2
1254 ovf = (s1 < 0 && u2) || main_ovf (true)
1255 u1 * u2 -> sr
1256 res = (S) (u1 * u2)
1257 ovf = res < 0 || main_ovf (true)
1258 s1 * u2 -> sr
1259 res = (S) ((U) s1 * u2)
1260 ovf = (S) u2 >= 0 ? main_ovf (false)
1261 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1262 s1 * s2 -> ur
1263 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1264 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1265 res = t1 * t2
1266 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
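/* Editorial worked example, 8-bit precision: for u1 * u2 -> sr with
   u1 = 20, u2 = 10, the unsigned product 200 fits, but res = (S) 200 = -56
   is negative, so overflow is still reported for the signed result.  */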
1268 if (uns0_p && !uns1_p)
1270 /* Multiplication is commutative, if operand signedness differs,
1271 canonicalize to the first operand being signed and second
1272 unsigned to simplify following code. */
1273 std::swap (op0, op1);
1274 std::swap (arg0, arg1);
1275 uns0_p = false;
1276 uns1_p = true;
1279 int pos_neg0 = get_range_pos_neg (arg0);
1280 int pos_neg1 = get_range_pos_neg (arg1);
1282 /* s1 * u2 -> ur */
1283 if (!uns0_p && uns1_p && unsr_p)
1285 switch (pos_neg0)
1287 case 1:
1288 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1289 goto do_main;
1290 case 2:
1291 /* If s1 is negative, avoid the main code, just multiply and
1292 signal overflow if op1 is not 0. */
1293 struct separate_ops ops;
1294 ops.code = MULT_EXPR;
1295 ops.type = TREE_TYPE (arg1);
1296 ops.op0 = make_tree (ops.type, op0);
1297 ops.op1 = make_tree (ops.type, op1);
1298 ops.op2 = NULL_TREE;
1299 ops.location = loc;
1300 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1301 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1302 NULL, done_label, PROB_VERY_LIKELY);
1303 goto do_error_label;
1304 case 3:
1305 rtx_code_label *do_main_label;
1306 do_main_label = gen_label_rtx ();
1307 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1308 NULL, do_main_label, PROB_VERY_LIKELY);
1309 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1310 NULL, do_main_label, PROB_VERY_LIKELY);
1311 expand_arith_set_overflow (lhs, target);
1312 emit_label (do_main_label);
1313 goto do_main;
1314 default:
1315 gcc_unreachable ();
1319 /* u1 * u2 -> sr */
1320 if (uns0_p && uns1_p && !unsr_p)
1322 uns = true;
1323 /* Rest of handling of this case after res is computed. */
1324 goto do_main;
1327 /* s1 * u2 -> sr */
1328 if (!uns0_p && uns1_p && !unsr_p)
1330 switch (pos_neg1)
1332 case 1:
1333 goto do_main;
1334 case 2:
1335 /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1336 avoid the main code, just multiply and signal overflow
1337 unless 0 * u2 or -1 * ((U) Smin). */
1338 struct separate_ops ops;
1339 ops.code = MULT_EXPR;
1340 ops.type = TREE_TYPE (arg1);
1341 ops.op0 = make_tree (ops.type, op0);
1342 ops.op1 = make_tree (ops.type, op1);
1343 ops.op2 = NULL_TREE;
1344 ops.location = loc;
1345 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1346 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1347 NULL, done_label, PROB_VERY_LIKELY);
1348 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1349 NULL, do_error, PROB_VERY_UNLIKELY);
1350 int prec;
1351 prec = GET_MODE_PRECISION (mode);
1352 rtx sgn;
1353 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1354 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1355 NULL, done_label, PROB_VERY_LIKELY);
1356 goto do_error_label;
1357 case 3:
1358 /* Rest of handling of this case after res is computed. */
1359 goto do_main;
1360 default:
1361 gcc_unreachable ();
1365 /* s1 * s2 -> ur */
1366 if (!uns0_p && !uns1_p && unsr_p)
1368 rtx tem, tem2;
1369 switch (pos_neg0 | pos_neg1)
1371 case 1: /* Both operands known to be non-negative. */
1372 goto do_main;
1373 case 2: /* Both operands known to be negative. */
1374 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1375 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1376 /* Avoid looking at arg0/arg1 ranges, as we've changed
1377 the arguments. */
1378 arg0 = error_mark_node;
1379 arg1 = error_mark_node;
1380 goto do_main;
1381 case 3:
1382 if ((pos_neg0 ^ pos_neg1) == 3)
1384 /* If one operand is known to be negative and the other
1385 non-negative, this overflows always, unless the non-negative
1386 one is 0. Just do normal multiply and set overflow
1387 unless one of the operands is 0. */
1388 struct separate_ops ops;
1389 ops.code = MULT_EXPR;
1390 ops.type
1391 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1393 ops.op0 = make_tree (ops.type, op0);
1394 ops.op1 = make_tree (ops.type, op1);
1395 ops.op2 = NULL_TREE;
1396 ops.location = loc;
1397 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1398 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1399 OPTAB_LIB_WIDEN);
1400 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
1401 NULL_RTX, NULL, done_label,
1402 PROB_VERY_LIKELY);
1403 goto do_error_label;
1405 /* The general case, do all the needed comparisons at runtime. */
1406 rtx_code_label *do_main_label, *after_negate_label;
1407 rtx rop0, rop1;
1408 rop0 = gen_reg_rtx (mode);
1409 rop1 = gen_reg_rtx (mode);
1410 emit_move_insn (rop0, op0);
1411 emit_move_insn (rop1, op1);
1412 op0 = rop0;
1413 op1 = rop1;
1414 do_main_label = gen_label_rtx ();
1415 after_negate_label = gen_label_rtx ();
1416 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1417 OPTAB_LIB_WIDEN);
1418 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1419 NULL, after_negate_label, PROB_VERY_LIKELY);
1420 /* Both arguments negative here, negate them and continue with
1421 normal unsigned overflow checking multiplication. */
1422 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1423 NULL_RTX, false));
1424 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1425 NULL_RTX, false));
1426 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1427 the arguments. */
1428 arg0 = error_mark_node;
1429 arg1 = error_mark_node;
1430 emit_jump (do_main_label);
1431 emit_label (after_negate_label);
1432 tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1433 OPTAB_LIB_WIDEN);
1434 do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
1435 NULL, do_main_label, PROB_VERY_LIKELY);
1436 /* One argument is negative here, the other positive. This
1437 always overflows, unless one of the arguments is 0. But
1438 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow whatever s1
1439 is, so we can let the do_main code OR in the overflow flag as is. */
1440 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
1441 NULL, do_main_label, PROB_VERY_LIKELY);
1442 expand_arith_set_overflow (lhs, target);
1443 emit_label (do_main_label);
1444 goto do_main;
1445 default:
1446 gcc_unreachable ();
1450 do_main:
1451 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1452 sign = uns ? UNSIGNED : SIGNED;
1453 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1454 if (icode != CODE_FOR_nothing)
1456 struct expand_operand ops[4];
1457 rtx_insn *last = get_last_insn ();
1459 res = gen_reg_rtx (mode);
1460 create_output_operand (&ops[0], res, mode);
1461 create_input_operand (&ops[1], op0, mode);
1462 create_input_operand (&ops[2], op1, mode);
1463 create_fixed_operand (&ops[3], do_error);
1464 if (maybe_expand_insn (icode, 4, ops))
1466 last = get_last_insn ();
1467 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1468 && JUMP_P (last)
1469 && any_condjump_p (last)
1470 && !find_reg_note (last, REG_BR_PROB, 0))
1471 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
1472 emit_jump (done_label);
1474 else
1476 delete_insns_since (last);
1477 icode = CODE_FOR_nothing;
1481 if (icode == CODE_FOR_nothing)
1483 struct separate_ops ops;
1484 int prec = GET_MODE_PRECISION (mode);
1485 machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
1486 ops.op0 = make_tree (type, op0);
1487 ops.op1 = make_tree (type, op1);
1488 ops.op2 = NULL_TREE;
1489 ops.location = loc;
1490 if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1491 && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
1493 machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
1494 ops.code = WIDEN_MULT_EXPR;
1495 ops.type
1496 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1498 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1499 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1500 NULL_RTX, uns);
1501 hipart = convert_modes (mode, wmode, hipart, uns);
1502 res = convert_modes (mode, wmode, res, uns);
1503 if (uns)
1504 /* For the unsigned multiplication, there was overflow if
1505 HIPART is non-zero. */
1506 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1507 NULL_RTX, NULL, done_label,
1508 PROB_VERY_LIKELY);
1509 else
1511 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1512 NULL_RTX, 0);
1513 /* RES is low half of the double width result, HIPART
1514 the high half. There was overflow if
1515 HIPART is different from RES < 0 ? -1 : 0. */
1516 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1517 NULL_RTX, NULL, done_label,
1518 PROB_VERY_LIKELY);
1521 else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
1523 rtx_code_label *large_op0 = gen_label_rtx ();
1524 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1525 rtx_code_label *one_small_one_large = gen_label_rtx ();
1526 rtx_code_label *both_ops_large = gen_label_rtx ();
1527 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1528 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1529 rtx_code_label *do_overflow = gen_label_rtx ();
1530 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1532 unsigned int hprec = GET_MODE_PRECISION (hmode);
1533 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1534 NULL_RTX, uns);
1535 hipart0 = convert_modes (hmode, mode, hipart0, uns);
1536 rtx lopart0 = convert_modes (hmode, mode, op0, uns);
1537 rtx signbit0 = const0_rtx;
1538 if (!uns)
1539 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1540 NULL_RTX, 0);
1541 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1542 NULL_RTX, uns);
1543 hipart1 = convert_modes (hmode, mode, hipart1, uns);
1544 rtx lopart1 = convert_modes (hmode, mode, op1, uns);
1545 rtx signbit1 = const0_rtx;
1546 if (!uns)
1547 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1548 NULL_RTX, 0);
1550 res = gen_reg_rtx (mode);
1552 /* True if op0 resp. op1 are known to be in the range of
1553 halfstype. */
1554 bool op0_small_p = false;
1555 bool op1_small_p = false;
1556 /* True if op0 resp. op1 are known to have all zeros or all ones
1557 in the upper half of bits, but are not known to be
1558 op{0,1}_small_p. */
1559 bool op0_medium_p = false;
1560 bool op1_medium_p = false;
1561 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1562 nonnegative, 1 if unknown. */
1563 int op0_sign = 1;
1564 int op1_sign = 1;
1566 if (pos_neg0 == 1)
1567 op0_sign = 0;
1568 else if (pos_neg0 == 2)
1569 op0_sign = -1;
1570 if (pos_neg1 == 1)
1571 op1_sign = 0;
1572 else if (pos_neg1 == 2)
1573 op1_sign = -1;
1575 unsigned int mprec0 = prec;
1576 if (arg0 != error_mark_node)
1577 mprec0 = get_min_precision (arg0, sign);
1578 if (mprec0 <= hprec)
1579 op0_small_p = true;
1580 else if (!uns && mprec0 <= hprec + 1)
1581 op0_medium_p = true;
1582 unsigned int mprec1 = prec;
1583 if (arg1 != error_mark_node)
1584 mprec1 = get_min_precision (arg1, sign);
1585 if (mprec1 <= hprec)
1586 op1_small_p = true;
1587 else if (!uns && mprec1 <= hprec + 1)
1588 op1_medium_p = true;
1590 int smaller_sign = 1;
1591 int larger_sign = 1;
1592 if (op0_small_p)
1594 smaller_sign = op0_sign;
1595 larger_sign = op1_sign;
1597 else if (op1_small_p)
1599 smaller_sign = op1_sign;
1600 larger_sign = op0_sign;
1602 else if (op0_sign == op1_sign)
1604 smaller_sign = op0_sign;
1605 larger_sign = op0_sign;
1608 if (!op0_small_p)
1609 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1610 NULL_RTX, NULL, large_op0,
1611 PROB_UNLIKELY);
1613 if (!op1_small_p)
1614 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1615 NULL_RTX, NULL, small_op0_large_op1,
1616 PROB_UNLIKELY);
1618 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1619 hmode to mode, the multiplication will never overflow. We can
1620 do just one hmode x hmode => mode widening multiplication. */
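/* Editorial note: e.g. with mode == SImode and hmode == HImode, unsigned
   operands below 2^16 give a product below 2^32, and signed operands in
   [-2^15, 2^15 - 1] give a product of magnitude at most 2^30, so the
   widening multiply cannot overflow SImode.  */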
1621 rtx lopart0s = lopart0, lopart1s = lopart1;
1622 if (GET_CODE (lopart0) == SUBREG)
1624 lopart0s = shallow_copy_rtx (lopart0);
1625 SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1626 SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1628 if (GET_CODE (lopart1) == SUBREG)
1630 lopart1s = shallow_copy_rtx (lopart1);
1631 SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1632 SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1634 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1635 ops.op0 = make_tree (halfstype, lopart0s);
1636 ops.op1 = make_tree (halfstype, lopart1s);
1637 ops.code = WIDEN_MULT_EXPR;
1638 ops.type = type;
1639 rtx thisres
1640 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1641 emit_move_insn (res, thisres);
1642 emit_jump (done_label);
1644 emit_label (small_op0_large_op1);
1646 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1647 but op1 is not, just swap the arguments and handle it as op1
1648 sign/zero extended, op0 not. */
1649 rtx larger = gen_reg_rtx (mode);
1650 rtx hipart = gen_reg_rtx (hmode);
1651 rtx lopart = gen_reg_rtx (hmode);
1652 emit_move_insn (larger, op1);
1653 emit_move_insn (hipart, hipart1);
1654 emit_move_insn (lopart, lopart0);
1655 emit_jump (one_small_one_large);
1657 emit_label (large_op0);
1659 if (!op1_small_p)
1660 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1661 NULL_RTX, NULL, both_ops_large,
1662 PROB_UNLIKELY);
1664 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1665 but op0 is not, prepare larger, hipart and lopart pseudos and
1666 handle it together with small_op0_large_op1. */
1667 emit_move_insn (larger, op0);
1668 emit_move_insn (hipart, hipart0);
1669 emit_move_insn (lopart, lopart1);
1671 emit_label (one_small_one_large);
1673 /* lopart is the low part of the operand that is sign extended
1674 to mode, larger is the other operand, hipart is the
1675 high part of larger and lopart0 and lopart1 are the low parts
1676 of both operands.
1677 We perform lopart0 * lopart1 and lopart * hipart widening
1678 multiplications. */
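/* Editorial note: in effect the full product is assembled from two
   half-width widening multiplies: lopart0 * lopart1 provides the low half
   (plus a carry into the high half), while lopart * hipart, corrected for
   the signs of the half-words when !uns, provides the high half.  */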
1679 tree halfutype = build_nonstandard_integer_type (hprec, 1);
1680 ops.op0 = make_tree (halfutype, lopart0);
1681 ops.op1 = make_tree (halfutype, lopart1);
1682 rtx lo0xlo1
1683 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1685 ops.op0 = make_tree (halfutype, lopart);
1686 ops.op1 = make_tree (halfutype, hipart);
1687 rtx loxhi = gen_reg_rtx (mode);
1688 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1689 emit_move_insn (loxhi, tem);
1691 if (!uns)
1693 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1694 if (larger_sign == 0)
1695 emit_jump (after_hipart_neg);
1696 else if (larger_sign != -1)
1697 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1698 NULL_RTX, NULL, after_hipart_neg,
1699 PROB_EVEN);
1701 tem = convert_modes (mode, hmode, lopart, 1);
1702 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1703 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1704 1, OPTAB_DIRECT);
1705 emit_move_insn (loxhi, tem);
1707 emit_label (after_hipart_neg);
1709 /* if (lopart < 0) loxhi -= larger; */
1710 if (smaller_sign == 0)
1711 emit_jump (after_lopart_neg);
1712 else if (smaller_sign != -1)
1713 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1714 NULL_RTX, NULL, after_lopart_neg,
1715 PROB_EVEN);
1717 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1718 1, OPTAB_DIRECT);
1719 emit_move_insn (loxhi, tem);
1721 emit_label (after_lopart_neg);
1724 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1725 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1726 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1727 1, OPTAB_DIRECT);
1728 emit_move_insn (loxhi, tem);
1730 /* if (loxhi >> (bitsize / 2)
1731 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
1732 if (loxhi >> (bitsize / 2) == 0) (if uns). */
1733 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1734 NULL_RTX, 0);
1735 hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
1736 rtx signbitloxhi = const0_rtx;
1737 if (!uns)
1738 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1739 convert_modes (hmode, mode,
1740 loxhi, 0),
1741 hprec - 1, NULL_RTX, 0);
1743 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1744 NULL_RTX, NULL, do_overflow,
1745 PROB_VERY_UNLIKELY);
1747 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1748 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1749 NULL_RTX, 1);
1750 tem = convert_modes (mode, hmode,
1751 convert_modes (hmode, mode, lo0xlo1, 1), 1);
1753 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1754 1, OPTAB_DIRECT);
1755 if (tem != res)
1756 emit_move_insn (res, tem);
1757 emit_jump (done_label);
1759 emit_label (both_ops_large);
1761 /* If both operands are large (not sign (!uns) or zero (uns)
1762 extended from hmode), then perform the full multiplication
1763 which will be the result of the operation.
1764 The only cases which don't overflow are for signed multiplication
1765 some cases where both hipart0 and hipart1 are 0 or -1.
1766 For unsigned multiplication when high parts are both non-zero
1767 this overflows always. */
1768 ops.code = MULT_EXPR;
1769 ops.op0 = make_tree (type, op0);
1770 ops.op1 = make_tree (type, op1);
1771 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1772 emit_move_insn (res, tem);
1774 if (!uns)
1776 if (!op0_medium_p)
1778 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1779 NULL_RTX, 1, OPTAB_DIRECT);
1780 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1781 NULL_RTX, NULL, do_error,
1782 PROB_VERY_UNLIKELY);
1785 if (!op1_medium_p)
1787 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1788 NULL_RTX, 1, OPTAB_DIRECT);
1789 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1790 NULL_RTX, NULL, do_error,
1791 PROB_VERY_UNLIKELY);
1794 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1795 the same, overflow happened if res is negative, if they are
1796 different, overflow happened if res is positive. */
1797 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1798 emit_jump (hipart_different);
1799 else if (op0_sign == 1 || op1_sign == 1)
1800 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1801 NULL_RTX, NULL, hipart_different,
1802 PROB_EVEN);
1804 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
1805 NULL_RTX, NULL, do_error,
1806 PROB_VERY_UNLIKELY);
1807 emit_jump (done_label);
1809 emit_label (hipart_different);
1811 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1812 NULL_RTX, NULL, do_error,
1813 PROB_VERY_UNLIKELY);
1814 emit_jump (done_label);
1817 emit_label (do_overflow);
1819 /* Overflow; do the full multiplication and fall through into do_error. */
1820 ops.op0 = make_tree (type, op0);
1821 ops.op1 = make_tree (type, op1);
1822 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1823 emit_move_insn (res, tem);
1825 else
1827 gcc_assert (!is_ubsan);
1828 ops.code = MULT_EXPR;
1829 ops.type = type;
1830 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1831 emit_jump (done_label);
1835 do_error_label:
1836 emit_label (do_error);
1837 if (is_ubsan)
1839 /* Expand the ubsan builtin call. */
1840 push_temp_slots ();
1841 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1842 arg0, arg1, datap);
1843 expand_normal (fn);
1844 pop_temp_slots ();
1845 do_pending_stack_adjust ();
1847 else if (lhs)
1848 expand_arith_set_overflow (lhs, target);
1850 /* We're done. */
1851 emit_label (done_label);
1853 /* u1 * u2 -> sr */
1854 if (uns0_p && uns1_p && !unsr_p)
1856 rtx_code_label *all_done_label = gen_label_rtx ();
1857 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1858 NULL, all_done_label, PROB_VERY_LIKELY);
1859 expand_arith_set_overflow (lhs, target);
1860 emit_label (all_done_label);
1863 /* s1 * u2 -> sr */
1864 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1866 rtx_code_label *all_done_label = gen_label_rtx ();
1867 rtx_code_label *set_noovf = gen_label_rtx ();
1868 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1869 NULL, all_done_label, PROB_VERY_LIKELY);
1870 expand_arith_set_overflow (lhs, target);
1871 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1872 NULL, set_noovf, PROB_VERY_LIKELY);
1873 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1874 NULL, all_done_label, PROB_VERY_UNLIKELY);
1875 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1876 all_done_label, PROB_VERY_UNLIKELY);
1877 emit_label (set_noovf);
1878 write_complex_part (target, const0_rtx, true);
1879 emit_label (all_done_label);
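/* Illustrative example (not part of the original sources): for
   __builtin_mul_overflow (a, b, &res) with int A, unsigned B and int RES,
   a value b >= 0x80000000u overflows the signed result except when
   a == 0, or when a == -1 and b == 0x80000000u (the product is then
   exactly INT_MIN); the op0/op1/res checks above encode precisely those
   two escapes.  */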
1882 if (lhs)
1884 if (is_ubsan)
1885 expand_ubsan_result_store (target, res);
1886 else
1887 expand_arith_overflow_result_store (lhs, target, mode, res);
1891 /* Expand a UBSAN_CHECK_* internal function when it has vector operands. */
1893 static void
1894 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
1895 tree arg0, tree arg1)
1897 int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
1898 rtx_code_label *loop_lab = NULL;
1899 rtx cntvar = NULL_RTX;
1900 tree cntv = NULL_TREE;
1901 tree eltype = TREE_TYPE (TREE_TYPE (arg0));
1902 tree sz = TYPE_SIZE (eltype);
1903 tree data = NULL_TREE;
1904 tree resv = NULL_TREE;
1905 rtx lhsr = NULL_RTX;
1906 rtx resvr = NULL_RTX;
1908 if (lhs)
1910 optab op;
1911 lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1912 if (!VECTOR_MODE_P (GET_MODE (lhsr))
1913 || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
1914 optab_default)) == unknown_optab
1915 || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
1916 == CODE_FOR_nothing))
1918 if (MEM_P (lhsr))
1919 resv = make_tree (TREE_TYPE (lhs), lhsr);
1920 else
1922 resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
1923 resv = make_tree (TREE_TYPE (lhs), resvr);
1927 if (cnt > 4)
1929 do_pending_stack_adjust ();
1930 loop_lab = gen_label_rtx ();
1931 cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
1932 cntv = make_tree (sizetype, cntvar);
1933 emit_move_insn (cntvar, const0_rtx);
1934 emit_label (loop_lab);
1936 if (TREE_CODE (arg0) != VECTOR_CST)
1938 rtx arg0r = expand_normal (arg0);
1939 arg0 = make_tree (TREE_TYPE (arg0), arg0r);
1941 if (TREE_CODE (arg1) != VECTOR_CST)
1943 rtx arg1r = expand_normal (arg1);
1944 arg1 = make_tree (TREE_TYPE (arg1), arg1r);
1946 for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
1948 tree op0, op1, res = NULL_TREE;
1949 if (cnt > 4)
1951 tree atype = build_array_type_nelts (eltype, cnt);
1952 op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
1953 op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
1954 NULL_TREE, NULL_TREE);
1955 op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
1956 op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
1957 NULL_TREE, NULL_TREE);
1958 if (resv)
1960 res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
1961 res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
1962 NULL_TREE, NULL_TREE);
1965 else
1967 tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
1968 op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
1969 op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
1970 if (resv)
1971 res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
1972 bitpos);
1974 switch (code)
1976 case PLUS_EXPR:
1977 expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
1978 false, false, false, true, &data);
1979 break;
1980 case MINUS_EXPR:
1981 if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
1982 expand_neg_overflow (loc, res, op1, true, &data);
1983 else
1984 expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
1985 false, false, false, true, &data);
1986 break;
1987 case MULT_EXPR:
1988 expand_mul_overflow (loc, res, op0, op1, false, false, false,
1989 true, &data);
1990 break;
1991 default:
1992 gcc_unreachable ();
1995 if (cnt > 4)
1997 struct separate_ops ops;
1998 ops.code = PLUS_EXPR;
1999 ops.type = TREE_TYPE (cntv);
2000 ops.op0 = cntv;
2001 ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
2002 ops.op2 = NULL_TREE;
2003 ops.location = loc;
2004 rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
2005 EXPAND_NORMAL);
2006 if (ret != cntvar)
2007 emit_move_insn (cntvar, ret);
2008 do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
2009 TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
2010 PROB_VERY_LIKELY);
2012 if (lhs && resv == NULL_TREE)
2014 struct separate_ops ops;
2015 ops.code = code;
2016 ops.type = TREE_TYPE (arg0);
2017 ops.op0 = arg0;
2018 ops.op1 = arg1;
2019 ops.op2 = NULL_TREE;
2020 ops.location = loc;
2021 rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
2022 EXPAND_NORMAL);
2023 if (ret != lhsr)
2024 emit_move_insn (lhsr, ret);
2026 else if (resvr)
2027 emit_move_insn (lhsr, resvr);
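/* Illustrative example (not part of the original sources): with GCC's
   vector extension and -fsanitize=signed-integer-overflow, a plain vector
   addition such as

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si vadd (v4si a, v4si b) { return a + b; }

   can be instrumented with a UBSAN_CHECK_ADD call whose operands are
   vectors; the helper above then expands the check one element at a
   time, unrolled for short vectors and as a loop otherwise.  */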
2030 /* Expand UBSAN_CHECK_ADD call STMT. */
2032 static void
2033 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2035 location_t loc = gimple_location (stmt);
2036 tree lhs = gimple_call_lhs (stmt);
2037 tree arg0 = gimple_call_arg (stmt, 0);
2038 tree arg1 = gimple_call_arg (stmt, 1);
2039 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2040 expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2041 else
2042 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2043 false, false, false, true, NULL);
2046 /* Expand UBSAN_CHECK_SUB call STMT. */
2048 static void
2049 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2051 location_t loc = gimple_location (stmt);
2052 tree lhs = gimple_call_lhs (stmt);
2053 tree arg0 = gimple_call_arg (stmt, 0);
2054 tree arg1 = gimple_call_arg (stmt, 1);
2055 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2056 expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2057 else if (integer_zerop (arg0))
2058 expand_neg_overflow (loc, lhs, arg1, true, NULL);
2059 else
2060 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2061 false, false, false, true, NULL);
2064 /* Expand UBSAN_CHECK_MUL call STMT. */
2066 static void
2067 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2069 location_t loc = gimple_location (stmt);
2070 tree lhs = gimple_call_lhs (stmt);
2071 tree arg0 = gimple_call_arg (stmt, 0);
2072 tree arg1 = gimple_call_arg (stmt, 1);
2073 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2074 expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2075 else
2076 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2077 NULL);
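/* Illustrative example (not part of the original sources): under
   -fsanitize=signed-integer-overflow, a signed multiplication such as

     int scale (int x, int y) { return x * y; }

   is instrumented as a UBSAN_CHECK_MUL call, so that overflow is
   diagnosed at run time via the ubsan runtime instead of silently
   invoking undefined behavior.  */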
2080 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
2082 static void
2083 expand_arith_overflow (enum tree_code code, gimple *stmt)
2085 tree lhs = gimple_call_lhs (stmt);
2086 if (lhs == NULL_TREE)
2087 return;
2088 tree arg0 = gimple_call_arg (stmt, 0);
2089 tree arg1 = gimple_call_arg (stmt, 1);
2090 tree type = TREE_TYPE (TREE_TYPE (lhs));
2091 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2092 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2093 int unsr_p = TYPE_UNSIGNED (type);
2094 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2095 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2096 int precres = TYPE_PRECISION (type);
2097 location_t loc = gimple_location (stmt);
2098 if (!uns0_p && get_range_pos_neg (arg0) == 1)
2099 uns0_p = true;
2100 if (!uns1_p && get_range_pos_neg (arg1) == 1)
2101 uns1_p = true;
2102 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2103 prec0 = MIN (prec0, pr);
2104 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2105 prec1 = MIN (prec1, pr);
2107 /* If uns0_p && uns1_p, precop is the minimum precision of an
2108 unsigned type needed to hold the exact result; otherwise
2109 precop is the minimum precision of a signed type needed to
2110 hold the exact result. */
2111 int precop;
2112 if (code == MULT_EXPR)
2113 precop = prec0 + prec1 + (uns0_p != uns1_p);
2114 else
2116 if (uns0_p == uns1_p)
2117 precop = MAX (prec0, prec1) + 1;
2118 else if (uns0_p)
2119 precop = MAX (prec0 + 1, prec1) + 1;
2120 else
2121 precop = MAX (prec0, prec1 + 1) + 1;
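/* Worked example (illustrative, not part of the original sources):
   adding two 8-bit quantities of the same signedness needs
   MAX (8, 8) + 1 = 9 bits for the exact sum (unsigned: up to 510;
   signed: -256 .. 254), while adding an 8-bit unsigned value to an
   8-bit signed one first needs 9 signed bits for the unsigned operand,
   hence MAX (8 + 1, 8) + 1 = 10 bits.  */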
2123 int orig_precres = precres;
2127 if ((uns0_p && uns1_p)
2128 ? ((precop + !unsr_p) <= precres
2129 /* u1 - u2 -> ur can overflow, no matter what precision
2130 the result has. */
2131 && (code != MINUS_EXPR || !unsr_p))
2132 : (!unsr_p && precop <= precres))
2134 /* The infinite precision result will always fit into the result. */
2135 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2136 write_complex_part (target, const0_rtx, true);
2137 enum machine_mode mode = TYPE_MODE (type);
2138 struct separate_ops ops;
2139 ops.code = code;
2140 ops.type = type;
2141 ops.op0 = fold_convert_loc (loc, type, arg0);
2142 ops.op1 = fold_convert_loc (loc, type, arg1);
2143 ops.op2 = NULL_TREE;
2144 ops.location = loc;
2145 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2146 expand_arith_overflow_result_store (lhs, target, mode, tem);
2147 return;
2150 /* For operations with low precision, if the target doesn't support them,
2151 start with precres widening right away; otherwise do it only if the
2152 simplest cases can't be used. */
2153 const int min_precision = targetm.min_arithmetic_precision ();
2154 if (orig_precres == precres && precres < min_precision)
2156 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2157 && prec1 <= precres)
2158 || ((!uns0_p || !uns1_p) && !unsr_p
2159 && prec0 + uns0_p <= precres
2160 && prec1 + uns1_p <= precres))
2162 arg0 = fold_convert_loc (loc, type, arg0);
2163 arg1 = fold_convert_loc (loc, type, arg1);
2164 switch (code)
2166 case MINUS_EXPR:
2167 if (integer_zerop (arg0) && !unsr_p)
2169 expand_neg_overflow (loc, lhs, arg1, false, NULL);
2170 return;
2172 /* FALLTHRU */
2173 case PLUS_EXPR:
2174 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2175 unsr_p, unsr_p, false, NULL);
2176 return;
2177 case MULT_EXPR:
2178 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2179 unsr_p, unsr_p, false, NULL);
2180 return;
2181 default:
2182 gcc_unreachable ();
2186 /* For sub-word operations, retry with a wider type first. */
2187 if (orig_precres == precres && precop <= BITS_PER_WORD)
2189 int p = MAX (min_precision, precop);
2190 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
2191 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2192 uns0_p && uns1_p
2193 && unsr_p);
2194 p = TYPE_PRECISION (optype);
2195 if (p > precres)
2197 precres = p;
2198 unsr_p = TYPE_UNSIGNED (optype);
2199 type = optype;
2200 continue;
2204 if (prec0 <= precres && prec1 <= precres)
2206 tree types[2];
2207 if (unsr_p)
2209 types[0] = build_nonstandard_integer_type (precres, 0);
2210 types[1] = type;
2212 else
2214 types[0] = type;
2215 types[1] = build_nonstandard_integer_type (precres, 1);
2217 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2218 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2219 if (code != MULT_EXPR)
2220 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2221 uns0_p, uns1_p, false, NULL);
2222 else
2223 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2224 uns0_p, uns1_p, false, NULL);
2225 return;
2228 /* Retry with a wider type. */
2229 if (orig_precres == precres)
2231 int p = MAX (prec0, prec1);
2232 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
2233 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2234 uns0_p && uns1_p
2235 && unsr_p);
2236 p = TYPE_PRECISION (optype);
2237 if (p > precres)
2239 precres = p;
2240 unsr_p = TYPE_UNSIGNED (optype);
2241 type = optype;
2242 continue;
2246 gcc_unreachable ();
2248 while (1);
2251 /* Expand ADD_OVERFLOW STMT. */
2253 static void
2254 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2256 expand_arith_overflow (PLUS_EXPR, stmt);
2259 /* Expand SUB_OVERFLOW STMT. */
2261 static void
2262 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2264 expand_arith_overflow (MINUS_EXPR, stmt);
2267 /* Expand MUL_OVERFLOW STMT. */
2269 static void
2270 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2272 expand_arith_overflow (MULT_EXPR, stmt);
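/* Illustrative example (not part of the original sources): the
   type-generic overflow builtins are represented by these internal
   functions in GIMPLE.  A function such as

     _Bool
     checked_add (int a, int b, int *res)
     {
       return __builtin_add_overflow (a, b, res);
     }

   becomes a call to ADD_OVERFLOW whose complex result carries the sum in
   its real part and the overflow flag in its imaginary part, which
   expand_arith_overflow then turns into RTL.  */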
2275 /* This should get folded in tree-vectorizer.c. */
2277 static void
2278 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2280 gcc_unreachable ();
2283 /* Expand MASK_LOAD call STMT using optab OPTAB. */
2285 static void
2286 expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2288 struct expand_operand ops[3];
2289 tree type, lhs, rhs, maskt, ptr;
2290 rtx mem, target, mask;
2291 unsigned align;
2293 maskt = gimple_call_arg (stmt, 2);
2294 lhs = gimple_call_lhs (stmt);
2295 if (lhs == NULL_TREE)
2296 return;
2297 type = TREE_TYPE (lhs);
2298 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
2299 align = tree_to_shwi (gimple_call_arg (stmt, 1));
2300 if (TYPE_ALIGN (type) != align)
2301 type = build_aligned_type (type, align);
2302 rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
2304 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2305 gcc_assert (MEM_P (mem));
2306 mask = expand_normal (maskt);
2307 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2308 create_output_operand (&ops[0], target, TYPE_MODE (type));
2309 create_fixed_operand (&ops[1], mem);
2310 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2311 expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2312 TYPE_MODE (TREE_TYPE (maskt))),
2313 3, ops);
2316 /* Expand MASK_STORE call STMT using optab OPTAB. */
2318 static void
2319 expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2321 struct expand_operand ops[3];
2322 tree type, lhs, rhs, maskt, ptr;
2323 rtx mem, reg, mask;
2324 unsigned align;
2326 maskt = gimple_call_arg (stmt, 2);
2327 rhs = gimple_call_arg (stmt, 3);
2328 type = TREE_TYPE (rhs);
2329 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
2330 align = tree_to_shwi (gimple_call_arg (stmt, 1));
2331 if (TYPE_ALIGN (type) != align)
2332 type = build_aligned_type (type, align);
2333 lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
2335 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2336 gcc_assert (MEM_P (mem));
2337 mask = expand_normal (maskt);
2338 reg = expand_normal (rhs);
2339 create_fixed_operand (&ops[0], mem);
2340 create_input_operand (&ops[1], reg, TYPE_MODE (type));
2341 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2342 expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2343 TYPE_MODE (TREE_TYPE (maskt))),
2344 3, ops);
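/* Illustrative example (not part of the original sources): on targets
   that provide masked vector loads and stores, the vectorizer can turn a
   conditional loop body such as

     void
     f (int *restrict out, const int *restrict in,
        const int *restrict c, int n)
     {
       for (int i = 0; i < n; i++)
         if (c[i])
           out[i] = in[i] + 1;
     }

   into MASK_LOAD and MASK_STORE calls, which the two expanders above map
   onto the target's masked load/store patterns.  */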
2347 static void
2348 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2352 static void
2353 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2355 /* When guessing was done, the hints should already have been stripped away. */
2356 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2358 rtx target;
2359 tree lhs = gimple_call_lhs (stmt);
2360 if (lhs)
2361 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2362 else
2363 target = const0_rtx;
2364 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2365 if (lhs && val != target)
2366 emit_move_insn (target, val);
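/* Illustrative example (not part of the original sources): a hint such as

     if (__builtin_expect (p == NULL, 0))
       return -1;

   only steers branch prediction; by the time it reaches this expander the
   probability hint has already been consumed, and the call simply
   collapses to a copy of its first argument.  */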
2369 /* IFN_VA_ARG is supposed to be expanded by pass_stdarg, so this dummy function
2370 should never be called. */
2372 static void
2373 expand_VA_ARG (internal_fn, gcall *)
2375 gcc_unreachable ();
2378 /* Expand the IFN_UNIQUE function according to its first argument. */
2380 static void
2381 expand_UNIQUE (internal_fn, gcall *stmt)
2383 rtx pattern = NULL_RTX;
2384 enum ifn_unique_kind kind
2385 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2387 switch (kind)
2389 default:
2390 gcc_unreachable ();
2392 case IFN_UNIQUE_UNSPEC:
2393 if (targetm.have_unique ())
2394 pattern = targetm.gen_unique ();
2395 break;
2397 case IFN_UNIQUE_OACC_FORK:
2398 case IFN_UNIQUE_OACC_JOIN:
2399 if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2401 tree lhs = gimple_call_lhs (stmt);
2402 rtx target = const0_rtx;
2404 if (lhs)
2405 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2407 rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2408 rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2410 if (kind == IFN_UNIQUE_OACC_FORK)
2411 pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2412 else
2413 pattern = targetm.gen_oacc_join (target, data_dep, axis);
2415 else
2416 gcc_unreachable ();
2417 break;
2420 if (pattern)
2421 emit_insn (pattern);
2424 /* The size of an OpenACC compute dimension. */
2426 static void
2427 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2429 tree lhs = gimple_call_lhs (stmt);
2431 if (!lhs)
2432 return;
2434 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2435 if (targetm.have_oacc_dim_size ())
2437 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2438 VOIDmode, EXPAND_NORMAL);
2439 emit_insn (targetm.gen_oacc_dim_size (target, dim));
2441 else
2442 emit_move_insn (target, GEN_INT (1));
2445 /* The position of an OpenACC execution engine along one compute axis. */
2447 static void
2448 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
2450 tree lhs = gimple_call_lhs (stmt);
2452 if (!lhs)
2453 return;
2455 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2456 if (targetm.have_oacc_dim_pos ())
2458 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2459 VOIDmode, EXPAND_NORMAL);
2460 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
2462 else
2463 emit_move_insn (target, const0_rtx);
2466 /* This is expanded by the oacc_device_lower pass. */
2468 static void
2469 expand_GOACC_LOOP (internal_fn, gcall *)
2471 gcc_unreachable ();
2474 /* This is expanded by the oacc_device_lower pass. */
2476 static void
2477 expand_GOACC_REDUCTION (internal_fn, gcall *)
2479 gcc_unreachable ();
2482 /* This is expanded by the oacc_device_lower pass. */
2484 static void
2485 expand_GOACC_TILE (internal_fn, gcall *)
2487 gcc_unreachable ();
2490 /* Set errno to EDOM. */
2492 static void
2493 expand_SET_EDOM (internal_fn, gcall *)
2495 #ifdef TARGET_EDOM
2496 #ifdef GEN_ERRNO_RTX
2497 rtx errno_rtx = GEN_ERRNO_RTX;
2498 #else
2499 rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
2500 #endif
2501 emit_move_insn (errno_rtx,
2502 gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
2503 #else
2504 gcc_unreachable ();
2505 #endif
2508 /* Expand atomic bit test and set. */
2510 static void
2511 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
2513 expand_ifn_atomic_bit_test_and (call);
2516 /* Expand atomic bit test and complement. */
2518 static void
2519 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
2521 expand_ifn_atomic_bit_test_and (call);
2524 /* Expand atomic bit test and reset. */
2526 static void
2527 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
2529 expand_ifn_atomic_bit_test_and (call);
2532 /* Expand atomic compare and exchange. */
2534 static void
2535 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
2537 expand_ifn_atomic_compare_exchange (call);
2540 /* Expand LAUNDER to assignment, lhs = arg0. */
2542 static void
2543 expand_LAUNDER (internal_fn, gcall *call)
2545 tree lhs = gimple_call_lhs (call);
2547 if (!lhs)
2548 return;
2550 expand_assignment (lhs, gimple_call_arg (call, 0), false);
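/* Illustrative example (not part of the original sources): IFN_LAUNDER
   carries __builtin_launder (used by C++17 std::launder) through the
   middle end; at expansion time

     int *q = __builtin_launder (p);

   degenerates to a plain assignment of P to Q.  */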
2553 /* Expand DIVMOD() using:
2554 a) the optab handler for udivmod/sdivmod if it is available;
2555 b) otherwise, if the target provides a divmod libfunc, generate a
2556 call to it. */
2558 static void
2559 expand_DIVMOD (internal_fn, gcall *call_stmt)
2561 tree lhs = gimple_call_lhs (call_stmt);
2562 tree arg0 = gimple_call_arg (call_stmt, 0);
2563 tree arg1 = gimple_call_arg (call_stmt, 1);
2565 gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
2566 tree type = TREE_TYPE (TREE_TYPE (lhs));
2567 machine_mode mode = TYPE_MODE (type);
2568 bool unsignedp = TYPE_UNSIGNED (type);
2569 optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
2571 rtx op0 = expand_normal (arg0);
2572 rtx op1 = expand_normal (arg1);
2573 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2575 rtx quotient, remainder, libfunc;
2577 /* Check whether an optab handler exists for the divmod optab in the given mode. */
2578 if (optab_handler (tab, mode) != CODE_FOR_nothing)
2580 quotient = gen_reg_rtx (mode);
2581 remainder = gen_reg_rtx (mode);
2582 expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
2585 /* Generate call to divmod libfunc if it exists. */
2586 else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
2587 targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
2588 &quotient, &remainder);
2590 else
2591 gcc_unreachable ();
2593 /* Wrap the return value (quotient, remainder) in a COMPLEX_EXPR. */
2594 expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
2595 make_tree (TREE_TYPE (arg0), quotient),
2596 make_tree (TREE_TYPE (arg1), remainder)),
2597 target, VOIDmode, EXPAND_NORMAL);
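/* Illustrative example (not part of the original sources): when both the
   quotient and the remainder of the same operands are needed, e.g.

     void
     qr (int a, int b, int *q, int *r)
     {
       *q = a / b;
       *r = a % b;
     }

   the two divisions can be merged into a single DIVMOD call whose complex
   result carries the quotient in its real part and the remainder in its
   imaginary part, matching the expansion above.  */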
2600 /* Expand a call to FN using the operands in STMT. FN has a single
2601 output operand and NARGS input operands. */
2603 static void
2604 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
2605 unsigned int nargs)
2607 expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
2609 tree_pair types = direct_internal_fn_types (fn, stmt);
2610 insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
2612 tree lhs = gimple_call_lhs (stmt);
2613 tree lhs_type = TREE_TYPE (lhs);
2614 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2615 create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);
2617 for (unsigned int i = 0; i < nargs; ++i)
2619 tree rhs = gimple_call_arg (stmt, i);
2620 tree rhs_type = TREE_TYPE (rhs);
2621 rtx rhs_rtx = expand_normal (rhs);
2622 if (INTEGRAL_TYPE_P (rhs_type))
2623 create_convert_operand_from (&ops[i + 1], rhs_rtx,
2624 TYPE_MODE (rhs_type),
2625 TYPE_UNSIGNED (rhs_type));
2626 else
2627 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
2630 expand_insn (icode, nargs + 1, ops);
2631 if (!rtx_equal_p (lhs_rtx, ops[0].value))
2633 /* If the return value has an integral type, convert the instruction
2634 result to that type. This is useful for things that return an
2635 int regardless of the size of the input. If the instruction result
2636 is smaller than required, assume that it is signed.
2638 If the return value has a nonintegral type, its mode must match
2639 the instruction result. */
2640 if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
2642 /* If this is a scalar in a register that is stored in a wider
2643 mode than the declared mode, compute the result into its
2644 declared mode and then convert to the wider mode. */
2645 gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2646 rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
2647 convert_move (SUBREG_REG (lhs_rtx), tmp,
2648 SUBREG_PROMOTED_SIGN (lhs_rtx));
2650 else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
2651 emit_move_insn (lhs_rtx, ops[0].value);
2652 else
2654 gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2655 convert_move (lhs_rtx, ops[0].value, 0);
2660 /* Expanders for optabs that can use expand_direct_optab_fn. */
2662 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
2663 expand_direct_optab_fn (FN, STMT, OPTAB, 1)
2665 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
2666 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2668 /* RETURN_TYPE and ARGS are a return type and argument list that are
2669 in principle compatible with FN (which satisfies direct_internal_fn_p).
2670 Return the types that should be used to determine whether the
2671 target supports FN. */
2673 tree_pair
2674 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
2676 const direct_internal_fn_info &info = direct_internal_fn (fn);
2677 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
2678 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
2679 return tree_pair (type0, type1);
2682 /* CALL is a call whose return type and arguments are in principle
2683 compatible with FN (which satisfies direct_internal_fn_p). Return the
2684 types that should be used to determine whether the target supports FN. */
2686 tree_pair
2687 direct_internal_fn_types (internal_fn fn, gcall *call)
2689 const direct_internal_fn_info &info = direct_internal_fn (fn);
2690 tree op0 = (info.type0 < 0
2691 ? gimple_call_lhs (call)
2692 : gimple_call_arg (call, info.type0));
2693 tree op1 = (info.type1 < 0
2694 ? gimple_call_lhs (call)
2695 : gimple_call_arg (call, info.type1));
2696 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
2699 /* Return true if OPTAB is supported for TYPES (whose modes should be
2700 the same) when the optimization type is OPT_TYPE. Used for simple
2701 direct optabs. */
2703 static bool
2704 direct_optab_supported_p (direct_optab optab, tree_pair types,
2705 optimization_type opt_type)
2707 machine_mode mode = TYPE_MODE (types.first);
2708 gcc_checking_assert (mode == TYPE_MODE (types.second));
2709 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
2712 /* Return true if load/store lanes optab OPTAB is supported for
2713 array type TYPES.first when the optimization type is OPT_TYPE. */
2715 static bool
2716 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
2717 optimization_type opt_type)
2719 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
2720 machine_mode imode = TYPE_MODE (types.first);
2721 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
2722 return (convert_optab_handler (optab, imode, vmode, opt_type)
2723 != CODE_FOR_nothing);
2726 #define direct_unary_optab_supported_p direct_optab_supported_p
2727 #define direct_binary_optab_supported_p direct_optab_supported_p
2728 #define direct_mask_load_optab_supported_p direct_optab_supported_p
2729 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
2730 #define direct_mask_store_optab_supported_p direct_optab_supported_p
2731 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
2733 /* Return true if FN is supported for the types in TYPES when the
2734 optimization type is OPT_TYPE. The types are those associated with
2735 the "type0" and "type1" fields of FN's direct_internal_fn_info
2736 structure. */
2738 bool
2739 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
2740 optimization_type opt_type)
2742 switch (fn)
2744 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
2745 case IFN_##CODE: break;
2746 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2747 case IFN_##CODE: \
2748 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
2749 opt_type);
2750 #include "internal-fn.def"
2752 case IFN_LAST:
2753 break;
2755 gcc_unreachable ();
2758 /* Return true if FN is supported for type TYPE when the optimization
2759 type is OPT_TYPE. The caller knows that the "type0" and "type1"
2760 fields of FN's direct_internal_fn_info structure are the same. */
2762 bool
2763 direct_internal_fn_supported_p (internal_fn fn, tree type,
2764 optimization_type opt_type)
2766 const direct_internal_fn_info &info = direct_internal_fn (fn);
2767 gcc_checking_assert (info.type0 == info.type1);
2768 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
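/* Illustrative example (not part of the original sources, assuming a
   unary direct internal function such as IFN_SQRT is defined in
   internal-fn.def): a pass can test whether the target open-codes it for
   a given type with

     if (direct_internal_fn_supported_p (IFN_SQRT, type,
                                         OPTIMIZE_FOR_SPEED))
       ...

   which, via the tables above, reduces to a direct_optab_handler query
   for TYPE_MODE (type).  */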
2771 /* Return true if IFN_SET_EDOM is supported. */
2773 bool
2774 set_edom_supported_p (void)
2776 #ifdef TARGET_EDOM
2777 return true;
2778 #else
2779 return false;
2780 #endif
2783 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2784 static void \
2785 expand_##CODE (internal_fn fn, gcall *stmt) \
2787 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
2789 #include "internal-fn.def"
2791 /* Routines to expand each internal function, indexed by function number.
2792 Each routine has the prototype:
2794 expand_<NAME> (internal_fn fn, gcall *stmt)
2796 where FN is the function itself and STMT is the statement that performs the call. */
2797 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
2798 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
2799 #include "internal-fn.def"
2803 /* Expand STMT as though it were a call to internal function FN. */
2805 void
2806 expand_internal_call (internal_fn fn, gcall *stmt)
2808 internal_fn_expanders[fn] (fn, stmt);
2811 /* Expand STMT, which is a call to internal function FN. */
2813 void
2814 expand_internal_call (gcall *stmt)
2816 expand_internal_call (gimple_call_internal_fn (stmt), stmt);
2819 void
2820 expand_PHI (internal_fn, gcall *)
2822 gcc_unreachable ();