/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "alias.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "rtl.h"
28 #include "options.h"
29 #include "fold-const.h"
30 #include "internal-fn.h"
31 #include "stor-layout.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expmed.h"
35 #include "dojump.h"
36 #include "explow.h"
37 #include "calls.h"
38 #include "emit-rtl.h"
39 #include "varasm.h"
40 #include "stmt.h"
41 #include "expr.h"
42 #include "insn-codes.h"
43 #include "optabs.h"
44 #include "ubsan.h"
45 #include "target.h"
46 #include "stringpool.h"
47 #include "tree-ssanames.h"
48 #include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];
void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}
/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}
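/* For orientation (illustrative note, not in the original source): a
   load-lanes operation loads N interleaved vectors in one instruction,
   e.g. the ARM/AArch64 vld2/vld3/vld4 (ld2/ld3/ld4) family; the array
   type encodes how many vectors (lanes) are transferred at once.  */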
/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
static void
expand_ANNOTATE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   non-negative or 2 if ARG is known to be always negative, or 3 if ARG
   may be positive or negative.  */
static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
	return 3;
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      if (++cnt > 30)
		return 3;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}
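/* Illustration (editorial, not in the original source): for a 32-bit
   signed SSA_NAME with a recorded VR_RANGE of [0, 100] this returns 1,
   for [-100, -1] it returns 2, and for a range such as [-1, 100], or
   when no range information is available, it conservatively
   returns 3.  */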
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
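/* Illustration (editorial, not in the original source): for a 32-bit
   signed SSA_NAME with a recorded VR_RANGE of [0, 1000] this returns
   11 for SIGNED (10 value bits plus a sign bit) and 10 for UNSIGNED;
   the + (orig_sign != sign) term charges one extra bit whenever the
   traversal had to switch signedness relative to the caller's
   request.  */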
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is + or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
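  /* Worked example of the rules above (editorial illustration), with
     8-bit precision and sgn = 0x80: u1 + u2 -> ur with u1 = 200,
     u2 = 100 gives res = 44 (300 mod 256), and res < u1 signals the
     overflow.  s1 + u2 -> sr with s1 = 100, u2 = 100 gives
     (U) res = 200, and ((U) res ^ sgn) = 72 < u2 = 100 correctly flags
     that 200 is not representable in S.  */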
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }
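  /* Note on the bias trick used here and in the u1 - s2 -> ur case
     below (editorial illustration): adding sgn to an unsigned operand
     is the same as flipping its most significant bit mod 2^prec, which
     reinterprets u2 as t1 = (S) (u2 ^ sgn) from the table above.  That
     reduces the mixed-signedness case to the plain signed s1 +- s2
     path; do_xor records that sgn must be added back into the result
     once the do_signed code has computed t2.  */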
  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      std::swap (op0, op1);
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL, do_ior_label,
				   PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
			       op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);
  /* s1 +- s2 -> sr */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
	 the second operand, as subtraction is not commutative) is always
	 non-negative or always negative, we can do just one comparison
	 and conditional jump instead of 2 at runtime, 3 present in the
	 emitted code.  If one of the arguments is CONST_INT, all we
	 need is to make sure it is op1, then the first
	 do_compare_rtx_and_jump will be just folded.  Otherwise try
	 to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
	std::swap (op0, op1);
      else if (CONST_INT_P (op1))
	;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
	{
	  pos_neg = get_range_pos_neg (arg0);
	  if (pos_neg != 3)
	    std::swap (op0, op1);
	}
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
	pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
	do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
	do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
				 false, mode, NULL_RTX, NULL, done_label,
				 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
	{
	  emit_jump (do_error);

	  emit_label (sub_check);
	}

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
	do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
				 false, mode, NULL_RTX, NULL, done_label,
				 PROB_VERY_LIKELY);
    }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
	      : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
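  /* Worked example of the s1 * s2 -> ur rule (editorial illustration),
     with 8-bit precision: s1 = -3, s2 = -4 gives t1 = 3, t2 = 4 and
     res = 12, and since (s1 ^ s2) >= 0 only main_ovf (true) is checked
     (no overflow here); with s1 = -3, s2 = 4 the signs differ, so
     ovf = (s1 && s2) is true, matching the fact that -12 is not
     representable as unsigned.  */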
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }
  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }
  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL, done_label,
				     PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	    }
	}
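      /* Illustration of the signed widening check above (editorial,
	 not in the original source): for 16-bit operands with a 32-bit
	 wmode, 300 * 200 yields res = 60000, hipart = 60000 >> 16 = 0,
	 while the sign-extended low half gives signbit = -1; the two
	 differ, so the overflow of the 16-bit signed multiplication is
	 detected.  */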
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL, large_op0,
				     PROB_UNLIKELY);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, both_ops_large,
				     PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);
	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the high part
	     of larger and lopart0 and lopart1 are the low parts of both
	     operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
	     if (loxhi >> (bitsize / 2) == 0 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
				   NULL_RTX, NULL, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);
	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and hipart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
					 NULL_RTX, NULL, hipart_different,
					 PROB_EVEN);

	      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);
  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
			       NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
			       all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }
  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
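/* For instance (editorial note), with -fsanitize=signed-integer-overflow
   a source-level "a * b" on signed operands is instrumented as a call
   to UBSAN_CHECK_MUL (a, b); the expansion above either produces the
   product or reaches the do_error path, which invokes the libubsan
   handler (__ubsan_handle_mul_overflow) built by
   ubsan_build_overflow_builtin.  */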
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
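  /* Example (editorial illustration): for a 32-bit signed addition
     precop is 33, so a 32-bit result always needs a runtime check; but
     if range information proves both arguments fit in 8 bits
     (prec0 = prec1 = 8), precop is 9 and the first branch below
     expands the operation as plain arithmetic with the overflow flag
     statically zero.  */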
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}
      /* For sub-word operations, if target doesn't have them, start
	 with precres widening right away, otherwise do it only
	 if the most simple cases can't be used.  */
      if (WORD_REGISTER_OPERATIONS
	  && orig_precres == precres
	  && precres < BITS_PER_WORD)
	;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
		&& prec1 <= precres)
	       || ((!uns0_p || !uns1_p) && !unsr_p
		   && prec0 + uns0_p <= precres
		   && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		{
		  expand_neg_overflow (loc, lhs, arg1, false);
		  return;
		}
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				      unsr_p, unsr_p, unsr_p, false);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1,
				   unsr_p, unsr_p, unsr_p, false);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}
      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#if WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}
      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}
      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *)
{
  gcc_unreachable ();
}
static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
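/* Editorial note: both MASK_LOAD and MASK_STORE simply hand the memory,
   value and mask operands to the target's maskload/maskstore patterns;
   on x86 with AVX/AVX2, for example, these correspond to the
   vmaskmov/vpmaskmov family of instructions.  */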
static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}
static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
			 EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy
   function should never be called.  */

static void
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}