PR ipa/64481
[official-gcc.git] / gcc / internal-fn.c
blob 07a9ec5150cfa5045f2ab1d30660d221ea348c11
/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "predict.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "ubsan.h"
#include "target.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}

/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}

/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}

static void
expand_ANNOTATE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}

/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   non-negative, 2 if ARG is known to be always negative, or 3 if ARG
   may be positive or negative.  */
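/* For instance, if ARG is a 16-bit unsigned SSA_NAME whose recorded
   value range is [0, 0x7fff], its sign bit can never be set, so the
   value reinterpreted as signed is always non-negative and the
   function returns 1.  */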
static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
	return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      if (++cnt > 30)
		return 3;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */
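/* For instance, the constant 100 needs 7 bits when SIGN is UNSIGNED but
   8 bits when SIGN is SIGNED (one extra for the sign bit), while the
   constant -1 needs just 1 bit when SIGN is SIGNED.  */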
static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, SIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL_RTX, done_label,
			       PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}

/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is + or -.  */
static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
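  /* A quick sanity check of the u1 + u2 -> ur rule in 8-bit arithmetic:
     200 + 100 wraps around to res = 44, and res < u1 (44 < 200) flags
     the overflow, while 200 + 50 gives res = 250 >= u1, so no overflow
     is reported.  */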
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the operation is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL_RTX, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the operation is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL_RTX,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }
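  /* A note on the do_xor paths above: because sgn has only the most
     significant bit set, adding sgn modulo 2^prec is the same operation
     as XORing with sgn.  Biasing one operand by sgn here and adding sgn
     back to the result at the end therefore implements the "^ sgn"
     steps of the s1 + u2 -> ur and u1 - s2 -> ur rules while reusing
     the signed do_signed code below.  */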

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL_RTX, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL_RTX,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL_RTX,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL_RTX, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL_RTX,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the operation is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      rtx tem = op1;
	      op1 = op0;
	      op0 = tem;
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL_RTX, do_ior_label,
				   PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL_RTX,
			       op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL_RTX, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL_RTX, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the operation is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
	 the second operand, as subtraction is not commutative) is always
	 non-negative or always negative, we can do just one comparison
	 and conditional jump instead of 2 at runtime, 3 present in the
	 emitted code.  If one of the arguments is CONST_INT, all we
	 need is to make sure it is op1, then the first
	 do_compare_rtx_and_jump will be just folded.  Otherwise try
	 to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
	{
	  rtx tem = op0;
	  op0 = op1;
	  op1 = tem;
	}
      else if (CONST_INT_P (op1))
	;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
	{
	  pos_neg = get_range_pos_neg (arg0);
	  if (pos_neg != 3)
	    {
	      rtx tem = op0;
	      op0 = op1;
	      op1 = tem;
	    }
	}
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
	pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
	do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL_RTX, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
	do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
				 false, mode, NULL_RTX, NULL_RTX, done_label,
				 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
	{
	  emit_jump (do_error);

	  emit_label (sub_check);
	}

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
	do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
				 false, mode, NULL_RTX, NULL_RTX, done_label,
				 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL_RTX,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
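  /* Checking the s1 * s2 -> ur rule on an 8-bit example: for s1 = -3 and
     s2 = -4, (s1 & s2) is negative, so t1 = 3 and t2 = 4 and res = 12;
     s1 ^ s2 is non-negative, so overflow is decided by main_ovf (true)
     on 3 * 4, which correctly reports none, matching (-3) * (-4) = 12.  */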
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL_RTX, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL_RTX, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL_RTX, done_label,
				       PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, after_negate_label,
				   PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL_RTX, done_label,
				     PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL_RTX, done_label,
				       PROB_VERY_LIKELY);
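	      /* E.g. in 16-bit arithmetic 300 * 300 = 90000: the low half
		 is 24464 with a clear sign bit, but the high half is 1
		 rather than 0, so the comparison above does not jump to
		 done_label and execution falls through to the overflow
		 path.  */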
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);
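
	  /* The half-word scheme below rests on the decomposition
	     op = hipart * 2^hprec + lopart (hipart shifted arithmetically
	     for signed modes, lopart taken unsigned), so that
	     op0 * op1 = hipart0 * hipart1 * 2^(2*hprec)
			 + (hipart0 * lopart1 + hipart1 * lopart0) * 2^hprec
			 + lopart0 * lopart1,
	     specialized according to which high parts are known to be
	     plain sign/zero extensions of the corresponding low parts.  */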

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL_RTX, large_op0,
				     PROB_UNLIKELY);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL_RTX, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL_RTX, both_ops_large,
				     PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);
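
	  /* After the sign fixups and the carry from lo0xlo1 below, loxhi
	     ends up holding the signed product lopart * larger shifted
	     right arithmetically by hprec; res is then reassembled as
	     (loxhi << hprec) | (low half of lo0xlo1), and the overflow
	     test only has to verify that the high half of loxhi is a mere
	     sign extension of the rest.  */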

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL_RTX, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL_RTX, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
	     if (loxhi >> (bitsize / 2) == 0  (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
				   NULL_RTX, NULL_RTX, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and hipart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL_RTX, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL_RTX, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
					 NULL_RTX, NULL_RTX, hipart_different,
					 PROB_EVEN);

	      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
				       NULL_RTX, NULL_RTX, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
				       NULL_RTX, NULL_RTX, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL_RTX, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL_RTX, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
			       NULL_RTX, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
			       NULL_RTX, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL_RTX,
			       all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;
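
  /* For instance, adding two 32-bit signed operands with a 64-bit signed
     result gives precop = MAX (32, 32) + 1 = 33; since 33 <= 64, the
     first branch of the loop below can compute the exact result directly
     in the result type without any overflow check.  */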

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinite precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}

#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
	 with precres widening right away, otherwise do it only
	 if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
	;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
	  || ((!uns0_p || !uns1_p) && !unsr_p
	      && prec0 + uns0_p <= precres
	      && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		expand_neg_overflow (loc, lhs, arg1, false);
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				      unsr_p, unsr_p, unsr_p, false);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1,
				   unsr_p, unsr_p, unsr_p, false);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#ifdef WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}

/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
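
/* The {ADD,SUB,MUL}_OVERFLOW internal functions above back the
   __builtin_{add,sub,mul}_overflow family of builtins: each call
   produces a complex integer whose real part is the wrapped result
   and whose imaginary part is the overflow flag, which is why the
   expanders write the two halves with write_complex_part.  */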

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}

static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
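
/* MASK_LOAD and MASK_STORE are produced by if-conversion and the
   vectorizer for conditional vector accesses; expanding them relies on
   the target providing the maskload/maskstore optabs (for example, the
   AVX/AVX2 masked move instructions on x86).  */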

static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}

static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
			 EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}