gcc/internal-fn.c
1 /* Internal functions.
2 Copyright (C) 2011-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-set.h"
24 #include "vec.h"
25 #include "input.h"
26 #include "alias.h"
27 #include "symtab.h"
28 #include "options.h"
29 #include "inchash.h"
30 #include "tree.h"
31 #include "fold-const.h"
32 #include "internal-fn.h"
33 #include "stor-layout.h"
34 #include "hashtab.h"
35 #include "tm.h"
36 #include "hard-reg-set.h"
37 #include "function.h"
38 #include "rtl.h"
39 #include "flags.h"
40 #include "statistics.h"
41 #include "insn-config.h"
42 #include "expmed.h"
43 #include "dojump.h"
44 #include "explow.h"
45 #include "calls.h"
46 #include "emit-rtl.h"
47 #include "varasm.h"
48 #include "stmt.h"
49 #include "expr.h"
50 #include "insn-codes.h"
51 #include "optabs.h"
52 #include "predict.h"
53 #include "dominance.h"
54 #include "cfg.h"
55 #include "basic-block.h"
56 #include "tree-ssa-alias.h"
57 #include "gimple-expr.h"
58 #include "is-a.h"
59 #include "gimple.h"
60 #include "ubsan.h"
61 #include "target.h"
62 #include "stringpool.h"
63 #include "tree-ssanames.h"
64 #include "diagnostic-core.h"
66 /* The names of each internal function, indexed by function number. */
67 const char *const internal_fn_name_array[] = {
68 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
69 #include "internal-fn.def"
70 #undef DEF_INTERNAL_FN
71 "<invalid-fn>"
74 /* The ECF_* flags of each internal function, indexed by function number. */
75 const int internal_fn_flags_array[] = {
76 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
77 #include "internal-fn.def"
78 #undef DEF_INTERNAL_FN
82 /* Fnspec of each internal function, indexed by function number. */
83 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
85 void
86 init_internal_fns ()
88 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
89 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
90 build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
91 #include "internal-fn.def"
92 #undef DEF_INTERNAL_FN
93 internal_fn_fnspec_array[IFN_LAST] = 0;
96 /* ARRAY_TYPE is an array of vector modes. Return the associated insn
97 for load-lanes-style optab OPTAB. The insn must exist. */
99 static enum insn_code
100 get_multi_vector_move (tree array_type, convert_optab optab)
102 enum insn_code icode;
103 machine_mode imode;
104 machine_mode vmode;
106 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
107 imode = TYPE_MODE (array_type);
108 vmode = TYPE_MODE (TREE_TYPE (array_type));
110 icode = convert_optab_handler (optab, imode, vmode);
111 gcc_assert (icode != CODE_FOR_nothing);
112 return icode;
115 /* Expand LOAD_LANES call STMT. */
117 static void
118 expand_LOAD_LANES (gcall *stmt)
120 struct expand_operand ops[2];
121 tree type, lhs, rhs;
122 rtx target, mem;
124 lhs = gimple_call_lhs (stmt);
125 rhs = gimple_call_arg (stmt, 0);
126 type = TREE_TYPE (lhs);
128 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
129 mem = expand_normal (rhs);
131 gcc_assert (MEM_P (mem));
132 PUT_MODE (mem, TYPE_MODE (type));
134 create_output_operand (&ops[0], target, TYPE_MODE (type));
135 create_fixed_operand (&ops[1], mem);
136 expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
139 /* Expand STORE_LANES call STMT. */
141 static void
142 expand_STORE_LANES (gcall *stmt)
144 struct expand_operand ops[2];
145 tree type, lhs, rhs;
146 rtx target, reg;
148 lhs = gimple_call_lhs (stmt);
149 rhs = gimple_call_arg (stmt, 0);
150 type = TREE_TYPE (rhs);
152 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
153 reg = expand_normal (rhs);
155 gcc_assert (MEM_P (target));
156 PUT_MODE (target, TYPE_MODE (type));
158 create_fixed_operand (&ops[0], target);
159 create_input_operand (&ops[1], reg, TYPE_MODE (type));
160 expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
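/* LOAD_LANES/STORE_LANES calls are emitted by the vectorizer for
   interleaved accesses when the target provides the corresponding
   vec_load_lanes/vec_store_lanes optabs (for example ARM NEON
   vld2/vst2-style instructions). As an illustration (not from this
   file), a loop such as

     for (int i = 0; i < n; i++)
       out[i] = in[2 * i] + in[2 * i + 1];

   may be vectorized using a two-lane load that these expanders turn
   into the target instruction. */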
163 static void
164 expand_ANNOTATE (gcall *)
166 gcc_unreachable ();
169 /* This should get expanded in adjust_simduid_builtins. */
171 static void
172 expand_GOMP_SIMD_LANE (gcall *)
174 gcc_unreachable ();
177 /* This should get expanded in adjust_simduid_builtins. */
179 static void
180 expand_GOMP_SIMD_VF (gcall *)
182 gcc_unreachable ();
185 /* This should get expanded in adjust_simduid_builtins. */
187 static void
188 expand_GOMP_SIMD_LAST_LANE (gcall *)
190 gcc_unreachable ();
193 /* This should get expanded in the sanopt pass. */
195 static void
196 expand_UBSAN_NULL (gcall *)
198 gcc_unreachable ();
201 /* This should get expanded in the sanopt pass. */
203 static void
204 expand_UBSAN_BOUNDS (gcall *)
206 gcc_unreachable ();
209 /* This should get expanded in the sanopt pass. */
211 static void
212 expand_UBSAN_VPTR (gcall *)
214 gcc_unreachable ();
217 /* This should get expanded in the sanopt pass. */
219 static void
220 expand_UBSAN_OBJECT_SIZE (gcall *)
222 gcc_unreachable ();
225 /* This should get expanded in the sanopt pass. */
227 static void
228 expand_ASAN_CHECK (gcall *)
230 gcc_unreachable ();
233 /* This should get expanded in the tsan pass. */
235 static void
236 expand_TSAN_FUNC_EXIT (gcall *)
238 gcc_unreachable ();
241 /* Helper function for expand_addsub_overflow. Return 1
242 if ARG interpreted as signed in its precision is known to be always
243 non-negative, 2 if ARG is known to be always negative, or 3 if ARG
244 may be positive or negative. */
246 static int
247 get_range_pos_neg (tree arg)
249 if (arg == error_mark_node)
250 return 3;
252 int prec = TYPE_PRECISION (TREE_TYPE (arg));
253 int cnt = 0;
254 if (TREE_CODE (arg) == INTEGER_CST)
256 wide_int w = wi::sext (arg, prec);
257 if (wi::neg_p (w))
258 return 2;
259 else
260 return 1;
262 while (CONVERT_EXPR_P (arg)
263 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
264 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
266 arg = TREE_OPERAND (arg, 0);
267 /* Narrower value zero extended into wider type
268 will always result in positive values. */
269 if (TYPE_UNSIGNED (TREE_TYPE (arg))
270 && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
271 return 1;
272 prec = TYPE_PRECISION (TREE_TYPE (arg));
273 if (++cnt > 30)
274 return 3;
277 if (TREE_CODE (arg) != SSA_NAME)
278 return 3;
279 wide_int arg_min, arg_max;
280 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
282 gimple g = SSA_NAME_DEF_STMT (arg);
283 if (is_gimple_assign (g)
284 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
286 tree t = gimple_assign_rhs1 (g);
287 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
288 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
290 if (TYPE_UNSIGNED (TREE_TYPE (t))
291 && TYPE_PRECISION (TREE_TYPE (t)) < prec)
292 return 1;
293 prec = TYPE_PRECISION (TREE_TYPE (t));
294 arg = t;
295 if (++cnt > 30)
296 return 3;
297 continue;
300 return 3;
302 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
304 /* For unsigned values, the "positive" range comes
305 below the "negative" range. */
306 if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
307 return 1;
308 if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
309 return 2;
311 else
313 if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
314 return 1;
315 if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
316 return 2;
318 return 3;
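/* For example: an INTEGER_CST of -5 in a signed type yields 2 (always
   negative); (int) u for an unsigned short u yields 1, because the zero
   extension into the wider type can only produce non-negative values;
   a signed SSA_NAME with recorded value range [-10, 10] yields 3, while
   a range of [0, 10] yields 1. */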
321 /* Return minimum precision needed to represent all values
322 of ARG in SIGNed integral type. */
324 static int
325 get_min_precision (tree arg, signop sign)
327 int prec = TYPE_PRECISION (TREE_TYPE (arg));
328 int cnt = 0;
329 signop orig_sign = sign;
330 if (TREE_CODE (arg) == INTEGER_CST)
332 int p;
333 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
335 widest_int w = wi::to_widest (arg);
336 w = wi::ext (w, prec, sign);
337 p = wi::min_precision (w, sign);
339 else
340 p = wi::min_precision (arg, sign);
341 return MIN (p, prec);
343 while (CONVERT_EXPR_P (arg)
344 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
345 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
347 arg = TREE_OPERAND (arg, 0);
348 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
350 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
351 sign = UNSIGNED;
352 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
353 return prec + (orig_sign != sign);
354 prec = TYPE_PRECISION (TREE_TYPE (arg));
356 if (++cnt > 30)
357 return prec + (orig_sign != sign);
359 if (TREE_CODE (arg) != SSA_NAME)
360 return prec + (orig_sign != sign);
361 wide_int arg_min, arg_max;
362 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
364 gimple g = SSA_NAME_DEF_STMT (arg);
365 if (is_gimple_assign (g)
366 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
368 tree t = gimple_assign_rhs1 (g);
369 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
370 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
372 arg = t;
373 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
375 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
376 sign = UNSIGNED;
377 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
378 return prec + (orig_sign != sign);
379 prec = TYPE_PRECISION (TREE_TYPE (arg));
381 if (++cnt > 30)
382 return prec + (orig_sign != sign);
383 continue;
386 return prec + (orig_sign != sign);
388 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
390 int p1 = wi::min_precision (arg_min, sign);
391 int p2 = wi::min_precision (arg_max, sign);
392 p1 = MAX (p1, p2);
393 prec = MIN (prec, p1);
395 else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
397 int p = wi::min_precision (arg_max, UNSIGNED);
398 prec = MIN (prec, p);
400 return prec + (orig_sign != sign);
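/* For example, for the INTEGER_CST 100 of type int with SIGN == SIGNED
   the result is 8 (7 value bits plus a sign bit). For (int) u with u of
   type unsigned short and no recorded value range, the walk switches to
   UNSIGNED at the conversion and returns 16 + 1 == 17, i.e. every value
   of the expression fits in a 17-bit signed type. */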
403 /* Helper for expand_*_overflow. Store RES into the __real__ part
404 of TARGET. If RES has larger MODE than __real__ part of TARGET,
405 set the __imag__ part to 1 if RES doesn't fit into it. */
407 static void
408 expand_arith_overflow_result_store (tree lhs, rtx target,
409 machine_mode mode, rtx res)
411 machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
412 rtx lres = res;
413 if (tgtmode != mode)
415 rtx_code_label *done_label = gen_label_rtx ();
416 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
417 lres = convert_modes (tgtmode, mode, res, uns);
418 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
419 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
420 EQ, true, mode, NULL_RTX, NULL, done_label,
421 PROB_VERY_LIKELY);
422 write_complex_part (target, const1_rtx, true);
423 emit_label (done_label);
425 write_complex_part (target, lres, false);
428 /* Helper for expand_*_overflow. Store RES into TARGET. */
430 static void
431 expand_ubsan_result_store (rtx target, rtx res)
433 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
434 /* If this is a scalar in a register that is stored in a wider mode
435 than the declared mode, compute the result into its declared mode
436 and then convert to the wider mode. Our value is the computed
437 expression. */
438 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
439 else
440 emit_move_insn (target, res);
443 /* Add add/sub overflow checking to the statement STMT.
444 CODE says whether the operation is + or -. */
446 static void
447 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
448 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
449 bool uns1_p, bool is_ubsan)
451 rtx res, target = NULL_RTX;
452 tree fn;
453 rtx_code_label *done_label = gen_label_rtx ();
454 rtx_code_label *do_error = gen_label_rtx ();
455 do_pending_stack_adjust ();
456 rtx op0 = expand_normal (arg0);
457 rtx op1 = expand_normal (arg1);
458 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
459 int prec = GET_MODE_PRECISION (mode);
460 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
461 bool do_xor = false;
463 if (is_ubsan)
464 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
466 if (lhs)
468 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
469 if (!is_ubsan)
470 write_complex_part (target, const0_rtx, true);
473 /* We assume both operands and result have the same precision
474 here (GET_MODE_BITSIZE (mode)), S stands for signed type
475 with that precision, U for unsigned type with that precision,
476 sgn for unsigned most significant bit in that precision.
477 s1 is signed first operand, u1 is unsigned first operand,
478 s2 is signed second operand, u2 is unsigned second operand,
479 sr is signed result, ur is unsigned result and the following
480 rules say how to compute the result (which is always the result of
481 the operation on the operands as if both were unsigned, cast to the
482 right signedness) and how to compute whether the operation overflowed.
484 s1 + s2 -> sr
485 res = (S) ((U) s1 + (U) s2)
486 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
487 s1 - s2 -> sr
488 res = (S) ((U) s1 - (U) s2)
489 ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
490 u1 + u2 -> ur
491 res = u1 + u2
492 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
493 u1 - u2 -> ur
494 res = u1 - u2
495 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
496 s1 + u2 -> sr
497 res = (S) ((U) s1 + u2)
498 ovf = ((U) res ^ sgn) < u2
499 s1 + u2 -> ur
500 t1 = (S) (u2 ^ sgn)
501 t2 = s1 + t1
502 res = (U) t2 ^ sgn
503 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
504 s1 - u2 -> sr
505 res = (S) ((U) s1 - u2)
506 ovf = u2 > ((U) s1 ^ sgn)
507 s1 - u2 -> ur
508 res = (U) s1 - u2
509 ovf = s1 < 0 || u2 > (U) s1
510 u1 - s2 -> sr
511 res = u1 - (U) s2
512 ovf = u1 >= ((U) s2 ^ sgn)
513 u1 - s2 -> ur
514 t1 = u1 ^ sgn
515 t2 = t1 - (U) s2
516 res = t2 ^ sgn
517 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
518 s1 + s2 -> ur
519 res = (U) s1 + (U) s2
520 ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
521 u1 + u2 -> sr
522 res = (S) (u1 + u2)
523 ovf = (U) res < u2 || res < 0
524 u1 - u2 -> sr
525 res = (S) (u1 - u2)
526 ovf = u1 >= u2 ? res < 0 : res >= 0
527 s1 - s2 -> ur
528 res = (U) s1 - (U) s2
529 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
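/* As a worked instance of the s1 + u2 -> sr rule above, with 8-bit
   operands and therefore sgn == 0x80:
     s1 = 100, u2 = 200: res = (S) (100 + 200) == (S) 44 and
       ((U) res ^ sgn) == 172 < 200, so overflow is reported (the exact
       sum 300 does not fit in a signed 8-bit result);
     s1 = -100, u2 = 200: res = (S) (156 + 200) == (S) 100 and
       ((U) res ^ sgn) == 228 >= 200, so no overflow (the exact sum
       is 100). */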
531 if (code == PLUS_EXPR && uns0_p && !uns1_p)
533 /* PLUS_EXPR is commutative, if operand signedness differs,
534 canonicalize to the first operand being signed and second
535 unsigned to simplify following code. */
536 rtx tem = op1;
537 op1 = op0;
538 op0 = tem;
539 tree t = arg1;
540 arg1 = arg0;
541 arg0 = t;
542 uns0_p = 0;
543 uns1_p = 1;
546 /* u1 +- u2 -> ur */
547 if (uns0_p && uns1_p && unsr_p)
549 /* Compute the operation. On RTL level, the arithmetic is always
550 unsigned. */
551 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
552 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
553 rtx tem = op0;
554 /* For PLUS_EXPR, the operation is commutative, so we can pick either
555 operand to compare against. For prec <= BITS_PER_WORD, I think
556 preferring a REG operand over a CONST_INT is better, because
557 the CONST_INT might enlarge the instruction or CSE would need
558 to figure out we'd already loaded it into a register before.
559 For prec > BITS_PER_WORD, I think a CONST_INT might be more beneficial,
560 as then the multi-word comparison can perhaps be simplified. */
561 if (code == PLUS_EXPR
562 && (prec <= BITS_PER_WORD
563 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
564 : CONST_SCALAR_INT_P (op1)))
565 tem = op1;
566 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
567 true, mode, NULL_RTX, NULL, done_label,
568 PROB_VERY_LIKELY);
569 goto do_error_label;
572 /* s1 +- u2 -> sr */
573 if (!uns0_p && uns1_p && !unsr_p)
575 /* Compute the operation. On RTL level, the arithmetic is always
576 unsigned. */
577 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
578 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
579 rtx tem = expand_binop (mode, add_optab,
580 code == PLUS_EXPR ? res : op0, sgn,
581 NULL_RTX, false, OPTAB_LIB_WIDEN);
582 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
583 done_label, PROB_VERY_LIKELY);
584 goto do_error_label;
587 /* s1 + u2 -> ur */
588 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
590 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
591 OPTAB_LIB_WIDEN);
592 /* As we've changed op1, we have to avoid using the value range
593 for the original argument. */
594 arg1 = error_mark_node;
595 do_xor = true;
596 goto do_signed;
599 /* u1 - s2 -> ur */
600 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
602 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
603 OPTAB_LIB_WIDEN);
604 /* As we've changed op0, we have to avoid using the value range
605 for the original argument. */
606 arg0 = error_mark_node;
607 do_xor = true;
608 goto do_signed;
611 /* s1 - u2 -> ur */
612 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
614 /* Compute the operation. On RTL level, the subtraction is always
615 unsigned. */
616 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
617 OPTAB_LIB_WIDEN);
618 int pos_neg = get_range_pos_neg (arg0);
619 if (pos_neg == 2)
620 /* If ARG0 is known to be always negative, this is always overflow. */
621 emit_jump (do_error);
622 else if (pos_neg == 3)
623 /* If ARG0 is not known to be always positive, check at runtime. */
624 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
625 NULL, do_error, PROB_VERY_UNLIKELY);
626 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
627 done_label, PROB_VERY_LIKELY);
628 goto do_error_label;
631 /* u1 - s2 -> sr */
632 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
634 /* Compute the operation. On RTL level, the subtraction is always
635 unsigned. */
636 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
637 OPTAB_LIB_WIDEN);
638 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
639 OPTAB_LIB_WIDEN);
640 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
641 done_label, PROB_VERY_LIKELY);
642 goto do_error_label;
645 /* u1 + u2 -> sr */
646 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
648 /* Compute the operation. On RTL level, the addition is always
649 unsigned. */
650 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
651 OPTAB_LIB_WIDEN);
652 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
653 NULL, do_error, PROB_VERY_UNLIKELY);
654 rtx tem = op1;
655 /* The operation is commutative, so we can pick either operand to compare
656 against. For prec <= BITS_PER_WORD, I think preferring a REG operand
657 over a CONST_INT is better, because the CONST_INT might enlarge the
658 instruction or CSE would need to figure out we'd already loaded it
659 into a register before. For prec > BITS_PER_WORD, I think a CONST_INT
660 might be more beneficial, as then the multi-word comparison can
661 perhaps be simplified. */
662 if (prec <= BITS_PER_WORD
663 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
664 : CONST_SCALAR_INT_P (op0))
665 tem = op0;
666 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
667 done_label, PROB_VERY_LIKELY);
668 goto do_error_label;
671 /* s1 +- s2 -> ur */
672 if (!uns0_p && !uns1_p && unsr_p)
674 /* Compute the operation. On RTL level, the arithmetic is always
675 unsigned. */
676 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
677 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
678 int pos_neg = get_range_pos_neg (arg1);
679 if (code == PLUS_EXPR)
681 int pos_neg0 = get_range_pos_neg (arg0);
682 if (pos_neg0 != 3 && pos_neg == 3)
684 rtx tem = op1;
685 op1 = op0;
686 op0 = tem;
687 pos_neg = pos_neg0;
690 rtx tem;
691 if (pos_neg != 3)
693 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
694 ? and_optab : ior_optab,
695 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
696 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
697 NULL, done_label, PROB_VERY_LIKELY);
699 else
701 rtx_code_label *do_ior_label = gen_label_rtx ();
702 do_compare_rtx_and_jump (op1, const0_rtx,
703 code == MINUS_EXPR ? GE : LT, false, mode,
704 NULL_RTX, NULL, do_ior_label,
705 PROB_EVEN);
706 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
707 OPTAB_LIB_WIDEN);
708 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
709 NULL, done_label, PROB_VERY_LIKELY);
710 emit_jump (do_error);
711 emit_label (do_ior_label);
712 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
713 OPTAB_LIB_WIDEN);
714 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
715 NULL, done_label, PROB_VERY_LIKELY);
717 goto do_error_label;
720 /* u1 - u2 -> sr */
721 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
723 /* Compute the operation. On RTL level, the subtraction is always
724 unsigned. */
725 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
726 OPTAB_LIB_WIDEN);
727 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
728 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
729 op0_geu_op1, PROB_EVEN);
730 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
731 NULL, done_label, PROB_VERY_LIKELY);
732 emit_jump (do_error);
733 emit_label (op0_geu_op1);
734 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
735 NULL, done_label, PROB_VERY_LIKELY);
736 goto do_error_label;
739 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
741 /* s1 +- s2 -> sr */
742 do_signed: ;
743 enum insn_code icode;
744 icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
745 if (icode != CODE_FOR_nothing)
747 struct expand_operand ops[4];
748 rtx_insn *last = get_last_insn ();
750 res = gen_reg_rtx (mode);
751 create_output_operand (&ops[0], res, mode);
752 create_input_operand (&ops[1], op0, mode);
753 create_input_operand (&ops[2], op1, mode);
754 create_fixed_operand (&ops[3], do_error);
755 if (maybe_expand_insn (icode, 4, ops))
757 last = get_last_insn ();
758 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
759 && JUMP_P (last)
760 && any_condjump_p (last)
761 && !find_reg_note (last, REG_BR_PROB, 0))
762 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
763 emit_jump (done_label);
765 else
767 delete_insns_since (last);
768 icode = CODE_FOR_nothing;
772 if (icode == CODE_FOR_nothing)
774 rtx_code_label *sub_check = gen_label_rtx ();
775 int pos_neg = 3;
777 /* Compute the operation. On RTL level, the arithmetic is always
778 unsigned. */
779 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
780 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
782 /* If we can prove one of the arguments (for MINUS_EXPR only
783 the second operand, as subtraction is not commutative) is always
784 non-negative or always negative, we can do just one comparison
785 and conditional jump at runtime instead of 2 (3 are present in the
786 emitted code). If one of the arguments is CONST_INT, all we
787 need is to make sure it is op1, then the first
788 do_compare_rtx_and_jump will be just folded. Otherwise try
789 to use range info if available. */
790 if (code == PLUS_EXPR && CONST_INT_P (op0))
792 rtx tem = op0;
793 op0 = op1;
794 op1 = tem;
796 else if (CONST_INT_P (op1))
798 else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
800 pos_neg = get_range_pos_neg (arg0);
801 if (pos_neg != 3)
803 rtx tem = op0;
804 op0 = op1;
805 op1 = tem;
808 if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
809 pos_neg = get_range_pos_neg (arg1);
811 /* If the op1 is negative, we have to use a different check. */
812 if (pos_neg == 3)
813 do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
814 NULL, sub_check, PROB_EVEN);
816 /* Compare the result of the operation with one of the operands. */
817 if (pos_neg & 1)
818 do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
819 false, mode, NULL_RTX, NULL, done_label,
820 PROB_VERY_LIKELY);
822 /* If we get here, we have to print the error. */
823 if (pos_neg == 3)
825 emit_jump (do_error);
827 emit_label (sub_check);
830 /* We have k = a + b for b < 0 here. k <= a must hold. */
831 if (pos_neg & 2)
832 do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
833 false, mode, NULL_RTX, NULL, done_label,
834 PROB_VERY_LIKELY);
837 do_error_label:
838 emit_label (do_error);
839 if (is_ubsan)
841 /* Expand the ubsan builtin call. */
842 push_temp_slots ();
843 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
844 arg0, arg1);
845 expand_normal (fn);
846 pop_temp_slots ();
847 do_pending_stack_adjust ();
849 else if (lhs)
850 write_complex_part (target, const1_rtx, true);
852 /* We're done. */
853 emit_label (done_label);
855 if (lhs)
857 if (is_ubsan)
858 expand_ubsan_result_store (target, res);
859 else
861 if (do_xor)
862 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
863 OPTAB_LIB_WIDEN);
865 expand_arith_overflow_result_store (lhs, target, mode, res);
870 /* Add negate overflow checking to the statement STMT. */
872 static void
873 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
875 rtx res, op1;
876 tree fn;
877 rtx_code_label *done_label, *do_error;
878 rtx target = NULL_RTX;
880 done_label = gen_label_rtx ();
881 do_error = gen_label_rtx ();
883 do_pending_stack_adjust ();
884 op1 = expand_normal (arg1);
886 machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
887 if (lhs)
889 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
890 if (!is_ubsan)
891 write_complex_part (target, const0_rtx, true);
894 enum insn_code icode = optab_handler (negv3_optab, mode);
895 if (icode != CODE_FOR_nothing)
897 struct expand_operand ops[3];
898 rtx_insn *last = get_last_insn ();
900 res = gen_reg_rtx (mode);
901 create_output_operand (&ops[0], res, mode);
902 create_input_operand (&ops[1], op1, mode);
903 create_fixed_operand (&ops[2], do_error);
904 if (maybe_expand_insn (icode, 3, ops))
906 last = get_last_insn ();
907 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
908 && JUMP_P (last)
909 && any_condjump_p (last)
910 && !find_reg_note (last, REG_BR_PROB, 0))
911 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
912 emit_jump (done_label);
914 else
916 delete_insns_since (last);
917 icode = CODE_FOR_nothing;
921 if (icode == CODE_FOR_nothing)
923 /* Compute the operation. On RTL level, the negation always
924 wraps. */
925 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
927 /* Compare the operand with the most negative value. */
928 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
929 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
930 done_label, PROB_VERY_LIKELY);
933 emit_label (do_error);
934 if (is_ubsan)
936 /* Expand the ubsan builtin call. */
937 push_temp_slots ();
938 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
939 arg1, NULL_TREE);
940 expand_normal (fn);
941 pop_temp_slots ();
942 do_pending_stack_adjust ();
944 else if (lhs)
945 write_complex_part (target, const1_rtx, true);
947 /* We're done. */
948 emit_label (done_label);
950 if (lhs)
952 if (is_ubsan)
953 expand_ubsan_result_store (target, res);
954 else
955 expand_arith_overflow_result_store (lhs, target, mode, res);
959 /* Add mul overflow checking to the statement STMT. */
961 static void
962 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
963 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
965 rtx res, op0, op1;
966 tree fn, type;
967 rtx_code_label *done_label, *do_error;
968 rtx target = NULL_RTX;
969 signop sign;
970 enum insn_code icode;
972 done_label = gen_label_rtx ();
973 do_error = gen_label_rtx ();
975 do_pending_stack_adjust ();
976 op0 = expand_normal (arg0);
977 op1 = expand_normal (arg1);
979 machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
980 bool uns = unsr_p;
981 if (lhs)
983 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
984 if (!is_ubsan)
985 write_complex_part (target, const0_rtx, true);
988 if (is_ubsan)
989 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
991 /* We assume both operands and result have the same precision
992 here (GET_MODE_BITSIZE (mode)), S stands for signed type
993 with that precision, U for unsigned type with that precision,
994 sgn for unsigned most significant bit in that precision.
995 s1 is signed first operand, u1 is unsigned first operand,
996 s2 is signed second operand, u2 is unsigned second operand,
997 sr is signed result, ur is unsigned result and the following
998 rules say how to compute the result (which is always the result of
999 the operation on the operands as if both were unsigned, cast to the
1000 right signedness) and how to compute whether the operation overflowed.
1001 main_ovf (false) stands for jump on signed multiplication
1002 overflow or the main algorithm with uns == false.
1003 main_ovf (true) stands for jump on unsigned multiplication
1004 overflow or the main algorithm with uns == true.
1006 s1 * s2 -> sr
1007 res = (S) ((U) s1 * (U) s2)
1008 ovf = main_ovf (false)
1009 u1 * u2 -> ur
1010 res = u1 * u2
1011 ovf = main_ovf (true)
1012 s1 * u2 -> ur
1013 res = (U) s1 * u2
1014 ovf = (s1 < 0 && u2) || main_ovf (true)
1015 u1 * u2 -> sr
1016 res = (S) (u1 * u2)
1017 ovf = res < 0 || main_ovf (true)
1018 s1 * u2 -> sr
1019 res = (S) ((U) s1 * u2)
1020 ovf = (S) u2 >= 0 ? main_ovf (false)
1021 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1022 s1 * s2 -> ur
1023 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1024 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1025 res = t1 * t2
1026 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
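/* As a worked instance of the s1 * u2 -> sr rule above, with 8-bit
   operands (so (S) u2 < 0 in both cases):
     s1 = -1, u2 = 200: res = (S) (255 * 200) == 56 and u2 != (U) res,
       so overflow is reported (the exact product -200 does not fit in
       a signed 8-bit result);
     s1 = -1, u2 = 128: res = (S) (255 * 128) == -128 and u2 == (U) res,
       so no overflow; this is the -1 * ((U) Smin) case mentioned
       below. */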
1028 if (uns0_p && !uns1_p)
1030 /* Multiplication is commutative, if operand signedness differs,
1031 canonicalize to the first operand being signed and second
1032 unsigned to simplify following code. */
1033 rtx tem = op1;
1034 op1 = op0;
1035 op0 = tem;
1036 tree t = arg1;
1037 arg1 = arg0;
1038 arg0 = t;
1039 uns0_p = 0;
1040 uns1_p = 1;
1043 int pos_neg0 = get_range_pos_neg (arg0);
1044 int pos_neg1 = get_range_pos_neg (arg1);
1046 /* s1 * u2 -> ur */
1047 if (!uns0_p && uns1_p && unsr_p)
1049 switch (pos_neg0)
1051 case 1:
1052 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1053 goto do_main;
1054 case 2:
1055 /* If s1 is negative, avoid the main code, just multiply and
1056 signal overflow if op1 is not 0. */
1057 struct separate_ops ops;
1058 ops.code = MULT_EXPR;
1059 ops.type = TREE_TYPE (arg1);
1060 ops.op0 = make_tree (ops.type, op0);
1061 ops.op1 = make_tree (ops.type, op1);
1062 ops.op2 = NULL_TREE;
1063 ops.location = loc;
1064 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1065 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1066 NULL, done_label, PROB_VERY_LIKELY);
1067 goto do_error_label;
1068 case 3:
1069 rtx_code_label *do_main_label;
1070 do_main_label = gen_label_rtx ();
1071 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1072 NULL, do_main_label, PROB_VERY_LIKELY);
1073 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1074 NULL, do_main_label, PROB_VERY_LIKELY);
1075 write_complex_part (target, const1_rtx, true);
1076 emit_label (do_main_label);
1077 goto do_main;
1078 default:
1079 gcc_unreachable ();
1083 /* u1 * u2 -> sr */
1084 if (uns0_p && uns1_p && !unsr_p)
1086 uns = true;
1087 /* Rest of handling of this case after res is computed. */
1088 goto do_main;
1091 /* s1 * u2 -> sr */
1092 if (!uns0_p && uns1_p && !unsr_p)
1094 switch (pos_neg1)
1096 case 1:
1097 goto do_main;
1098 case 2:
1099 /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1100 avoid the main code, just multiply and signal overflow
1101 unless 0 * u2 or -1 * ((U) Smin). */
1102 struct separate_ops ops;
1103 ops.code = MULT_EXPR;
1104 ops.type = TREE_TYPE (arg1);
1105 ops.op0 = make_tree (ops.type, op0);
1106 ops.op1 = make_tree (ops.type, op1);
1107 ops.op2 = NULL_TREE;
1108 ops.location = loc;
1109 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1110 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1111 NULL, done_label, PROB_VERY_LIKELY);
1112 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1113 NULL, do_error, PROB_VERY_UNLIKELY);
1114 int prec;
1115 prec = GET_MODE_PRECISION (mode);
1116 rtx sgn;
1117 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1118 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1119 NULL, done_label, PROB_VERY_LIKELY);
1120 goto do_error_label;
1121 case 3:
1122 /* Rest of handling of this case after res is computed. */
1123 goto do_main;
1124 default:
1125 gcc_unreachable ();
1129 /* s1 * s2 -> ur */
1130 if (!uns0_p && !uns1_p && unsr_p)
1132 rtx tem, tem2;
1133 switch (pos_neg0 | pos_neg1)
1135 case 1: /* Both operands known to be non-negative. */
1136 goto do_main;
1137 case 2: /* Both operands known to be negative. */
1138 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1139 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1140 /* Avoid looking at arg0/arg1 ranges, as we've changed
1141 the arguments. */
1142 arg0 = error_mark_node;
1143 arg1 = error_mark_node;
1144 goto do_main;
1145 case 3:
1146 if ((pos_neg0 ^ pos_neg1) == 3)
1148 /* If one operand is known to be negative and the other
1149 non-negative, this overflows always, unless the non-negative
1150 one is 0. Just do normal multiply and set overflow
1151 unless one of the operands is 0. */
1152 struct separate_ops ops;
1153 ops.code = MULT_EXPR;
1154 ops.type
1155 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1157 ops.op0 = make_tree (ops.type, op0);
1158 ops.op1 = make_tree (ops.type, op1);
1159 ops.op2 = NULL_TREE;
1160 ops.location = loc;
1161 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1162 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1163 OPTAB_LIB_WIDEN);
1164 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
1165 NULL_RTX, NULL, done_label,
1166 PROB_VERY_LIKELY);
1167 goto do_error_label;
1169 /* The general case, do all the needed comparisons at runtime. */
1170 rtx_code_label *do_main_label, *after_negate_label;
1171 rtx rop0, rop1;
1172 rop0 = gen_reg_rtx (mode);
1173 rop1 = gen_reg_rtx (mode);
1174 emit_move_insn (rop0, op0);
1175 emit_move_insn (rop1, op1);
1176 op0 = rop0;
1177 op1 = rop1;
1178 do_main_label = gen_label_rtx ();
1179 after_negate_label = gen_label_rtx ();
1180 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1181 OPTAB_LIB_WIDEN);
1182 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1183 NULL, after_negate_label, PROB_VERY_LIKELY);
1184 /* Both arguments negative here, negate them and continue with
1185 normal unsigned overflow checking multiplication. */
1186 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1187 NULL_RTX, false));
1188 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1189 NULL_RTX, false));
1190 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1191 the arguments. */
1192 arg0 = error_mark_node;
1193 arg1 = error_mark_node;
1194 emit_jump (do_main_label);
1195 emit_label (after_negate_label);
1196 tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1197 OPTAB_LIB_WIDEN);
1198 do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
1199 NULL, do_main_label, PROB_VERY_LIKELY);
1200 /* One argument is negative here, the other non-negative. This
1201 overflows always, unless one of the arguments is 0. But
1202 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1203 is, thus we can keep the do_main code ORing in overflow as is. */
1204 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
1205 NULL, do_main_label, PROB_VERY_LIKELY);
1206 write_complex_part (target, const1_rtx, true);
1207 emit_label (do_main_label);
1208 goto do_main;
1209 default:
1210 gcc_unreachable ();
1214 do_main:
1215 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1216 sign = uns ? UNSIGNED : SIGNED;
1217 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1218 if (icode != CODE_FOR_nothing)
1220 struct expand_operand ops[4];
1221 rtx_insn *last = get_last_insn ();
1223 res = gen_reg_rtx (mode);
1224 create_output_operand (&ops[0], res, mode);
1225 create_input_operand (&ops[1], op0, mode);
1226 create_input_operand (&ops[2], op1, mode);
1227 create_fixed_operand (&ops[3], do_error);
1228 if (maybe_expand_insn (icode, 4, ops))
1230 last = get_last_insn ();
1231 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1232 && JUMP_P (last)
1233 && any_condjump_p (last)
1234 && !find_reg_note (last, REG_BR_PROB, 0))
1235 add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
1236 emit_jump (done_label);
1238 else
1240 delete_insns_since (last);
1241 icode = CODE_FOR_nothing;
1245 if (icode == CODE_FOR_nothing)
1247 struct separate_ops ops;
1248 int prec = GET_MODE_PRECISION (mode);
1249 machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
1250 ops.op0 = make_tree (type, op0);
1251 ops.op1 = make_tree (type, op1);
1252 ops.op2 = NULL_TREE;
1253 ops.location = loc;
1254 if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1255 && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
1257 machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
1258 ops.code = WIDEN_MULT_EXPR;
1259 ops.type
1260 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1262 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1263 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1264 NULL_RTX, uns);
1265 hipart = gen_lowpart (mode, hipart);
1266 res = gen_lowpart (mode, res);
1267 if (uns)
1268 /* For the unsigned multiplication, there was overflow if
1269 HIPART is non-zero. */
1270 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1271 NULL_RTX, NULL, done_label,
1272 PROB_VERY_LIKELY);
1273 else
1275 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1276 NULL_RTX, 0);
1277 /* RES is low half of the double width result, HIPART
1278 the high half. There was overflow if
1279 HIPART is different from RES < 0 ? -1 : 0. */
1280 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1281 NULL_RTX, NULL, done_label,
1282 PROB_VERY_LIKELY);
1285 else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
1287 rtx_code_label *large_op0 = gen_label_rtx ();
1288 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1289 rtx_code_label *one_small_one_large = gen_label_rtx ();
1290 rtx_code_label *both_ops_large = gen_label_rtx ();
1291 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1292 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1293 rtx_code_label *do_overflow = gen_label_rtx ();
1294 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1296 unsigned int hprec = GET_MODE_PRECISION (hmode);
1297 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1298 NULL_RTX, uns);
1299 hipart0 = gen_lowpart (hmode, hipart0);
1300 rtx lopart0 = gen_lowpart (hmode, op0);
1301 rtx signbit0 = const0_rtx;
1302 if (!uns)
1303 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1304 NULL_RTX, 0);
1305 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1306 NULL_RTX, uns);
1307 hipart1 = gen_lowpart (hmode, hipart1);
1308 rtx lopart1 = gen_lowpart (hmode, op1);
1309 rtx signbit1 = const0_rtx;
1310 if (!uns)
1311 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1312 NULL_RTX, 0);
1314 res = gen_reg_rtx (mode);
1316 /* True if op0 resp. op1 are known to be in the range of
1317 halfstype. */
1318 bool op0_small_p = false;
1319 bool op1_small_p = false;
1320 /* True if op0 resp. op1 are known to have all zeros or all ones
1321 in the upper half of bits, but are not known to be
1322 op{0,1}_small_p. */
1323 bool op0_medium_p = false;
1324 bool op1_medium_p = false;
1325 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1326 nonnegative, 1 if unknown. */
1327 int op0_sign = 1;
1328 int op1_sign = 1;
1330 if (pos_neg0 == 1)
1331 op0_sign = 0;
1332 else if (pos_neg0 == 2)
1333 op0_sign = -1;
1334 if (pos_neg1 == 1)
1335 op1_sign = 0;
1336 else if (pos_neg1 == 2)
1337 op1_sign = -1;
1339 unsigned int mprec0 = prec;
1340 if (arg0 != error_mark_node)
1341 mprec0 = get_min_precision (arg0, sign);
1342 if (mprec0 <= hprec)
1343 op0_small_p = true;
1344 else if (!uns && mprec0 <= hprec + 1)
1345 op0_medium_p = true;
1346 unsigned int mprec1 = prec;
1347 if (arg1 != error_mark_node)
1348 mprec1 = get_min_precision (arg1, sign);
1349 if (mprec1 <= hprec)
1350 op1_small_p = true;
1351 else if (!uns && mprec1 <= hprec + 1)
1352 op1_medium_p = true;
1354 int smaller_sign = 1;
1355 int larger_sign = 1;
1356 if (op0_small_p)
1358 smaller_sign = op0_sign;
1359 larger_sign = op1_sign;
1361 else if (op1_small_p)
1363 smaller_sign = op1_sign;
1364 larger_sign = op0_sign;
1366 else if (op0_sign == op1_sign)
1368 smaller_sign = op0_sign;
1369 larger_sign = op0_sign;
1372 if (!op0_small_p)
1373 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1374 NULL_RTX, NULL, large_op0,
1375 PROB_UNLIKELY);
1377 if (!op1_small_p)
1378 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1379 NULL_RTX, NULL, small_op0_large_op1,
1380 PROB_UNLIKELY);
1382 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1383 hmode to mode, the multiplication will never overflow. We can
1384 do just one hmode x hmode => mode widening multiplication. */
1385 rtx lopart0s = lopart0, lopart1s = lopart1;
1386 if (GET_CODE (lopart0) == SUBREG)
1388 lopart0s = shallow_copy_rtx (lopart0);
1389 SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1390 SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1392 if (GET_CODE (lopart1) == SUBREG)
1394 lopart1s = shallow_copy_rtx (lopart1);
1395 SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1396 SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1398 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1399 ops.op0 = make_tree (halfstype, lopart0s);
1400 ops.op1 = make_tree (halfstype, lopart1s);
1401 ops.code = WIDEN_MULT_EXPR;
1402 ops.type = type;
1403 rtx thisres
1404 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1405 emit_move_insn (res, thisres);
1406 emit_jump (done_label);
1408 emit_label (small_op0_large_op1);
1410 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1411 but op1 is not, just swap the arguments and handle it as op1
1412 sign/zero extended, op0 not. */
1413 rtx larger = gen_reg_rtx (mode);
1414 rtx hipart = gen_reg_rtx (hmode);
1415 rtx lopart = gen_reg_rtx (hmode);
1416 emit_move_insn (larger, op1);
1417 emit_move_insn (hipart, hipart1);
1418 emit_move_insn (lopart, lopart0);
1419 emit_jump (one_small_one_large);
1421 emit_label (large_op0);
1423 if (!op1_small_p)
1424 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1425 NULL_RTX, NULL, both_ops_large,
1426 PROB_UNLIKELY);
1428 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1429 but op0 is not, prepare larger, hipart and lopart pseudos and
1430 handle it together with small_op0_large_op1. */
1431 emit_move_insn (larger, op0);
1432 emit_move_insn (hipart, hipart0);
1433 emit_move_insn (lopart, lopart1);
1435 emit_label (one_small_one_large);
1437 /* lopart is the low part of the operand that is sign extended
1438 to mode, larger is the other operand, hipart is the
1439 high part of larger and lopart0 and lopart1 are the low parts
1440 of both operands.
1441 We perform lopart0 * lopart1 and lopart * hipart widening
1442 multiplications. */
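/* A sketch of the algebra behind the code below (not from the original
   source). Write the large operand as hipart * 2^hprec + lopart_of_large
   and let the small operand equal its (zero- or sign-extended) low half.
   For non-negative halves:

     large * small == (hipart * small) << hprec  +  lopart_of_large * small

   lo0xlo1 computes the second term as an exact unsigned hprec x hprec
   -> mode product, loxhi accumulates the first term plus the carry from
   the second, and the conditional subtractions below correct loxhi when
   hipart or the small operand is negative and was multiplied as if
   unsigned. */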
1443 tree halfutype = build_nonstandard_integer_type (hprec, 1);
1444 ops.op0 = make_tree (halfutype, lopart0);
1445 ops.op1 = make_tree (halfutype, lopart1);
1446 rtx lo0xlo1
1447 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1449 ops.op0 = make_tree (halfutype, lopart);
1450 ops.op1 = make_tree (halfutype, hipart);
1451 rtx loxhi = gen_reg_rtx (mode);
1452 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1453 emit_move_insn (loxhi, tem);
1455 if (!uns)
1457 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1458 if (larger_sign == 0)
1459 emit_jump (after_hipart_neg);
1460 else if (larger_sign != -1)
1461 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1462 NULL_RTX, NULL, after_hipart_neg,
1463 PROB_EVEN);
1465 tem = convert_modes (mode, hmode, lopart, 1);
1466 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1467 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1468 1, OPTAB_DIRECT);
1469 emit_move_insn (loxhi, tem);
1471 emit_label (after_hipart_neg);
1473 /* if (lopart < 0) loxhi -= larger; */
1474 if (smaller_sign == 0)
1475 emit_jump (after_lopart_neg);
1476 else if (smaller_sign != -1)
1477 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1478 NULL_RTX, NULL, after_lopart_neg,
1479 PROB_EVEN);
1481 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1482 1, OPTAB_DIRECT);
1483 emit_move_insn (loxhi, tem);
1485 emit_label (after_lopart_neg);
1488 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1489 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1490 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1491 1, OPTAB_DIRECT);
1492 emit_move_insn (loxhi, tem);
1494 /* if (loxhi >> (bitsize / 2)
1495 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
1496 if (loxhi >> (bitsize / 2) == 0) (if uns). */
1497 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1498 NULL_RTX, 0);
1499 hipartloxhi = gen_lowpart (hmode, hipartloxhi);
1500 rtx signbitloxhi = const0_rtx;
1501 if (!uns)
1502 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1503 gen_lowpart (hmode, loxhi),
1504 hprec - 1, NULL_RTX, 0);
1506 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1507 NULL_RTX, NULL, do_overflow,
1508 PROB_VERY_UNLIKELY);
1510 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1511 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1512 NULL_RTX, 1);
1513 tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);
1515 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1516 1, OPTAB_DIRECT);
1517 if (tem != res)
1518 emit_move_insn (res, tem);
1519 emit_jump (done_label);
1521 emit_label (both_ops_large);
1523 /* If both operands are large (not sign (!uns) or zero (uns)
1524 extended from hmode), then perform the full multiplication
1525 which will be the result of the operation.
1526 The only cases which don't overflow are, for signed multiplication,
1527 some cases where both hipart0 and hipart1 are 0 or -1.
1528 For unsigned multiplication when high parts are both non-zero
1529 this overflows always. */
1530 ops.code = MULT_EXPR;
1531 ops.op0 = make_tree (type, op0);
1532 ops.op1 = make_tree (type, op1);
1533 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1534 emit_move_insn (res, tem);
1536 if (!uns)
1538 if (!op0_medium_p)
1540 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1541 NULL_RTX, 1, OPTAB_DIRECT);
1542 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1543 NULL_RTX, NULL, do_error,
1544 PROB_VERY_UNLIKELY);
1547 if (!op1_medium_p)
1549 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1550 NULL_RTX, 1, OPTAB_DIRECT);
1551 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1552 NULL_RTX, NULL, do_error,
1553 PROB_VERY_UNLIKELY);
1556 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1557 the same, overflow happened if res is negative, if they are
1558 different, overflow happened if res is positive. */
1559 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1560 emit_jump (hipart_different);
1561 else if (op0_sign == 1 || op1_sign == 1)
1562 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1563 NULL_RTX, NULL, hipart_different,
1564 PROB_EVEN);
1566 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
1567 NULL_RTX, NULL, do_error,
1568 PROB_VERY_UNLIKELY);
1569 emit_jump (done_label);
1571 emit_label (hipart_different);
1573 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1574 NULL_RTX, NULL, do_error,
1575 PROB_VERY_UNLIKELY);
1576 emit_jump (done_label);
1579 emit_label (do_overflow);
1581 /* Overflow, do full multiplication and fallthru into do_error. */
1582 ops.op0 = make_tree (type, op0);
1583 ops.op1 = make_tree (type, op1);
1584 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1585 emit_move_insn (res, tem);
1587 else
1589 gcc_assert (!is_ubsan);
1590 ops.code = MULT_EXPR;
1591 ops.type = type;
1592 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1593 emit_jump (done_label);
1597 do_error_label:
1598 emit_label (do_error);
1599 if (is_ubsan)
1601 /* Expand the ubsan builtin call. */
1602 push_temp_slots ();
1603 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1604 arg0, arg1);
1605 expand_normal (fn);
1606 pop_temp_slots ();
1607 do_pending_stack_adjust ();
1609 else if (lhs)
1610 write_complex_part (target, const1_rtx, true);
1612 /* We're done. */
1613 emit_label (done_label);
1615 /* u1 * u2 -> sr */
1616 if (uns0_p && uns1_p && !unsr_p)
1618 rtx_code_label *all_done_label = gen_label_rtx ();
1619 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1620 NULL, all_done_label, PROB_VERY_LIKELY);
1621 write_complex_part (target, const1_rtx, true);
1622 emit_label (all_done_label);
1625 /* s1 * u2 -> sr */
1626 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1628 rtx_code_label *all_done_label = gen_label_rtx ();
1629 rtx_code_label *set_noovf = gen_label_rtx ();
1630 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1631 NULL, all_done_label, PROB_VERY_LIKELY);
1632 write_complex_part (target, const1_rtx, true);
1633 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1634 NULL, set_noovf, PROB_VERY_LIKELY);
1635 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1636 NULL, all_done_label, PROB_VERY_UNLIKELY);
1637 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1638 all_done_label, PROB_VERY_UNLIKELY);
1639 emit_label (set_noovf);
1640 write_complex_part (target, const0_rtx, true);
1641 emit_label (all_done_label);
1644 if (lhs)
1646 if (is_ubsan)
1647 expand_ubsan_result_store (target, res);
1648 else
1649 expand_arith_overflow_result_store (lhs, target, mode, res);
1653 /* Expand UBSAN_CHECK_ADD call STMT. */
1655 static void
1656 expand_UBSAN_CHECK_ADD (gcall *stmt)
1658 location_t loc = gimple_location (stmt);
1659 tree lhs = gimple_call_lhs (stmt);
1660 tree arg0 = gimple_call_arg (stmt, 0);
1661 tree arg1 = gimple_call_arg (stmt, 1);
1662 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
1663 false, false, false, true);
1666 /* Expand UBSAN_CHECK_SUB call STMT. */
1668 static void
1669 expand_UBSAN_CHECK_SUB (gcall *stmt)
1671 location_t loc = gimple_location (stmt);
1672 tree lhs = gimple_call_lhs (stmt);
1673 tree arg0 = gimple_call_arg (stmt, 0);
1674 tree arg1 = gimple_call_arg (stmt, 1);
1675 if (integer_zerop (arg0))
1676 expand_neg_overflow (loc, lhs, arg1, true);
1677 else
1678 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
1679 false, false, false, true);
1682 /* Expand UBSAN_CHECK_MUL call STMT. */
1684 static void
1685 expand_UBSAN_CHECK_MUL (gcall *stmt)
1687 location_t loc = gimple_location (stmt);
1688 tree lhs = gimple_call_lhs (stmt);
1689 tree arg0 = gimple_call_arg (stmt, 0);
1690 tree arg1 = gimple_call_arg (stmt, 1);
1691 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
1694 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
1696 static void
1697 expand_arith_overflow (enum tree_code code, gimple stmt)
1699 tree lhs = gimple_call_lhs (stmt);
1700 if (lhs == NULL_TREE)
1701 return;
1702 tree arg0 = gimple_call_arg (stmt, 0);
1703 tree arg1 = gimple_call_arg (stmt, 1);
1704 tree type = TREE_TYPE (TREE_TYPE (lhs));
1705 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
1706 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
1707 int unsr_p = TYPE_UNSIGNED (type);
1708 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
1709 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
1710 int precres = TYPE_PRECISION (type);
1711 location_t loc = gimple_location (stmt);
1712 if (!uns0_p && get_range_pos_neg (arg0) == 1)
1713 uns0_p = true;
1714 if (!uns1_p && get_range_pos_neg (arg1) == 1)
1715 uns1_p = true;
1716 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
1717 prec0 = MIN (prec0, pr);
1718 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
1719 prec1 = MIN (prec1, pr);
1721 /* If uns0_p && uns1_p, precop is minimum needed precision
1722 of unsigned type to hold the exact result, otherwise
1723 precop is minimum needed precision of signed type to
1724 hold the exact result. */
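/* For example, for PLUS_EXPR with arg0 of type int (prec0 == 32, signed)
   and arg1 of type unsigned int (prec1 == 32, unsigned), and no range
   information narrowing either operand, precop == MAX (32, 33) + 1 == 34:
   a 34-bit signed type is the smallest that holds every possible sum.
   For MULT_EXPR on two 16-bit operands of equal signedness,
   precop == 16 + 16 == 32. */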
1725 int precop;
1726 if (code == MULT_EXPR)
1727 precop = prec0 + prec1 + (uns0_p != uns1_p);
1728 else
1730 if (uns0_p == uns1_p)
1731 precop = MAX (prec0, prec1) + 1;
1732 else if (uns0_p)
1733 precop = MAX (prec0 + 1, prec1) + 1;
1734 else
1735 precop = MAX (prec0, prec1 + 1) + 1;
1737 int orig_precres = precres;
1741 if ((uns0_p && uns1_p)
1742 ? ((precop + !unsr_p) <= precres
1743 /* u1 - u2 -> ur can overflow, no matter what precision
1744 the result has. */
1745 && (code != MINUS_EXPR || !unsr_p))
1746 : (!unsr_p && precop <= precres))
1748 /* The infinite precision result will always fit into result. */
1749 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1750 write_complex_part (target, const0_rtx, true);
1751 enum machine_mode mode = TYPE_MODE (type);
1752 struct separate_ops ops;
1753 ops.code = code;
1754 ops.type = type;
1755 ops.op0 = fold_convert_loc (loc, type, arg0);
1756 ops.op1 = fold_convert_loc (loc, type, arg1);
1757 ops.op2 = NULL_TREE;
1758 ops.location = loc;
1759 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1760 expand_arith_overflow_result_store (lhs, target, mode, tem);
1761 return;
1764 #ifdef WORD_REGISTER_OPERATIONS
1765 /* For sub-word operations, if the target doesn't have them, start
1766 with precres widening right away, otherwise do it only
1767 if the simplest cases can't be used. */
1768 if (orig_precres == precres && precres < BITS_PER_WORD)
1770 else
1771 #endif
1772 if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
1773 || ((!uns0_p || !uns1_p) && !unsr_p
1774 && prec0 + uns0_p <= precres
1775 && prec1 + uns1_p <= precres))
1777 arg0 = fold_convert_loc (loc, type, arg0);
1778 arg1 = fold_convert_loc (loc, type, arg1);
1779 switch (code)
1781 case MINUS_EXPR:
1782 if (integer_zerop (arg0) && !unsr_p)
1783 expand_neg_overflow (loc, lhs, arg1, false);
1784 /* FALLTHRU */
1785 case PLUS_EXPR:
1786 expand_addsub_overflow (loc, code, lhs, arg0, arg1,
1787 unsr_p, unsr_p, unsr_p, false);
1788 return;
1789 case MULT_EXPR:
1790 expand_mul_overflow (loc, lhs, arg0, arg1,
1791 unsr_p, unsr_p, unsr_p, false);
1792 return;
1793 default:
1794 gcc_unreachable ();
1798 /* For sub-word operations, retry with a wider type first. */
1799 if (orig_precres == precres && precop <= BITS_PER_WORD)
1801 #ifdef WORD_REGISTER_OPERATIONS
1802 int p = BITS_PER_WORD;
1803 #else
1804 int p = precop;
1805 #endif
1806 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
1807 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
1808 uns0_p && uns1_p
1809 && unsr_p);
1810 p = TYPE_PRECISION (optype);
1811 if (p > precres)
1813 precres = p;
1814 unsr_p = TYPE_UNSIGNED (optype);
1815 type = optype;
1816 continue;
1820 if (prec0 <= precres && prec1 <= precres)
1822 tree types[2];
1823 if (unsr_p)
1825 types[0] = build_nonstandard_integer_type (precres, 0);
1826 types[1] = type;
1828 else
1830 types[0] = type;
1831 types[1] = build_nonstandard_integer_type (precres, 1);
1833 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
1834 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
1835 if (code != MULT_EXPR)
1836 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
1837 uns0_p, uns1_p, false);
1838 else
1839 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
1840 uns0_p, uns1_p, false);
1841 return;
1844 /* Retry with a wider type. */
1845 if (orig_precres == precres)
1847 int p = MAX (prec0, prec1);
1848 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
1849 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
1850 uns0_p && uns1_p
1851 && unsr_p);
1852 p = TYPE_PRECISION (optype);
1853 if (p > precres)
1855 precres = p;
1856 unsr_p = TYPE_UNSIGNED (optype);
1857 type = optype;
1858 continue;
1862 gcc_unreachable ();
1864 while (1);
1867 /* Expand ADD_OVERFLOW STMT. */
1869 static void
1870 expand_ADD_OVERFLOW (gcall *stmt)
1872 expand_arith_overflow (PLUS_EXPR, stmt);
1875 /* Expand SUB_OVERFLOW STMT. */
1877 static void
1878 expand_SUB_OVERFLOW (gcall *stmt)
1880 expand_arith_overflow (MINUS_EXPR, stmt);
1883 /* Expand MUL_OVERFLOW STMT. */
1885 static void
1886 expand_MUL_OVERFLOW (gcall *stmt)
1888 expand_arith_overflow (MULT_EXPR, stmt);
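/* These expanders back the type-generic __builtin_add_overflow,
   __builtin_sub_overflow and __builtin_mul_overflow built-ins. An
   illustrative use (user-level code, not part of this file; the handler
   name is made up):

     int r;
     if (__builtin_mul_overflow (a, b, &r))
       handle_overflow ();

   The call reaches expand_arith_overflow as an internal call whose lhs
   has complex type: the real part holds the wrapped result and the
   imaginary part the overflow flag, as written by
   expand_arith_overflow_result_store and write_complex_part above. */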
1891 /* This should get folded in tree-vectorizer.c. */
1893 static void
1894 expand_LOOP_VECTORIZED (gcall *)
1896 gcc_unreachable ();
1899 static void
1900 expand_MASK_LOAD (gcall *stmt)
1902 struct expand_operand ops[3];
1903 tree type, lhs, rhs, maskt;
1904 rtx mem, target, mask;
1906 maskt = gimple_call_arg (stmt, 2);
1907 lhs = gimple_call_lhs (stmt);
1908 if (lhs == NULL_TREE)
1909 return;
1910 type = TREE_TYPE (lhs);
1911 rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
1912 gimple_call_arg (stmt, 1));
1914 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1915 gcc_assert (MEM_P (mem));
1916 mask = expand_normal (maskt);
1917 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1918 create_output_operand (&ops[0], target, TYPE_MODE (type));
1919 create_fixed_operand (&ops[1], mem);
1920 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
1921 expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
1924 static void
1925 expand_MASK_STORE (gcall *stmt)
1927 struct expand_operand ops[3];
1928 tree type, lhs, rhs, maskt;
1929 rtx mem, reg, mask;
1931 maskt = gimple_call_arg (stmt, 2);
1932 rhs = gimple_call_arg (stmt, 3);
1933 type = TREE_TYPE (rhs);
1934 lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
1935 gimple_call_arg (stmt, 1));
1937 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1938 gcc_assert (MEM_P (mem));
1939 mask = expand_normal (maskt);
1940 reg = expand_normal (rhs);
1941 create_fixed_operand (&ops[0], mem);
1942 create_input_operand (&ops[1], reg, TYPE_MODE (type));
1943 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
1944 expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
1947 static void
1948 expand_ABNORMAL_DISPATCHER (gcall *)
1952 static void
1953 expand_BUILTIN_EXPECT (gcall *stmt)
1955 /* When guessing was done, the hints should already have been stripped away. */
1956 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
1958 rtx target;
1959 tree lhs = gimple_call_lhs (stmt);
1960 if (lhs)
1961 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1962 else
1963 target = const0_rtx;
1964 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
1965 if (lhs && val != target)
1966 emit_move_insn (target, val);
1969 /* IFN_VA_ARG is supposed to be expanded at pass_stdarg. So this dummy function
1970 should never be called. */
1972 static void
1973 expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
1975 gcc_unreachable ();
1978 /* Routines to expand each internal function, indexed by function number.
1979 Each routine has the prototype:
1981 expand_<NAME> (gcall *stmt)
1983 where STMT is the statement that performs the call. */
1984 static void (*const internal_fn_expanders[]) (gcall *) = {
1985 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
1986 #include "internal-fn.def"
1987 #undef DEF_INTERNAL_FN
1991 /* Expand STMT, which is a call to internal function FN. */
1993 void
1994 expand_internal_call (gcall *stmt)
1996 internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);