/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "hashtab.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "ubsan.h"
#include "target.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}

/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}

/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
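
/* ANNOTATE should have been stripped away earlier (presumably once the
   loop annotation it carries has been consumed); it must never survive
   to expansion.  */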

static void
expand_ANNOTATE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}

/* Helper function for expand_*_overflow.  Return 1 if ARG, interpreted
   as signed in its precision, is known to be always non-negative, 2 if
   ARG is known to be always negative, or 3 if ARG may be positive or
   negative.  */
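
/* For example (illustrative): an ARG with a recorded VRP range of
   [0, 100] yields 1, one with [-100, -1] yields 2, and an ARG with no
   usable range information yields 3.  */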

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
        return 2;
      else
        return 1;
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
        return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
                return 1;
              prec = TYPE_PRECISION (TREE_TYPE (t));
              arg = t;
              if (++cnt > 30)
                return 3;
              continue;
            }
        }
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 2;
    }
  return 3;
}

/* Return the minimum precision needed to represent all values
   of ARG in a SIGNed integral type.  */
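
/* For example (illustrative): for a signed 32-bit ARG known from VRP
   to be in [0, 1000], SIGNED precision 11 suffices (10 value bits plus
   a sign bit); with no usable range information the full 32 is
   returned, plus 1 if SIGN had to be flipped while walking through
   conversions.  */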

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
      if (++cnt > 30)
        return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              if (++cnt > 30)
                return prec + (orig_sign != sign);
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, SIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has a larger mode than the __real__ part of
   TARGET, set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL_RTX, done_label,
                               PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}

/* Add addition/subtraction overflow checking to the statement STMT.
   CODE says whether the operation is + or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
        res = u1 + u2
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
        res = u1 - u2
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
        t1 = (S) (u2 ^ sgn)
        t2 = s1 + t1
        res = (U) t2 ^ sgn
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
        res = (U) s1 - u2
        ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
        res = u1 - (U) s2
        ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
        t1 = u1 ^ sgn
        t2 = t1 - (U) s2
        res = t2 ^ sgn
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
        res = (S) (u1 + u2)
        ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
        res = (S) (u1 - u2)
        ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
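
  /* Worked example of the first rule above (illustrative, 8-bit
     precision, so sgn = 0x80): for s1 + s2 -> sr with s1 = 100 and
     s2 = 50, res = (S) ((U) 100 + (U) 50) = (S) 150 = -106.  Since
     s2 >= 0, ovf = res < s1, i.e. -106 < 100, so the addition is
     correctly flagged as overflowing.  */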

  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative; if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On the RTL level, the arithmetic is
         always unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         which operand to compare against.  For prec <= BITS_PER_WORD,
         preferring a REG operand over a CONST_INT is likely better,
         because the CONST_INT might enlarge the instruction or CSE would
         need to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, a CONST_INT might be more beneficial,
         as then the multi-word comparison can perhaps be simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
        tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL_RTX, done_label,
                               PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On the RTL level, the arithmetic is
         always unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL_RTX,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On the RTL level, the subtraction is
         always unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
        /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always non-negative, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL_RTX, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL_RTX,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On the RTL level, the subtraction is
         always unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL_RTX,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On the RTL level, the addition is
         always unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL_RTX, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick which operand to
         compare against.  For prec <= BITS_PER_WORD, preferring a REG
         operand over a CONST_INT is likely better, because the CONST_INT
         might enlarge the instruction or CSE would need to figure out
         we'd already loaded it into a register before.  For
         prec > BITS_PER_WORD, a CONST_INT might be more beneficial, as
         then the multi-word comparison can perhaps be simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL_RTX,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On the RTL level, the arithmetic is
         always unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
        {
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
            {
              rtx tem = op1;
              op1 = op0;
              op0 = tem;
              pos_neg = pos_neg0;
            }
        }
      rtx tem;
      if (pos_neg != 3)
        {
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                                    ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
        }
      else
        {
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL_RTX, do_ior_label,
                                   PROB_EVEN);
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
        }
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On the RTL level, the subtraction is
         always unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL_RTX,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL_RTX, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL_RTX, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On the RTL level, the arithmetic is
         always unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
         the second operand, as subtraction is not commutative) is always
         non-negative or always negative, we can do just one comparison
         and conditional jump at runtime instead of two (three are present
         in the emitted code).  If one of the arguments is CONST_INT, all
         we need is to make sure it is op1, then the first
         do_compare_rtx_and_jump will be just folded.  Otherwise try
         to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
        {
          rtx tem = op0;
          op0 = op1;
          op1 = tem;
        }
      else if (CONST_INT_P (op1))
        ;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
        {
          pos_neg = get_range_pos_neg (arg0);
          if (pos_neg != 3)
            {
              rtx tem = op0;
              op0 = op1;
              op1 = tem;
            }
        }
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
        pos_neg = get_range_pos_neg (arg1);

      /* If op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
        do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL_RTX, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                                 false, mode, NULL_RTX, NULL_RTX, done_label,
                                 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
        {
          emit_jump (do_error);

          emit_label (sub_check);
        }

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                                 false, mode, NULL_RTX, NULL_RTX, done_label,
                                 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        {
          if (do_xor)
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
                                OPTAB_LIB_WIDEN);

          expand_arith_overflow_result_store (lhs, target, mode, res);
        }
    }
}

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On the RTL level, the negation is
         always unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL_RTX,
                               done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
     u1 * u2 -> ur
        res = u1 * u2
        ovf = main_ovf (true)
     s1 * u2 -> ur
        res = (U) s1 * u2
        ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
        res = (S) (u1 * u2)
        ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
                          : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        res = t1 * t2
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
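
  /* Worked example of the u1 * u2 -> sr rule above (illustrative,
     8-bit precision): for u1 = 16 and u2 = 12, u1 * u2 = 192 wraps to
     res = (S) 192 = -64.  main_ovf (true) does not trigger, as 192
     fits in 8 unsigned bits, but res < 0 does, so the product is
     correctly flagged as overflowing the signed result type.  */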

  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative; if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
        {
        case 1:
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          goto do_main;
        case 2:
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          rtx_code_label *do_main_label;
          do_main_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
        {
        case 1:
          goto do_main;
        case 2:
          /* If (S) u2 is negative (i.e. u2 is larger than the maximum
             of S), avoid the main code, just multiply and signal
             overflow unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL_RTX, do_error, PROB_VERY_UNLIKELY);
          int prec;
          prec = GET_MODE_PRECISION (mode);
          rtx sgn;
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL_RTX, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          /* Rest of handling of this case after res is computed.  */
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
        {
        case 1: /* Both operands known to be non-negative.  */
          goto do_main;
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          goto do_main;
        case 3:
          if ((pos_neg0 ^ pos_neg1) == 3)
            {
              /* If one operand is known to be negative and the other
                 non-negative, this always overflows, unless the
                 non-negative one is 0.  Just do normal multiply and set
                 overflow unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
              ops.type
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
                                                  1);
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              ops.location = loc;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                  OPTAB_LIB_WIDEN);
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL_RTX, done_label,
                                       PROB_VERY_LIKELY);
              goto do_error_label;
            }
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rtx rop0, rop1;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          op0 = rop0;
          op1 = rop1;
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, after_negate_label,
                                   PROB_VERY_LIKELY);
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
                                            NULL_RTX, false));
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
                                            NULL_RTX, false));
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
                               OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
          /* One argument is negative here, the other positive.  This
             always overflows, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, so we can keep the do_main code that ors in the overflow
             as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL_RTX, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
        {
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
          ops.type
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
                                     NULL_RTX, uns);
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);
          if (uns)
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL_RTX, done_label,
                                     PROB_VERY_LIKELY);
          else
            {
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
                                          NULL_RTX, 0);
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL_RTX, done_label,
                                       PROB_VERY_LIKELY);
            }
        }
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
        {
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
                                      NULL_RTX, uns);
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
          if (!uns)
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
                                     NULL_RTX, 0);
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
                                      NULL_RTX, uns);
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
          if (!uns)
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
                                     NULL_RTX, 0);

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
             halfstype.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op{0,1}_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          int op0_sign = 1;
          int op1_sign = 1;

          if (pos_neg0 == 1)
            op0_sign = 0;
          else if (pos_neg0 == 2)
            op0_sign = -1;
          if (pos_neg1 == 1)
            op1_sign = 0;
          else if (pos_neg1 == 2)
            op1_sign = -1;

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
            op0_small_p = true;
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
            op1_small_p = true;
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
          if (op0_small_p)
            {
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
            }
          else if (op1_small_p)
            {
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
            }
          else if (op0_sign == op1_sign)
            {
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            }

          if (!op0_small_p)
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL_RTX, large_op0,
                                     PROB_UNLIKELY);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL_RTX, small_op0_large_op1,
                                     PROB_UNLIKELY);

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
            {
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          if (GET_CODE (lopart1) == SUBREG)
            {
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
          ops.type = type;
          rtx thisres
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);

          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL_RTX, both_ops_large,
                                     PROB_UNLIKELY);

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);

          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
          rtx lo0xlo1
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          if (!uns)
            {
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL_RTX, after_hipart_neg,
                                         PROB_EVEN);

              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_hipart_neg);

              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL_RTX, after_lopart_neg,
                                         PROB_EVEN);

              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_lopart_neg);
            }

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
                                     1, OPTAB_DIRECT);
          emit_move_insn (loxhi, tem);

          /* No overflow if
             (hmode) (loxhi >> (bitsize / 2))
               == (hmode) loxhi >> (bitsize / 2 - 1)  (if !uns),
             loxhi >> (bitsize / 2) == 0               (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
                                          NULL_RTX, 0);
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
          if (!uns)
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL_RTX, do_overflow,
                                   PROB_VERY_UNLIKELY);

          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
                                           NULL_RTX, 1);
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
                                     1, OPTAB_DIRECT);
          if (tem != res)
            emit_move_insn (res, tem);
          emit_jump (done_label);

          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and hipart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);

          if (!uns)
            {
              if (!op0_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL_RTX, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              if (!op1_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL_RTX, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is negative, if they are
                 different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL_RTX, hipart_different,
                                         PROB_EVEN);

              do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
                                       NULL_RTX, NULL_RTX, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

              emit_label (hipart_different);

              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL_RTX, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);
            }

          emit_label (do_overflow);

          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
        }
      else
        {
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          ops.type = type;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);
        }
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL_RTX, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL_RTX, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL_RTX, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                               NULL_RTX, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL_RTX,
                               all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                          false, false, false, true);
}

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
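  /* For example (illustrative): adding a signed 8-bit value to an
     unsigned 8-bit one (uns0_p false, uns1_p true, code PLUS_EXPR)
     gives precop = MAX (8, 8 + 1) + 1 = 10, the width of a signed
     type that can hold every possible sum.  */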
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinite precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          enum machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
         with precres widening right away, otherwise do it only
         if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
        ;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
          || ((!uns0_p || !uns1_p) && !unsr_p
              && prec0 + uns0_p <= precres
              && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                {
                  expand_neg_overflow (loc, lhs, arg1, false);
                  return;
                }
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1,
                                      unsr_p, unsr_p, unsr_p, false);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1,
                                   unsr_p, unsr_p, unsr_p, false);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
#ifdef WORD_REGISTER_OPERATIONS
          int p = BITS_PER_WORD;
#else
          int p = precop;
#endif
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}

/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
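
/* Expand MASK_LOAD call STMT using optab maskload_optab.  */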

static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
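
/* Expand MASK_STORE call STMT using optab maskstore_optab.  */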

static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
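
/* ABNORMAL_DISPATCHER is only a device to model abnormal control flow
   edges in the CFG; it intentionally expands to no RTL.  */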

static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}

static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
                         EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to an internal function.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}