gcc/internal-fn.c
/* Internal functions.
   Copyright (C) 2011-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "tree.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "gimple.h"
#include "ubsan.h"
#include "target.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}
/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}
/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gcall *stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
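/* For illustration: on targets that provide the vec_load_lanes/
   vec_store_lanes optabs (e.g. ARM/AArch64, where they typically map
   to the NEON vld2/vld3/vld4 and vst2/vst3/vst4 families), the two
   expanders above emit a single instruction that loads or stores N
   vectors while de-interleaving or re-interleaving the element
   groups.  */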
static void
expand_ANNOTATE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (gcall *)
{
  gcc_unreachable ();
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
        return 2;
      else
        return 1;
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
        return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
                return 1;
              prec = TYPE_PRECISION (TREE_TYPE (t));
              arg = t;
              if (++cnt > 30)
                return 3;
              continue;
            }
        }
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 2;
    }
  return 3;
}
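/* For example, an SSA name with a recorded value range of [0, 100]
   yields 1 (always non-negative), a range of [-100, -1] yields 2
   (always negative), and a range such as [-1, 1], or a missing range,
   yields 3 (may be either).  */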
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
      if (++cnt > 30)
        return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              if (++cnt > 30)
                return prec + (orig_sign != sign);
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
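/* For example, for the INTEGER_CST 100 this returns 7 when SIGN is
   UNSIGNED (100 fits in 7 bits) and 8 when SIGN is SIGNED (one extra
   bit is needed for the sign); the result is capped by the precision
   of ARG's type.  */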
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
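/* For example, if RES was computed in DImode but the lhs is a complex
   SImode value, LRES is the SImode truncation of RES; converting LRES
   back to DImode and comparing it with RES detects whether the value
   fit, and the __imag__ (overflow) part is set to 1 when it did not.  */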
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is + or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
        res = u1 + u2
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
        res = u1 - u2
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
        t1 = (S) (u2 ^ sgn)
        t2 = s1 + t1
        res = (U) t2 ^ sgn
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
        res = (U) s1 - u2
        ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
        res = u1 - (U) s2
        ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
        t1 = u1 ^ sgn
        t2 = t1 - (U) s2
        res = t2 ^ sgn
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
        res = (S) (u1 + u2)
        ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
        res = (S) (u1 - u2)
        ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
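  /* A worked example of the s1 + s2 -> sr rule above, with 8-bit
     precision: for s1 = 100, s2 = 50, res = (S) ((U) 100 + (U) 50)
     = (S) 150 = -106; s2 >= 0 and res < s1, so the addition
     overflowed.  For s1 = 100, s2 = -50, res = 50, which is not
     greater than s1 with s2 < 0, so it did not.  */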
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
        tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
        /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
        {
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
            {
              std::swap (op0, op1);
              pos_neg = pos_neg0;
            }
        }
      rtx tem;
      if (pos_neg != 3)
        {
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                                    ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      else
        {
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label,
                                   PROB_EVEN);
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
         the second operand, as subtraction is not commutative) is always
         non-negative or always negative, we can do just one comparison
         and conditional jump instead of 2 at runtime, 3 present in the
         emitted code.  If one of the arguments is CONST_INT, all we
         need is to make sure it is op1, then the first
         do_compare_rtx_and_jump will be just folded.  Otherwise try
         to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
        std::swap (op0, op1);
      else if (CONST_INT_P (op1))
        ;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
        {
          pos_neg = get_range_pos_neg (arg0);
          if (pos_neg != 3)
            std::swap (op0, op1);
        }
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
        pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
        do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
        {
          emit_jump (do_error);

          emit_label (sub_check);
        }

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
        do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                                 false, mode, NULL_RTX, NULL, done_label,
                                 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        {
          if (do_xor)
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
                                OPTAB_LIB_WIDEN);

          expand_arith_overflow_result_store (lhs, target, mode, res);
        }
    }
}
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
     u1 * u2 -> ur
        res = u1 * u2
        ovf = main_ovf (true)
     s1 * u2 -> ur
        res = (U) s1 * u2
        ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
        res = (S) (u1 * u2)
        ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
              : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        res = t1 * t2
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
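  /* A worked example of the s1 * u2 -> sr rule above, with 8-bit
     precision: for s1 = -1, u2 = 255, (S) u2 = -1 is negative, so the
     second arm applies; res = (S) ((U) s1 * u2) = (S) (255 * 255)
     = (S) 1 = 1, and since s1 != 0 and u2 != (U) res (255 != 1), the
     multiplication overflowed (-255 does not fit in 8 signed bits).
     For s1 = -1, u2 = 128, res = (S) 128 = -128 and u2 == (U) res,
     so -1 * 128 = -128 just fits.  */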
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
        {
        case 1:
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          goto do_main;
        case 2:
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          rtx_code_label *do_main_label;
          do_main_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
        {
        case 1:
          goto do_main;
        case 2:
          /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
             avoid the main code, just multiply and signal overflow
             unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL, do_error, PROB_VERY_UNLIKELY);
          int prec;
          prec = GET_MODE_PRECISION (mode);
          rtx sgn;
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          /* Rest of handling of this case after res is computed.  */
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
        {
        case 1: /* Both operands known to be non-negative.  */
          goto do_main;
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          goto do_main;
        case 3:
          if ((pos_neg0 ^ pos_neg1) == 3)
            {
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
              ops.type
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
                                                  1);
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              ops.location = loc;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                  OPTAB_LIB_WIDEN);
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
              goto do_error_label;
            }
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rtx rop0, rop1;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          op0 = rop0;
          op1 = rop1;
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, after_negate_label, PROB_VERY_LIKELY);
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
                                            NULL_RTX, false));
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
                                            NULL_RTX, false));
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
                               OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          /* One argument is negative here, the other positive.  This
             overflows always, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, thus we can keep do_main code oring in overflow as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          write_complex_part (target, const1_rtx, true);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }
 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
        {
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
          ops.type
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
                                     NULL_RTX, uns);
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);
          if (uns)
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     PROB_VERY_LIKELY);
          else
            {
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
                                          NULL_RTX, 0);
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
            }
        }
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
        {
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
                                      NULL_RTX, uns);
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
          if (!uns)
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
                                     NULL_RTX, 0);
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
                                      NULL_RTX, uns);
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
          if (!uns)
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
                                     NULL_RTX, 0);

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
             halfstype.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op{0,1}_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          int op0_sign = 1;
          int op1_sign = 1;

          if (pos_neg0 == 1)
            op0_sign = 0;
          else if (pos_neg0 == 2)
            op0_sign = -1;
          if (pos_neg1 == 1)
            op1_sign = 0;
          else if (pos_neg1 == 2)
            op1_sign = -1;

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
            op0_small_p = true;
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
            op1_small_p = true;
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
          if (op0_small_p)
            {
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
            }
          else if (op1_small_p)
            {
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
            }
          else if (op0_sign == op1_sign)
            {
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            }

          if (!op0_small_p)
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
                                     PROB_UNLIKELY);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,
                                     PROB_UNLIKELY);

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
            {
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          if (GET_CODE (lopart1) == SUBREG)
            {
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
          ops.type = type;
          rtx thisres
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);

          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,
                                     PROB_UNLIKELY);

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);

          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
          rtx lo0xlo1
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          if (!uns)
            {
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_hipart_neg,
                                         PROB_EVEN);

              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_hipart_neg);

              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_lopart_neg,
                                         PROB_EVEN);

              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_lopart_neg);
            }

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
                                     1, OPTAB_DIRECT);
          emit_move_insn (loxhi, tem);

          /* if (loxhi >> (bitsize / 2)
                 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
             if (loxhi >> (bitsize / 2) == 0  (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
                                          NULL_RTX, 0);
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
          if (!uns)
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   PROB_VERY_UNLIKELY);

          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
                                           NULL_RTX, 1);
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
                                     1, OPTAB_DIRECT);
          if (tem != res)
            emit_move_insn (res, tem);
          emit_jump (done_label);

          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are for signed multiplication
             some cases where both hipart0 and hipart1 are 0 or -1.
             For unsigned multiplication when high parts are both non-zero
             this overflows always.  */
          ops.code = MULT_EXPR;
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);

          if (!uns)
            {
              if (!op0_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              if (!op1_medium_p)
                {
                  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
                                             NULL_RTX, 1, OPTAB_DIRECT);
                  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
                                           NULL_RTX, NULL, do_error,
                                           PROB_VERY_UNLIKELY);
                }

              /* At this point hipart{0,1} are both in [-1, 0].  If they are
                 the same, overflow happened if res is negative, if they are
                 different, overflow happened if res is positive.  */
              if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
                emit_jump (hipart_different);
              else if (op0_sign == 1 || op1_sign == 1)
                do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
                                         NULL_RTX, NULL, hipart_different,
                                         PROB_EVEN);

              do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);

              emit_label (hipart_different);

              do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
                                       NULL_RTX, NULL, do_error,
                                       PROB_VERY_UNLIKELY);
              emit_jump (done_label);
            }

          emit_label (do_overflow);

          /* Overflow, do full multiplication and fallthru into do_error.  */
          ops.op0 = make_tree (type, op0);
          ops.op1 = make_tree (type, op1);
          tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, tem);
        }
      else
        {
          gcc_assert (!is_ubsan);
          ops.code = MULT_EXPR;
          ops.type = type;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_jump (done_label);
        }
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);
  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                               NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                               NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
                               all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
                          false, false, false, true);
}
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
                            false, false, false, true);
}
/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
        precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
        precop = MAX (prec0 + 1, prec1) + 1;
      else
        precop = MAX (prec0, prec1 + 1) + 1;
    }
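  /* For example, adding a 32-bit signed operand to a 32-bit unsigned
     operand gives precop = MAX (32, 32 + 1) + 1 = 34: every exact sum
     fits in a 34-bit signed type.  */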
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
          ? ((precop + !unsr_p) <= precres
             /* u1 - u2 -> ur can overflow, no matter what precision
                the result has.  */
             && (code != MINUS_EXPR || !unsr_p))
          : (!unsr_p && precop <= precres))
        {
          /* The infinity precision result will always fit into result.  */
          rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
          write_complex_part (target, const0_rtx, true);
          enum machine_mode mode = TYPE_MODE (type);
          struct separate_ops ops;
          ops.code = code;
          ops.type = type;
          ops.op0 = fold_convert_loc (loc, type, arg0);
          ops.op1 = fold_convert_loc (loc, type, arg1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          expand_arith_overflow_result_store (lhs, target, mode, tem);
          return;
        }

#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
         with precres widening right away, otherwise do it only
         if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
        ;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
          || ((!uns0_p || !uns1_p) && !unsr_p
              && prec0 + uns0_p <= precres
              && prec1 + uns1_p <= precres))
        {
          arg0 = fold_convert_loc (loc, type, arg0);
          arg1 = fold_convert_loc (loc, type, arg1);
          switch (code)
            {
            case MINUS_EXPR:
              if (integer_zerop (arg0) && !unsr_p)
                expand_neg_overflow (loc, lhs, arg1, false);
              /* FALLTHRU */
            case PLUS_EXPR:
              expand_addsub_overflow (loc, code, lhs, arg0, arg1,
                                      unsr_p, unsr_p, unsr_p, false);
              return;
            case MULT_EXPR:
              expand_mul_overflow (loc, lhs, arg0, arg1,
                                   unsr_p, unsr_p, unsr_p, false);
              return;
            default:
              gcc_unreachable ();
            }
        }

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
        {
#ifdef WORD_REGISTER_OPERATIONS
          int p = BITS_PER_WORD;
#else
          int p = precop;
#endif
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      if (prec0 <= precres && prec1 <= precres)
        {
          tree types[2];
          if (unsr_p)
            {
              types[0] = build_nonstandard_integer_type (precres, 0);
              types[1] = type;
            }
          else
            {
              types[0] = type;
              types[1] = build_nonstandard_integer_type (precres, 1);
            }
          arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
          arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
          if (code != MULT_EXPR)
            expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
                                    uns0_p, uns1_p, false);
          else
            expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
                                 uns0_p, uns1_p, false);
          return;
        }

      /* Retry with a wider type.  */
      if (orig_precres == precres)
        {
          int p = MAX (prec0, prec1);
          enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
          tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
                                                        uns0_p && uns1_p
                                                        && unsr_p);
          p = TYPE_PRECISION (optype);
          if (p > precres)
            {
              precres = p;
              unsr_p = TYPE_UNSIGNED (optype);
              type = optype;
              continue;
            }
        }

      gcc_unreachable ();
    }
  while (1);
}
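/* For illustration, a source-level call such as

       bool ovf = __builtin_add_overflow (a, b, &r);

   reaches the expanders below as an ADD_OVERFLOW internal call whose
   result is a complex pair: the __real__ part carries the arithmetic
   result and the __imag__ part the overflow flag, which is what the
   write_complex_part calls above maintain.  */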
/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gcall *)
{
  gcc_unreachable ();
}
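/* Expand MASK_LOAD call STMT: a masked vector load.  Arguments 0 and 1
   of STMT form the address of the source memory and argument 2 is the
   vector mask.  */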
static void
expand_MASK_LOAD (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
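/* Expand MASK_STORE call STMT: a masked vector store.  Arguments 0 and 1
   of STMT form the address of the destination memory, argument 2 is the
   vector mask and argument 3 is the value to store.  */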
static void
expand_MASK_STORE (gcall *stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_ABNORMAL_DISPATCHER (gcall *)
{
}

static void
expand_BUILTIN_EXPECT (gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called.  */

static void
expand_VA_ARG (gcall *stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}