/* Internal functions.
   Copyright (C) 2011-2016 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "ubsan.h"
43 #include "recog.h"
44 #include "builtins.h"
/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};
/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};
/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}
/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}
static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}
/* This should get expanded in the lower pass.  */

static void
expand_FALLTHROUGH (internal_fn, gcall *call)
{
  error_at (gimple_location (call),
	    "invalid use of attribute %<fallthrough%>");
}
/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
	return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      if (++cnt > 30)
		return 3;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}
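
/* Illustrative examples (added for exposition): for an SSA name whose
   recorded value range is [5, 42] in a signed type, get_range_pos_neg
   returns 1; for [-7, -1] it returns 2; for [-1, 1], or when no range
   information is available, it returns 3.  */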
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
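
/* Illustrative example (added for exposition): a 32-bit SSA name known to
   lie in [0, 100] has minimum precision 7 when queried UNSIGNED and 8 when
   queried SIGNED; whenever the requested sign had to be flipped while
   walking the definition chain, one extra bit is added, which is what the
   "prec + (orig_sign != sign)" returns above account for.  */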
/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}
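
/* Note (added for exposition): the *_OVERFLOW internal functions return a
   complex integer pair whose __real__ part is the truncated arithmetic
   result and whose __imag__ part is the overflow flag being set here.  A
   signed 1-bit field can only hold 0 or -1, hence the constm1_rtx special
   case above.  */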
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
	{
	  rtx mask
	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
				    tgtmode);
	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
				      true, OPTAB_LIB_WIDEN);
	}
      else
	{
	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
			       NULL_RTX, 1);
	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
			       NULL_RTX, 0);
	}
      do_compare_rtx_and_jump (res, lres,
			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}
/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
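
  /* Worked example (added for exposition, 8-bit precision, sgn = 0x80):
     for the s1 + u2 -> ur rule with s1 = -1 and u2 = 130,
     t1 = (S) (130 ^ 0x80) = 2, t2 = -1 + 2 = 1 with no signed overflow,
     and res = (U) 1 ^ 0x80 = 129, which is indeed -1 + 130 modulo 256.
     Note that adding sgn modulo 2^prec is the same as xoring it, which is
     why the code below biases op1 with add_optab and later undoes the
     bias under do_xor.  */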
  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }
  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
				       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  struct expand_operand ops[4];
	  rtx_insn *last = get_last_insn ();

	  res = gen_reg_rtx (mode);
	  create_output_operand (&ops[0], res, mode);
	  create_input_operand (&ops[1], op0, mode);
	  create_input_operand (&ops[2], op1, mode);
	  create_fixed_operand (&ops[3], do_error);
	  if (maybe_expand_insn (icode, 4, ops))
	    {
	      last = get_last_insn ();
	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		  && JUMP_P (last)
		  && any_condjump_p (last)
		  && !find_reg_note (last, REG_BR_PROB, 0))
		add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	      goto do_error_label;
	    }

	  delete_insns_since (last);
	}

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }
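
  /* Note (added for exposition): the uaddv4/usubv4 optab probe above lets
     a target expand the whole check as one native arithmetic-and-branch
     pattern, e.g. an addition followed by a jump-on-overflow style
     instruction where the target provides it; the expand_binop sequence
     that follows is the generic fallback when no such pattern exists.  */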
  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }
  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      std::swap (op0, op1);
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL, do_ior_label,
				   PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }
  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
			       op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }
  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed:
  {
    insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
				     : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
      {
	struct expand_operand ops[4];
	rtx_insn *last = get_last_insn ();

	res = gen_reg_rtx (mode);
	create_output_operand (&ops[0], res, mode);
	create_input_operand (&ops[1], op0, mode);
	create_input_operand (&ops[2], op1, mode);
	create_fixed_operand (&ops[3], do_error);
	if (maybe_expand_insn (icode, 4, ops))
	  {
	    last = get_last_insn ();
	    if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		&& JUMP_P (last)
		&& any_condjump_p (last)
		&& !find_reg_note (last, REG_BR_PROB, 0))
	      add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	    emit_jump (done_label);
	    goto do_error_label;
	  }

	delete_insns_since (last);
      }
    /* Compute the operation.  On RTL level, the addition is always
       unsigned.  */
    res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

    /* If we can prove that one of the arguments (for MINUS_EXPR only
       the second operand, as subtraction is not commutative) is always
       non-negative or always negative, we can do just one comparison
       and conditional jump.  */
    int pos_neg = get_range_pos_neg (arg1);
    if (code == PLUS_EXPR)
      {
	int pos_neg0 = get_range_pos_neg (arg0);
	if (pos_neg0 != 3 && pos_neg == 3)
	  {
	    std::swap (op0, op1);
	    pos_neg = pos_neg0;
	  }
      }

    /* Addition overflows if and only if the two operands have the same sign,
       and the result has the opposite sign.  Subtraction overflows if and
       only if the two operands have opposite sign, and the subtrahend has
       the same sign as the result.  Here 0 is counted as positive.  */
    if (pos_neg == 3)
      {
	/* Compute op0 ^ op1 (operands have opposite sign).  */
	rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
				   OPTAB_LIB_WIDEN);

	/* Compute res ^ op1 (result and 2nd operand have opposite sign).  */
	rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
				    OPTAB_LIB_WIDEN);

	rtx tem;
	if (code == PLUS_EXPR)
	  {
	    /* Compute (res ^ op1) & ~(op0 ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }
	else
	  {
	    /* Compute (op0 ^ op1) & ~(res ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }

	/* No overflow if the result has the sign bit cleared.  */
	do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				 NULL, done_label, PROB_VERY_LIKELY);
      }

    /* Compare the result of the operation with the first operand.
       No overflow for addition if second operand is positive and result
       is larger or second operand is negative and result is smaller.
       Likewise for subtraction with sign of second operand flipped.  */
    else
      do_compare_rtx_and_jump (res, op0,
			       (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
			       false, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
  }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}
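
/* Usage note (added for exposition): a source-level call such as

     bool ovf = __builtin_add_overflow (a, b, &r);

   reaches this file as a GIMPLE call to the ADD_OVERFLOW internal function
   whose complex result carries both values, roughly

     _c = .ADD_OVERFLOW (a, b);
     r = REALPART_EXPR <_c>;
     ovf = (bool) IMAGPART_EXPR <_c>;

   and expand_addsub_overflow above is what turns that call into RTL.  */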
/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }
  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
    }
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
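
/* Note (added for exposition): two's complement negation overflows only
   for the most negative value, e.g. -(-128) in 8 bits, so the fallback
   above needs just a single comparison of the operand against
   TYPE_MIN_VALUE rather than any check on the result.  */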
/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);
  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
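
  /* Worked example (added for exposition, 8-bit precision): for the
     s1 * s2 -> ur rule with s1 = -3 and s2 = -5, (s1 & s2) < 0 selects the
     negated operands, so t1 = 3, t2 = 5 and res = 15, the correct unsigned
     value of (-3) * (-5); and since (s1 ^ s2) >= 0, overflow is whatever
     the unsigned main algorithm reports for 3 * 5, i.e. none.  */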
  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);
  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  true);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }
 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }
  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
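	  /* Worked example (added for exposition): for 8-bit signed
	     100 * 100 the widening multiply yields 10000 = 0x2710, so
	     res = 0x10 (16) and hipart = 0x27; a non-overflowing result
	     would need hipart == (res < 0 ? -1 : 0), so the comparisons
	     below do not jump to done_label and control falls through to
	     the do_error path.  */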
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL, done_label,
				     PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);
	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL, large_op0,
				     PROB_UNLIKELY);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, small_op0_large_op1,
				     PROB_UNLIKELY);
	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);
	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, both_ops_large,
				     PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);
	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
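	  /* Algebra note (added for exposition): writing the large operand
	     as larger = hipart * 2^hprec + (unsigned) lopart-of-larger, and
	     using that the small operand equals its sign/zero extended low
	     half, the product modulo 2^prec follows from just the two
	     half-width multiplications below; the !uns fixups afterwards
	     correct for lopart and hipart halves that are negative when
	     viewed as signed values.  */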
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
					 NULL_RTX, NULL, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
	     if (loxhi >> (bitsize / 2) == 0 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
				   NULL_RTX, NULL, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);
	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and hipart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
					   NULL_RTX, NULL, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
					 NULL_RTX, NULL, hipart_different,
					 PROB_EVEN);

	      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
				       NULL_RTX, NULL, do_error,
				       PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }
 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);
  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
			       NULL, set_noovf, PROB_VERY_LIKELY);
      do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
			       NULL, all_done_label, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
			       all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }
  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
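
/* Note (added for exposition): the expander above tries, in order, a
   target [u]mulv4 overflow pattern, a multiplication in the 2x wider mode
   checked via its high half, and a decomposition into half-mode pieces;
   the final plain-multiply branch performs no inline check of its own
   (hence its gcc_assert (!is_ubsan)) and relies on the comparisons emitted
   after done_label for the signedness combinations that reach it.  */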
/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}
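
/* Usage note (added for exposition): with -fsanitize=signed-integer-overflow
   the front end rewrites a plain signed addition such as

     int f (int a, int b) { return a + b; }

   into a UBSAN_CHECK_ADD call, so the overflow branch expanded above ends
   in a call into the ubsan runtime instead of setting a flag.  */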
/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinite-precision result will always fit into the result
	     type, so overflow is impossible; compute the operation directly
	     and store 0 into the overflow flag.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}

      /* For operations with low precision, if the target doesn't have them,
	 start with precres widening right away, otherwise do it only if the
	 simpler cases below can't be used.  */
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
	;  /* Fall through to the widening retries below.  */
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
		&& prec1 <= precres)
	       || ((!uns0_p || !uns1_p) && !unsr_p
		   && prec0 + uns0_p <= precres
		   && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		{
		  expand_neg_overflow (loc, lhs, arg1, false);
		  return;
		}
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				      unsr_p, unsr_p, unsr_p, false);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1,
				   unsr_p, unsr_p, unsr_p, false);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
	  int p = MAX (min_precision, precop);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}

/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

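/* Editorial illustration: a source-level call

     bool ovf = __builtin_mul_overflow (a, b, &r);

   is lowered to a MUL_OVERFLOW internal call whose complex result carries
   both values, roughly:

     _C = MUL_OVERFLOW (a, b);
     r = REALPART_EXPR <_C>;
     ovf = IMAGPART_EXPR <_C> != 0;  */
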
/* Calls to IFN_LOOP_VECTORIZED should be folded away in tree-vectorizer.c,
   so this expander should never be reached.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand MASK_LOAD call STMT using optab OPTAB.  */

static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  /* The call has the form MASK_LOAD (base, align, mask), where argument 1
     is a pointer-typed constant whose value gives the alignment.  */
  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  /* EXPAND_WRITE computes just the address; the masked load insn itself
     performs the memory access.  */
  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
				      TYPE_MODE (TREE_TYPE (maskt))),
	       3, ops);
}

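/* Editorial illustration: the vectorizer emits masked accesses as internal
   calls of roughly the form

     vect_1 = MASK_LOAD (addr, align, mask);
     MASK_STORE (addr, align, mask, vect_2);

   where ADDR is the base pointer, ALIGN a pointer-typed constant carrying
   alias and alignment information, and MASK a vector of conditions; the
   expanders here map them onto the target's maskload/maskstore optabs.  */
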
/* Expand MASK_STORE call STMT using optab OPTAB.  */

static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, reg, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
				      TYPE_MODE (TREE_TYPE (maskt))),
	       3, ops);
}

/* IFN_ABNORMAL_DISPATCHER only marks the dispatcher block for abnormal
   control flow; it expands to no code.  */

static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}

static void
expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
			 EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* IFN_VA_ARG is supposed to be expanded by pass_stdarg, so this dummy
   function should never be called.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Expand the IFN_UNIQUE function according to its first argument.  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      if (targetm.have_unique ())
	pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
	{
	  tree lhs = gimple_call_lhs (stmt);
	  rtx target = const0_rtx;

	  if (lhs)
	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));

	  if (kind == IFN_UNIQUE_OACC_FORK)
	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
	  else
	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
	}
      else
	gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}

/* The size of an OpenACC compute dimension.  */

static void
expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_size ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_size (target, dim));
    }
  else
    /* Without a target insn, each dimension has size 1.  */
    emit_move_insn (target, GEN_INT (1));
}

/* The position of an OpenACC execution engine along one compute axis.  */

static void
expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);

  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  if (targetm.have_oacc_dim_pos ())
    {
      rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
			     VOIDmode, EXPAND_NORMAL);
      emit_insn (targetm.gen_oacc_dim_pos (target, dim));
    }
  else
    /* Without a target insn, the only position along the axis is 0.  */
    emit_move_insn (target, const0_rtx);
}

/* This is expanded by the oacc_device_lower pass.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This too is expanded by the oacc_device_lower pass.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Set errno to EDOM.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
		  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}

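/* Editorial note: IFN_SET_EDOM is presumably generated when a math call
   such as sqrt (-1.0) can be folded at compile time but, under
   -fmath-errno, errno must still be set; the expander above then stores
   EDOM into errno directly instead of keeping the library call.  */
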
/* Expand atomic bit test and set.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and complement.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic bit test and reset.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}

/* Expand atomic compare and exchange.  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}

/* Expand LAUNDER to an assignment, lhs = arg0.  */

static void
expand_LAUNDER (internal_fn, gcall *call)
{
  tree lhs = gimple_call_lhs (call);

  if (!lhs)
    return;

  expand_assignment (lhs, gimple_call_arg (call, 0), false);
}

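/* Editorial note: IFN_LAUNDER implements __builtin_launder, which backs
   C++17's std::launder.  Semantically it just forwards its operand, so a
   plain assignment suffices here; its purpose is served earlier, at the
   GIMPLE level, where the call acts as an optimization barrier.  */
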
/* Expand DIVMOD () using:
   a) the optab handler for udivmod/sdivmod, if it is available;
   b) otherwise, a call to the target-specific divmod libfunc.  */

static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check whether an optab handler exists for the divmod optab in this
     mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate a call to the divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
				   &quotient, &remainder);

  else
    gcc_unreachable ();

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
		       make_tree (TREE_TYPE (arg0), quotient),
		       make_tree (TREE_TYPE (arg1), remainder)),
	       target, VOIDmode, EXPAND_NORMAL);
}

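/* Editorial illustration: the DIVMOD internal function is formed when a
   division and a modulo with the same operands appear together, e.g.

     q = a / b;
     r = a % b;

   so both values can come from one operation; the quotient is returned in
   the real part of the complex result and the remainder in the imaginary
   part.  */
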
/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
			unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);

  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
	create_convert_operand_from (&ops[i + 1], rhs_rtx,
				     TYPE_MODE (rhs_type),
				     TYPE_UNSIGNED (rhs_type));
      else
	create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
	 result to that type.  This is useful for things that return an
	 int regardless of the size of the input.  If the instruction result
	 is smaller than required, assume that it is signed.

	 If the return value has a nonintegral type, its mode must match
	 the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
	{
	  /* If this is a scalar in a register that is stored in a wider
	     mode than the declared mode, compute the result into its
	     declared mode and then convert to the wider mode.  */
	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
	  rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
	  convert_move (SUBREG_REG (lhs_rtx), tmp,
			SUBREG_PROMOTED_SIGN (lhs_rtx));
	}
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
	emit_move_insn (lhs_rtx, ops[0].value);
      else
	{
	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
	  convert_move (lhs_rtx, ops[0].value, 0);
	}
    }
}

/* Expanders for optabs that can use expand_direct_optab_fn.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)

/* RETURN_TYPE and ARGS are a return type and argument list that are
   in principle compatible with FN (which satisfies direct_internal_fn_p).
   Return the types that should be used to determine whether the
   target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
  tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
  return tree_pair (type0, type1);
}

/* CALL is a call whose return type and arguments are in principle
   compatible with FN (which satisfies direct_internal_fn_p).  Return the
   types that should be used to determine whether the target supports FN.  */

tree_pair
direct_internal_fn_types (internal_fn fn, gcall *call)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  tree op0 = (info.type0 < 0
	      ? gimple_call_lhs (call)
	      : gimple_call_arg (call, info.type0));
  tree op1 = (info.type1 < 0
	      ? gimple_call_lhs (call)
	      : gimple_call_arg (call, info.type1));
  return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
}

/* Return true if OPTAB is supported for TYPES (whose modes should be
   the same) when the optimization type is OPT_TYPE.  Used for simple
   direct optabs.  */

static bool
direct_optab_supported_p (direct_optab optab, tree_pair types,
			  optimization_type opt_type)
{
  machine_mode mode = TYPE_MODE (types.first);
  gcc_checking_assert (mode == TYPE_MODE (types.second));
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}

/* Return true if load/store lanes optab OPTAB is supported for
   array type TYPES.first when the optimization type is OPT_TYPE.  */

static bool
multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
				optimization_type opt_type)
{
  gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
  machine_mode imode = TYPE_MODE (types.first);
  machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
  return (convert_optab_handler (optab, imode, vmode, opt_type)
	  != CODE_FOR_nothing);
}

#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p

/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
				optimization_type opt_type)
{
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
						opt_type);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}

/* Return true if FN is supported for type TYPE when the optimization
   type is OPT_TYPE.  The caller knows that the "type0" and "type1"
   fields of FN's direct_internal_fn_info structure are the same.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree type,
				optimization_type opt_type)
{
  const direct_internal_fn_info &info = direct_internal_fn (fn);
  gcc_checking_assert (info.type0 == info.type1);
  return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}

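/* Editorial usage sketch (names illustrative): a pass that wants to know
   whether a direct internal function can be used might ask, e.g.,

     if (direct_internal_fn_supported_p (IFN_SQRT, vectype,
					 OPTIMIZE_FOR_SPEED))
       ... emit an IFN_SQRT call ...

   which checks the optab recorded for IFN_SQRT against
   TYPE_MODE (vectype).  */
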
/* Return true if IFN_SET_EDOM is supported.  */

bool
set_edom_supported_p (void)
{
#ifdef TARGET_EDOM
  return true;
#else
  return false;
#endif
}

#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void \
  expand_##CODE (internal_fn fn, gcall *stmt) \
  { \
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
  }
#include "internal-fn.def"

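/* Editorial illustration: a hypothetical internal-fn.def entry

     DEF_INTERNAL_OPTAB_FN (FOO, ECF_CONST, foo, unary)

   would make the macro above generate expand_FOO, forwarding to
   expand_unary_optab_fn with foo_optab; the earlier expansions of
   DEF_INTERNAL_OPTAB_FN in this file generate the matching
   direct_internal_fn_info entry and supported_p mapping.  */
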
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (internal_fn fn, gcall *stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};

/* Expand STMT as though it were a call to internal function FN.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}