/* Internal functions.
   Copyright (C) 2011-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "ubsan.h"
#include "target.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "diagnostic-core.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
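
/* As an illustration of the DEF_INTERNAL_FN expansions above (the exact
   entries live in internal-fn.def and can differ between GCC versions):
   an entry along the lines of

       DEF_INTERNAL_FN (LOAD_LANES, ECF_CONST | ECF_LEAF, NULL)

   contributes the string "LOAD_LANES" to internal_fn_name_array and its
   ECF_* flags to internal_fn_flags_array, and because its fnspec is NULL,
   init_internal_fns builds no fnspec string for it.  */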

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB.  The insn must exist.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  enum insn_code icode;
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  icode = convert_optab_handler (optab, imode, vmode);
  gcc_assert (icode != CODE_FOR_nothing);
  return icode;
}

/* Expand LOAD_LANES call STMT.  */

static void
expand_LOAD_LANES (gimple stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, vec_load_lanes_optab), 2, ops);
}

/* Expand STORE_LANES call STMT.  */

static void
expand_STORE_LANES (gimple stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
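
/* ANNOTATE marks loop annotations; these calls are consumed earlier in
   the pipeline and must never survive to RTL expansion.  */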
static void
expand_ANNOTATE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */
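/* For example, an unsigned char value zero-extended to a wider signed
   type is known to be non-negative in the wider precision, so such an
   ARG yields 1.  */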

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
	return 2;
      else
	return 1;
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
	 will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
	  && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
	return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      if (TYPE_UNSIGNED (TREE_TYPE (t))
		  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
		return 1;
	      prec = TYPE_PRECISION (TREE_TYPE (t));
	      arg = t;
	      if (++cnt > 30)
		return 3;
	      continue;
	    }
	}
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
	 below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
	return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
	return 2;
    }
  return 3;
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */
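/* For example, the INTEGER_CST 100 needs 7 bits when SIGN is UNSIGNED
   but 8 bits (one extra for the sign bit) when SIGN is SIGNED.  */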

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, SIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      emit_cmp_and_jump_insns (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, NULL_RTX, mode, false, done_label,
			       PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Add add/sub overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
	res = (S) ((U) s1 + (U) s2)
	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
	res = (S) ((U) s1 - (U) s2)
	ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
	res = u1 + u2
	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
	res = u1 - u2
	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
	res = (S) ((U) s1 + u2)
	ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
	t1 = (S) (u2 ^ sgn)
	t2 = s1 + t1
	res = (U) t2 ^ sgn
	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
	res = (S) ((U) s1 - u2)
	ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
	res = (U) s1 - u2
	ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
	res = u1 - (U) s2
	ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
	t1 = u1 ^ sgn
	t2 = t1 - (U) s2
	res = t2 ^ sgn
	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
	res = (U) s1 + (U) s2
	ovf = s2 < 0 ? (s1 | (S) res) < 0 : (s1 & (S) res) < 0
     u1 + u2 -> sr
	res = (S) (u1 + u2)
	ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
	res = (S) (u1 - u2)
	ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
	res = (U) s1 - (U) s2
	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
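
  /* A worked 8-bit example of the s1 + u2 -> ur rule above (sgn = 0x80),
     with s1 = -1 and u2 = 200: t1 = (S) (200 ^ 0x80) = 72, t2 = -1 + 72 = 71
     and res = (U) 71 ^ 0x80 = 199, the exact sum; since t1 >= 0 the
     overflow test t2 < s1 is false, so no overflow is reported.  */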

  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
	 operand to compare against.  For prec <= BITS_PER_WORD, I think
	 preferring REG operand is better over CONST_INT, because
	 the CONST_INT might enlarge the instruction or CSE would need
	 to figure out we'd already loaded it into a register before.
	 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
	 as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      emit_cmp_and_jump_insns (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       NULL_RTX, mode, false, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (tem, op1, GEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	emit_cmp_and_jump_insns (op0, const0_rtx, LT, NULL_RTX, mode, false,
				 do_error, PROB_VERY_UNLIKELY);
      emit_cmp_and_jump_insns (op1, op0, LEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (op0, tem, LTU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
			       do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
	 against.  For prec <= BITS_PER_WORD, I think preferring REG operand
	 is better over CONST_INT, because the CONST_INT might enlarge the
	 instruction or CSE would need to figure out we'd already loaded it
	 into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
	 might be more beneficial, as then the multi-word comparison can be
	 perhaps simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      emit_cmp_and_jump_insns (res, tem, GEU, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the operation is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      rtx tem = op1;
	      op1 = op0;
	      op0 = tem;
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, NULL_RTX,
				   mode, false, do_ior_label, PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, op1, GEU, NULL_RTX, mode, false,
			       op0_geu_op1, PROB_EVEN);
      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed: ;
  enum insn_code icode;
  icode = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
	 the second operand, as subtraction is not commutative) is always
	 non-negative or always negative, we can do just one comparison
	 and conditional jump instead of 2 at runtime, 3 present in the
	 emitted code.  If one of the arguments is CONST_INT, all we
	 need is to make sure it is op1, then the first
	 emit_cmp_and_jump_insns will be just folded.  Otherwise try
	 to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
	{
	  rtx tem = op0;
	  op0 = op1;
	  op1 = tem;
	}
      else if (CONST_INT_P (op1))
	;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
	{
	  pos_neg = get_range_pos_neg (arg0);
	  if (pos_neg != 3)
	    {
	      rtx tem = op0;
	      op0 = op1;
	      op1 = tem;
	    }
	}
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
	pos_neg = get_range_pos_neg (arg1);

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
	emit_cmp_and_jump_insns (op1, const0_rtx, LT, NULL_RTX, mode,
				 false, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? GE : LE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
	{
	  emit_jump (do_error);

	  emit_label (sub_check);
	}

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? LE : GE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      emit_cmp_and_jump_insns (op1, minv, NE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
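
  /* A worked 8-bit example of the s1 * u2 -> sr rule above, with s1 = -1:
     for u2 = 128, res = (S) ((U) 255 * 128) = -128 and u2 == (U) res, so
     no overflow is reported (the exact product is -128); for u2 = 129,
     res = 127 but u2 != (U) res, so overflow is reported (the exact
     product -129 does not fit in sr).  */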

  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
	 canonicalize to the first operand being signed and second
	 unsigned to simplify following code.  */
      rtx tem = op1;
      op1 = op0;
      op0 = tem;
      tree t = arg1;
      arg1 = arg0;
      arg0 = t;
      uns0_p = 0;
      uns1_p = 1;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  emit_cmp_and_jump_insns (op0, const0_rtx, GE, NULL_RTX, mode,
				   false, do_main_label, PROB_VERY_LIKELY);
	  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, mode,
				   false, do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
	     avoid the main code, just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_cmp_and_jump_insns (op0, const0_rtx, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  emit_cmp_and_jump_insns (op0, constm1_rtx, NE, NULL_RTX, mode,
				   false, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  emit_cmp_and_jump_insns (op1, sgn, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this overflows always, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      emit_cmp_and_jump_insns (tem, const0_rtx, EQ, NULL_RTX, mode,
				       false, done_label, PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem, const0_rtx, GE, NULL_RTX, mode, false,
				   after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  emit_cmp_and_jump_insns (tem2, const0_rtx, GE, NULL_RTX, mode, false,
				   do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     overflows always, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
	     is, thus we can keep do_main code oring in overflow as is.  */
	  emit_cmp_and_jump_insns (tem, const0_rtx, EQ, NULL_RTX, mode, false,
				   do_main_label, PROB_VERY_LIKELY);
	  write_complex_part (target, const1_rtx, true);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
	}
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    emit_cmp_and_jump_insns (hipart, const0_rtx, EQ, NULL_RTX, mode,
				     false, done_label, PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				       false, done_label, PROB_VERY_LIKELY);
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
	  rtx lopart0s = lopart0, lopart1s = lopart1;
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      lopart0s = shallow_copy_rtx (lopart0);
	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      lopart1s = shallow_copy_rtx (lopart1);
	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
	  ops.op0 = make_tree (halfstype, lopart0s);
	  ops.op1 = make_tree (halfstype, lopart1s);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = type;
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op1 is not, just swap the arguments and handle it as op1
	     sign/zero extended, op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
	     but op0 is not, prepare larger, hipart and lopart pseudos and
	     handle it together with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  if (!uns)
	    {
	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	      if (larger_sign == 0)
		emit_jump (after_hipart_neg);
	      else if (larger_sign != -1)
		emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX,
					 hmode, false, after_hipart_neg,
					 PROB_EVEN);

	      tem = convert_modes (mode, hmode, lopart, 1);
	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_hipart_neg);

	      /* if (lopart < 0) loxhi -= larger;  */
	      if (smaller_sign == 0)
		emit_jump (after_lopart_neg);
	      else if (smaller_sign != -1)
		emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX,
					 hmode, false, after_lopart_neg,
					 PROB_EVEN);

	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
					 1, OPTAB_DIRECT);
	      emit_move_insn (loxhi, tem);

	      emit_label (after_lopart_neg);
	    }

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
	     if (loxhi >> (bitsize / 2) == 0 (if uns).  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx signbitloxhi = const0_rtx;
	  if (!uns)
	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
					 gen_lowpart (hmode, loxhi),
					 hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign (!uns) or zero (uns)
	     extended from hmode), then perform the full multiplication
	     which will be the result of the operation.
	     The only cases which don't overflow are for signed multiplication
	     some cases where both hipart0 and hipart1 are 0 or -1.
	     For unsigned multiplication when high parts are both non-zero
	     this overflows always.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!uns)
	    {
	      if (!op0_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX,
					   hmode, true, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      if (!op1_medium_p)
		{
		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					     NULL_RTX, 1, OPTAB_DIRECT);
		  emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX,
					   hmode, true, do_error,
					   PROB_VERY_UNLIKELY);
		}

	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
		 the same, overflow happened if res is negative, if they are
		 different, overflow happened if res is positive.  */
	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
		emit_jump (hipart_different);
	      else if (op0_sign == 1 || op1_sign == 1)
		emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
					 true, hipart_different, PROB_EVEN);

	      emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode,
				       false, do_error, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);

	      emit_label (hipart_different);

	      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode,
				       false, do_error, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	    }

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (type, op0);
	  ops.op1 = make_tree (type, op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  gcc_assert (!is_ubsan);
	  ops.code = MULT_EXPR;
	  ops.type = type;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
					 arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    write_complex_part (target, const1_rtx, true);

  /* We're done.  */
  emit_label (done_label);

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_label (all_done_label);
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
    {
      rtx_code_label *all_done_label = gen_label_rtx ();
      rtx_code_label *set_noovf = gen_label_rtx ();
      emit_cmp_and_jump_insns (op1, const0_rtx, GE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_LIKELY);
      write_complex_part (target, const1_rtx, true);
      emit_cmp_and_jump_insns (op0, const0_rtx, EQ, NULL_RTX, mode,
			       false, set_noovf, PROB_VERY_LIKELY);
      emit_cmp_and_jump_insns (op0, constm1_rtx, NE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_UNLIKELY);
      emit_cmp_and_jump_insns (op1, res, NE, NULL_RTX, mode,
			       false, all_done_label, PROB_VERY_UNLIKELY);
      emit_label (set_noovf);
      write_complex_part (target, const0_rtx, true);
      emit_label (all_done_label);
    }

  if (lhs)
    {
      if (is_ubsan)
	emit_move_insn (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Expand UBSAN_CHECK_ADD call STMT.  */

static void
expand_UBSAN_CHECK_ADD (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
			  false, false, false, true);
}

/* Expand UBSAN_CHECK_SUB call STMT.  */

static void
expand_UBSAN_CHECK_SUB (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  if (integer_zerop (arg0))
    expand_neg_overflow (loc, lhs, arg1, true);
  else
    expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
			    false, false, false, true);
}

/* Expand UBSAN_CHECK_MUL call STMT.  */

static void
expand_UBSAN_CHECK_MUL (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
}

/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */

static void
expand_arith_overflow (enum tree_code code, gimple stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
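
  /* For example, a 16-bit signed addition with a 32-bit signed result has
     precop = MAX (16, 16) + 1 = 17 <= 32, so the exact result always fits,
     whereas a 32-bit signed addition with a 32-bit result has precop = 33
     and needs a runtime overflow check.  */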
  int orig_precres = precres;

  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  write_complex_part (target, const0_rtx, true);
	  enum machine_mode mode = TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}

#ifdef WORD_REGISTER_OPERATIONS
      /* For sub-word operations, if target doesn't have them, start
	 with precres widening right away, otherwise do it only
	 if the most simple cases can't be used.  */
      if (orig_precres == precres && precres < BITS_PER_WORD)
	;
      else
#endif
      if ((uns0_p && uns1_p && unsr_p && prec0 <= precres && prec1 <= precres)
	  || ((!uns0_p || !uns1_p) && !unsr_p
	      && prec0 + uns0_p <= precres
	      && prec1 + uns1_p <= precres))
	{
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      if (integer_zerop (arg0) && !unsr_p)
		expand_neg_overflow (loc, lhs, arg1, false);
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1,
				      unsr_p, unsr_p, unsr_p, false);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1,
				   unsr_p, unsr_p, unsr_p, false);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
#ifdef WORD_REGISTER_OPERATIONS
	  int p = BITS_PER_WORD;
#else
	  int p = precop;
#endif
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      gcc_unreachable ();
    }
  while (1);
}

/* Expand ADD_OVERFLOW STMT.  */

static void
expand_ADD_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}

/* Expand SUB_OVERFLOW STMT.  */

static void
expand_SUB_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}

/* Expand MUL_OVERFLOW STMT.  */

static void
expand_MUL_OVERFLOW (gimple stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}

/* This should get folded in tree-vectorizer.c.  */

static void
expand_LOOP_VECTORIZED (gimple stmt ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
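
/* Expand MASK_LOAD call STMT using optab maskload_optab.  */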
static void
expand_MASK_LOAD (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
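
/* Expand MASK_STORE call STMT using optab maskstore_optab.  */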
static void
expand_MASK_STORE (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
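
/* ABNORMAL_DISPATCHER only marks the abnormal dispatcher basic block;
   there is nothing to emit for it.  */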
static void
expand_ABNORMAL_DISPATCHER (gimple)
{
}
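
/* Expand BUILTIN_EXPECT call STMT: just copy the argument value to the
   lhs, since any branch probability hints have been used up earlier.  */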
static void
expand_BUILTIN_EXPECT (gimple stmt)
{
  /* When guessing was done, the hints should be already stripped away.  */
  gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());

  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = const0_rtx;
  rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode,
			 EXPAND_NORMAL);
  if (lhs && val != target)
    emit_move_insn (target, val);
}

/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (gimple stmt)

   where STMT is the statement that performs the call.  */
static void (*const internal_fn_expanders[]) (gimple) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
#undef DEF_INTERNAL_FN
  0
};

/* Expand STMT, which is a call to internal function FN.  */

void
expand_internal_call (gimple stmt)
{
  internal_fn_expanders[(int) gimple_call_internal_fn (stmt)] (stmt);
}