1 /* Internal functions.
2 Copyright (C) 2011-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "stringpool.h"
43 #include "attribs.h"
44 #include "asan.h"
45 #include "ubsan.h"
46 #include "recog.h"
47 #include "builtins.h"
48 #include "optabs-tree.h"
49 #include "gimple-ssa.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "explow.h"
53 #include "rtl-iter.h"
54 #include "gimple-range.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* The names of each internal function, indexed by function number. */
60 const char *const internal_fn_name_array[] = {
61 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
62 #include "internal-fn.def"
63 "<invalid-fn>"
66 /* The ECF_* flags of each internal function, indexed by function number. */
67 const int internal_fn_flags_array[] = {
68 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
69 #include "internal-fn.def"
73 /* Return the internal function called NAME, or IFN_LAST if there's
74 no such function. */
76 internal_fn
77 lookup_internal_fn (const char *name)
79 typedef hash_map<nofree_string_hash, internal_fn> name_to_fn_map_type;
80 static name_to_fn_map_type *name_to_fn_map;
82 if (!name_to_fn_map)
84 name_to_fn_map = new name_to_fn_map_type (IFN_LAST);
85 for (unsigned int i = 0; i < IFN_LAST; ++i)
86 name_to_fn_map->put (internal_fn_name (internal_fn (i)),
87 internal_fn (i));
89 internal_fn *entry = name_to_fn_map->get (name);
90 return entry ? *entry : IFN_LAST;
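/* Illustrative usage sketch, assuming the ADD_OVERFLOW entry from
   internal-fn.def:

     internal_fn ifn = lookup_internal_fn ("ADD_OVERFLOW");

   is expected to return IFN_ADD_OVERFLOW, while an unrecognized name
   returns IFN_LAST.  */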
93 /* Given an internal_fn IFN that is a widening function, return its
94 corresponding _LO and _HI internal_fns in *LO and *HI. */
96 extern void
97 lookup_hilo_internal_fn (internal_fn ifn, internal_fn *lo, internal_fn *hi)
99 gcc_assert (widening_fn_p (ifn));
101 switch (ifn)
103 default:
104 gcc_unreachable ();
105 #undef DEF_INTERNAL_FN
106 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
107 #define DEF_INTERNAL_FN(NAME, FLAGS, TYPE)
108 #define DEF_INTERNAL_WIDENING_OPTAB_FN(NAME, F, S, SO, UO, T) \
109 case IFN_##NAME: \
110 *lo = internal_fn (IFN_##NAME##_LO); \
111 *hi = internal_fn (IFN_##NAME##_HI); \
112 break;
113 #include "internal-fn.def"
114 #undef DEF_INTERNAL_FN
115 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
119 /* Given an internal_fn IFN that is a widening function, return its
120 corresponding _EVEN and _ODD internal_fns in *EVEN and *ODD. */
122 extern void
123 lookup_evenodd_internal_fn (internal_fn ifn, internal_fn *even,
124 internal_fn *odd)
126 gcc_assert (widening_fn_p (ifn));
128 switch (ifn)
130 default:
131 gcc_unreachable ();
132 #undef DEF_INTERNAL_FN
133 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
134 #define DEF_INTERNAL_FN(NAME, FLAGS, TYPE)
135 #define DEF_INTERNAL_WIDENING_OPTAB_FN(NAME, F, S, SO, UO, T) \
136 case IFN_##NAME: \
137 *even = internal_fn (IFN_##NAME##_EVEN); \
138 *odd = internal_fn (IFN_##NAME##_ODD); \
139 break;
140 #include "internal-fn.def"
141 #undef DEF_INTERNAL_FN
142 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
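/* Both lookup routines above are generated from the same
   DEF_INTERNAL_WIDENING_OPTAB_FN entries, so a widening function defined
   there (for illustration, assume a VEC_WIDEN_PLUS-style entry) gets both
   a _LO/_HI pair and an _EVEN/_ODD pair; the two pairs are alternative
   decompositions of the same widening operation into half-width results.  */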
147 /* Fnspec of each internal function, indexed by function number. */
148 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
150 void
151 init_internal_fns ()
153 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
154 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
155 build_string ((int) sizeof (FNSPEC) - 1, FNSPEC ? FNSPEC : "");
156 #include "internal-fn.def"
157 internal_fn_fnspec_array[IFN_LAST] = 0;
160 /* Create static initializers for the information returned by
161 direct_internal_fn. */
162 #define not_direct { -2, -2, false }
163 #define mask_load_direct { -1, 2, false }
164 #define load_lanes_direct { -1, -1, false }
165 #define mask_load_lanes_direct { -1, -1, false }
166 #define gather_load_direct { 3, 1, false }
167 #define len_load_direct { -1, -1, false }
168 #define mask_len_load_direct { -1, 4, false }
169 #define mask_store_direct { 3, 2, false }
170 #define store_lanes_direct { 0, 0, false }
171 #define mask_store_lanes_direct { 0, 0, false }
172 #define vec_cond_mask_direct { 1, 0, false }
173 #define vec_cond_direct { 2, 0, false }
174 #define scatter_store_direct { 3, 1, false }
175 #define len_store_direct { 3, 3, false }
176 #define mask_len_store_direct { 4, 5, false }
177 #define vec_set_direct { 3, 3, false }
178 #define vec_extract_direct { 0, -1, false }
179 #define unary_direct { 0, 0, true }
180 #define unary_convert_direct { -1, 0, true }
181 #define binary_direct { 0, 0, true }
182 #define ternary_direct { 0, 0, true }
183 #define cond_unary_direct { 1, 1, true }
184 #define cond_binary_direct { 1, 1, true }
185 #define cond_ternary_direct { 1, 1, true }
186 #define cond_len_unary_direct { 1, 1, true }
187 #define cond_len_binary_direct { 1, 1, true }
188 #define cond_len_ternary_direct { 1, 1, true }
189 #define while_direct { 0, 2, false }
190 #define fold_extract_direct { 2, 2, false }
191 #define fold_len_extract_direct { 2, 2, false }
192 #define fold_left_direct { 1, 1, false }
193 #define mask_fold_left_direct { 1, 1, false }
194 #define mask_len_fold_left_direct { 1, 1, false }
195 #define check_ptrs_direct { 0, 0, false }
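/* The triples above have the { type0, type1, vectorizable } shape of
   direct_internal_fn_info (see internal-fn.h): a non-negative value picks
   the call argument whose type supplies an optab mode, -1 means the mode
   comes from the return type, and the -2/-2 pair of not_direct marks
   functions with no direct optab mapping.  */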
197 const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
198 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
199 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
200 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
201 UNSIGNED_OPTAB, TYPE) TYPE##_direct,
202 #include "internal-fn.def"
203 not_direct
206 /* Expand STMT using instruction ICODE. The instruction has NOUTPUTS
207 output operands and NINPUTS input operands, where NOUTPUTS is either
208 0 or 1. The output operand (if any) comes first, followed by the
209 NINPUTS input operands. */
211 static void
212 expand_fn_using_insn (gcall *stmt, insn_code icode, unsigned int noutputs,
213 unsigned int ninputs)
215 gcc_assert (icode != CODE_FOR_nothing);
217 expand_operand *ops = XALLOCAVEC (expand_operand, noutputs + ninputs);
218 unsigned int opno = 0;
219 rtx lhs_rtx = NULL_RTX;
220 tree lhs = gimple_call_lhs (stmt);
222 if (noutputs)
224 gcc_assert (noutputs == 1);
225 if (lhs)
226 lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
228 /* Do not assign directly to a promoted subreg, since there is no
229 guarantee that the instruction will leave the upper bits of the
230 register in the state required by SUBREG_PROMOTED_SIGN. */
231 rtx dest = lhs_rtx;
232 if (dest && GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
233 dest = NULL_RTX;
234 create_output_operand (&ops[opno], dest,
235 insn_data[icode].operand[opno].mode);
236 opno += 1;
238 else
239 gcc_assert (!lhs);
241 for (unsigned int i = 0; i < ninputs; ++i)
243 tree rhs = gimple_call_arg (stmt, i);
244 tree rhs_type = TREE_TYPE (rhs);
245 rtx rhs_rtx = expand_normal (rhs);
246 if (INTEGRAL_TYPE_P (rhs_type))
247 create_convert_operand_from (&ops[opno], rhs_rtx,
248 TYPE_MODE (rhs_type),
249 TYPE_UNSIGNED (rhs_type));
250 else
251 create_input_operand (&ops[opno], rhs_rtx, TYPE_MODE (rhs_type));
252 opno += 1;
255 gcc_assert (opno == noutputs + ninputs);
256 expand_insn (icode, opno, ops);
257 if (lhs_rtx && !rtx_equal_p (lhs_rtx, ops[0].value))
259 /* If the return value has an integral type, convert the instruction
260 result to that type. This is useful for things that return an
261 int regardless of the size of the input. If the instruction result
262 is smaller than required, assume that it is signed.
264 If the return value has a nonintegral type, its mode must match
265 the instruction result. */
266 if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
268 /* If this is a scalar in a register that is stored in a wider
269 mode than the declared mode, compute the result into its
270 declared mode and then convert to the wider mode. */
271 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
272 rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
273 convert_move (SUBREG_REG (lhs_rtx), tmp,
274 SUBREG_PROMOTED_SIGN (lhs_rtx));
276 else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
277 emit_move_insn (lhs_rtx, ops[0].value);
278 else
280 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
281 convert_move (lhs_rtx, ops[0].value, 0);
286 /* ARRAY_TYPE is an array of vector modes. Return the associated insn
287 for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none. */
289 static enum insn_code
290 get_multi_vector_move (tree array_type, convert_optab optab)
292 machine_mode imode;
293 machine_mode vmode;
295 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
296 imode = TYPE_MODE (array_type);
297 vmode = TYPE_MODE (TREE_TYPE (array_type));
299 return convert_optab_handler (optab, imode, vmode);
302 /* Add the mask and len arguments from call STMT to the OPS array and return the updated operand count. */
304 static unsigned int
305 add_mask_and_len_args (expand_operand *ops, unsigned int opno, gcall *stmt)
307 internal_fn ifn = gimple_call_internal_fn (stmt);
308 int len_index = internal_fn_len_index (ifn);
309 /* The BIAS argument always immediately follows LEN. */
310 int bias_index = len_index + 1;
311 int mask_index = internal_fn_mask_index (ifn);
312 /* The order of the arguments is always {len, bias, mask}. */
313 if (mask_index >= 0)
315 tree mask = gimple_call_arg (stmt, mask_index);
316 rtx mask_rtx = expand_normal (mask);
317 create_input_operand (&ops[opno++], mask_rtx,
318 TYPE_MODE (TREE_TYPE (mask)));
320 if (len_index >= 0)
322 tree len = gimple_call_arg (stmt, len_index);
323 rtx len_rtx = expand_normal (len);
324 create_convert_operand_from (&ops[opno++], len_rtx,
325 TYPE_MODE (TREE_TYPE (len)),
326 TYPE_UNSIGNED (TREE_TYPE (len)));
327 tree biast = gimple_call_arg (stmt, bias_index);
328 rtx bias = expand_normal (biast);
329 create_input_operand (&ops[opno++], bias, QImode);
331 return opno;
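/* Sketch of the intended use (an assumption based on the callers further
   down): expanders for masked and length-controlled loads and stores call
   this helper to append their trailing operands, so the target pattern
   receives the mask first, then the length as a convert operand, and
   finally the QImode bias, with the call-argument positions discovered via
   internal_fn_mask_index and internal_fn_len_index rather than hard-coded.  */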
334 /* Expand LOAD_LANES call STMT using optab OPTAB. */
336 static void
337 expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
339 class expand_operand ops[2];
340 tree type, lhs, rhs;
341 rtx target, mem;
343 lhs = gimple_call_lhs (stmt);
344 rhs = gimple_call_arg (stmt, 0);
345 type = TREE_TYPE (lhs);
347 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
348 mem = expand_normal (rhs);
350 gcc_assert (MEM_P (mem));
351 PUT_MODE (mem, TYPE_MODE (type));
353 create_output_operand (&ops[0], target, TYPE_MODE (type));
354 create_fixed_operand (&ops[1], mem);
355 expand_insn (get_multi_vector_move (type, optab), 2, ops);
356 if (!rtx_equal_p (target, ops[0].value))
357 emit_move_insn (target, ops[0].value);
360 /* Expand STORE_LANES call STMT using optab OPTAB. */
362 static void
363 expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
365 class expand_operand ops[2];
366 tree type, lhs, rhs;
367 rtx target, reg;
369 lhs = gimple_call_lhs (stmt);
370 rhs = gimple_call_arg (stmt, 0);
371 type = TREE_TYPE (rhs);
373 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
374 reg = expand_normal (rhs);
376 gcc_assert (MEM_P (target));
377 PUT_MODE (target, TYPE_MODE (type));
379 create_fixed_operand (&ops[0], target);
380 create_input_operand (&ops[1], reg, TYPE_MODE (type));
381 expand_insn (get_multi_vector_move (type, optab), 2, ops);
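/* Background note (an assumption about typical targets, not stated in this
   file): the load-lanes/store-lanes optabs expanded by the two functions
   above correspond to interleaving instructions such as AArch64 LD2/LD3/LD4
   and ST2/ST3/ST4, which is why the GIMPLE-level type is an array of
   vectors and get_multi_vector_move keys the optab on both the array mode
   and the element vector mode.  */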
384 static void
385 expand_ANNOTATE (internal_fn, gcall *)
387 gcc_unreachable ();
390 /* This should get expanded in omp_device_lower pass. */
392 static void
393 expand_GOMP_USE_SIMT (internal_fn, gcall *)
395 gcc_unreachable ();
398 /* This should get expanded in omp_device_lower pass. */
400 static void
401 expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
403 gcc_unreachable ();
406 /* Allocate per-lane storage and begin non-uniform execution region. */
408 static void
409 expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
411 rtx target;
412 tree lhs = gimple_call_lhs (stmt);
413 if (lhs)
414 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
415 else
416 target = gen_reg_rtx (Pmode);
417 rtx size = expand_normal (gimple_call_arg (stmt, 0));
418 rtx align = expand_normal (gimple_call_arg (stmt, 1));
419 class expand_operand ops[3];
420 create_output_operand (&ops[0], target, Pmode);
421 create_input_operand (&ops[1], size, Pmode);
422 create_input_operand (&ops[2], align, Pmode);
423 gcc_assert (targetm.have_omp_simt_enter ());
424 expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
425 if (!rtx_equal_p (target, ops[0].value))
426 emit_move_insn (target, ops[0].value);
429 /* Deallocate per-lane storage and leave non-uniform execution region. */
431 static void
432 expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
434 gcc_checking_assert (!gimple_call_lhs (stmt));
435 rtx arg = expand_normal (gimple_call_arg (stmt, 0));
436 class expand_operand ops[1];
437 create_input_operand (&ops[0], arg, Pmode);
438 gcc_assert (targetm.have_omp_simt_exit ());
439 expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
442 /* Lane index on SIMT targets: thread index in the warp on NVPTX. On targets
443 without SIMT execution this should be expanded in omp_device_lower pass. */
445 static void
446 expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
448 tree lhs = gimple_call_lhs (stmt);
449 if (!lhs)
450 return;
452 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
453 gcc_assert (targetm.have_omp_simt_lane ());
454 emit_insn (targetm.gen_omp_simt_lane (target));
457 /* This should get expanded in omp_device_lower pass. */
459 static void
460 expand_GOMP_SIMT_VF (internal_fn, gcall *)
462 gcc_unreachable ();
465 /* This should get expanded in omp_device_lower pass. */
467 static void
468 expand_GOMP_TARGET_REV (internal_fn, gcall *)
470 gcc_unreachable ();
473 /* Lane index of the first SIMT lane that supplies a non-zero argument.
474 This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
475 lane that executed the last iteration for handling OpenMP lastprivate. */
477 static void
478 expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
480 tree lhs = gimple_call_lhs (stmt);
481 if (!lhs)
482 return;
484 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
485 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
486 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
487 class expand_operand ops[2];
488 create_output_operand (&ops[0], target, mode);
489 create_input_operand (&ops[1], cond, mode);
490 gcc_assert (targetm.have_omp_simt_last_lane ());
491 expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
492 if (!rtx_equal_p (target, ops[0].value))
493 emit_move_insn (target, ops[0].value);
496 /* Non-transparent predicate used in SIMT lowering of OpenMP "ordered". */
498 static void
499 expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
501 tree lhs = gimple_call_lhs (stmt);
502 if (!lhs)
503 return;
505 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
506 rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
507 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
508 class expand_operand ops[2];
509 create_output_operand (&ops[0], target, mode);
510 create_input_operand (&ops[1], ctr, mode);
511 gcc_assert (targetm.have_omp_simt_ordered ());
512 expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
513 if (!rtx_equal_p (target, ops[0].value))
514 emit_move_insn (target, ops[0].value);
517 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
518 any lane supplies a non-zero argument. */
520 static void
521 expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
523 tree lhs = gimple_call_lhs (stmt);
524 if (!lhs)
525 return;
527 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
528 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
529 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
530 class expand_operand ops[2];
531 create_output_operand (&ops[0], target, mode);
532 create_input_operand (&ops[1], cond, mode);
533 gcc_assert (targetm.have_omp_simt_vote_any ());
534 expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
535 if (!rtx_equal_p (target, ops[0].value))
536 emit_move_insn (target, ops[0].value);
539 /* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
540 is destination lane index XOR given offset. */
542 static void
543 expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
545 tree lhs = gimple_call_lhs (stmt);
546 if (!lhs)
547 return;
549 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
550 rtx src = expand_normal (gimple_call_arg (stmt, 0));
551 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
552 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
553 class expand_operand ops[3];
554 create_output_operand (&ops[0], target, mode);
555 create_input_operand (&ops[1], src, mode);
556 create_input_operand (&ops[2], idx, SImode);
557 gcc_assert (targetm.have_omp_simt_xchg_bfly ());
558 expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
559 if (!rtx_equal_p (target, ops[0].value))
560 emit_move_insn (target, ops[0].value);
563 /* Exchange between SIMT lanes according to given source lane index. */
565 static void
566 expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
568 tree lhs = gimple_call_lhs (stmt);
569 if (!lhs)
570 return;
572 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
573 rtx src = expand_normal (gimple_call_arg (stmt, 0));
574 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
575 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
576 class expand_operand ops[3];
577 create_output_operand (&ops[0], target, mode);
578 create_input_operand (&ops[1], src, mode);
579 create_input_operand (&ops[2], idx, SImode);
580 gcc_assert (targetm.have_omp_simt_xchg_idx ());
581 expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
582 if (!rtx_equal_p (target, ops[0].value))
583 emit_move_insn (target, ops[0].value);
586 /* This should get expanded in adjust_simduid_builtins. */
588 static void
589 expand_GOMP_SIMD_LANE (internal_fn, gcall *)
591 gcc_unreachable ();
594 /* This should get expanded in adjust_simduid_builtins. */
596 static void
597 expand_GOMP_SIMD_VF (internal_fn, gcall *)
599 gcc_unreachable ();
602 /* This should get expanded in adjust_simduid_builtins. */
604 static void
605 expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
607 gcc_unreachable ();
610 /* This should get expanded in adjust_simduid_builtins. */
612 static void
613 expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
615 gcc_unreachable ();
618 /* This should get expanded in adjust_simduid_builtins. */
620 static void
621 expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
623 gcc_unreachable ();
626 /* This should get expanded in the sanopt pass. */
628 static void
629 expand_UBSAN_NULL (internal_fn, gcall *)
631 gcc_unreachable ();
634 /* This should get expanded in the sanopt pass. */
636 static void
637 expand_UBSAN_BOUNDS (internal_fn, gcall *)
639 gcc_unreachable ();
642 /* This should get expanded in the sanopt pass. */
644 static void
645 expand_UBSAN_VPTR (internal_fn, gcall *)
647 gcc_unreachable ();
650 /* This should get expanded in the sanopt pass. */
652 static void
653 expand_UBSAN_PTR (internal_fn, gcall *)
655 gcc_unreachable ();
658 /* This should get expanded in the sanopt pass. */
660 static void
661 expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
663 gcc_unreachable ();
666 /* This should get expanded in the sanopt pass. */
668 static void
669 expand_HWASAN_CHECK (internal_fn, gcall *)
671 gcc_unreachable ();
674 /* For hwasan stack tagging:
675 Clear tags on the dynamically allocated space.
676 For use after an object dynamically allocated on the stack goes out of
677 scope. */
678 static void
679 expand_HWASAN_ALLOCA_UNPOISON (internal_fn, gcall *gc)
681 gcc_assert (Pmode == ptr_mode);
682 tree restored_position = gimple_call_arg (gc, 0);
683 rtx restored_rtx = expand_expr (restored_position, NULL_RTX, VOIDmode,
684 EXPAND_NORMAL);
685 rtx func = init_one_libfunc ("__hwasan_tag_memory");
686 rtx off = expand_simple_binop (Pmode, MINUS, restored_rtx,
687 stack_pointer_rtx, NULL_RTX, 0,
688 OPTAB_WIDEN);
689 emit_library_call_value (func, NULL_RTX, LCT_NORMAL, VOIDmode,
690 virtual_stack_dynamic_rtx, Pmode,
691 HWASAN_STACK_BACKGROUND, QImode,
692 off, Pmode);
695 /* For hwasan stack tagging:
696 Return a tag to be used for a dynamic allocation. */
697 static void
698 expand_HWASAN_CHOOSE_TAG (internal_fn, gcall *gc)
700 tree tag = gimple_call_lhs (gc);
701 rtx target = expand_expr (tag, NULL_RTX, VOIDmode, EXPAND_NORMAL);
702 machine_mode mode = GET_MODE (target);
703 gcc_assert (mode == QImode);
705 rtx base_tag = targetm.memtag.extract_tag (hwasan_frame_base (), NULL_RTX);
706 gcc_assert (base_tag);
707 rtx tag_offset = gen_int_mode (hwasan_current_frame_tag (), QImode);
708 rtx chosen_tag = expand_simple_binop (QImode, PLUS, base_tag, tag_offset,
709 target, /* unsignedp = */1,
710 OPTAB_WIDEN);
711 chosen_tag = hwasan_truncate_to_tag_size (chosen_tag, target);
713 /* Really need to put the tag into the `target` RTX. */
714 if (chosen_tag != target)
716 rtx temp = chosen_tag;
717 gcc_assert (GET_MODE (chosen_tag) == mode);
718 emit_move_insn (target, temp);
721 hwasan_increment_frame_tag ();
724 /* For hwasan stack tagging:
725 Tag a region of space in the shadow stack according to the base pointer of
726 an object on the stack. N.b. the length provided in the internal call is
727 required to be aligned to HWASAN_TAG_GRANULE_SIZE. */
728 static void
729 expand_HWASAN_MARK (internal_fn, gcall *gc)
731 gcc_assert (ptr_mode == Pmode);
732 HOST_WIDE_INT flag = tree_to_shwi (gimple_call_arg (gc, 0));
733 bool is_poison = ((asan_mark_flags)flag) == ASAN_MARK_POISON;
735 tree base = gimple_call_arg (gc, 1);
736 gcc_checking_assert (TREE_CODE (base) == ADDR_EXPR);
737 rtx base_rtx = expand_normal (base);
739 rtx tag = is_poison ? HWASAN_STACK_BACKGROUND
740 : targetm.memtag.extract_tag (base_rtx, NULL_RTX);
741 rtx address = targetm.memtag.untagged_pointer (base_rtx, NULL_RTX);
743 tree len = gimple_call_arg (gc, 2);
744 rtx r_len = expand_normal (len);
746 rtx func = init_one_libfunc ("__hwasan_tag_memory");
747 emit_library_call (func, LCT_NORMAL, VOIDmode, address, Pmode,
748 tag, QImode, r_len, Pmode);
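/* Worked example (illustrative; assumes the usual 16-byte tag granule):
   poisoning a 32-byte stack object results in a call along the lines of
   __hwasan_tag_memory (addr, background_tag, 32), colouring two shadow
   granules, which is why the comment above requires the length to be a
   multiple of HWASAN_TAG_GRANULE_SIZE.  */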
751 /* For hwasan stack tagging:
752 Store a tag into a pointer. */
753 static void
754 expand_HWASAN_SET_TAG (internal_fn, gcall *gc)
756 gcc_assert (ptr_mode == Pmode);
757 tree g_target = gimple_call_lhs (gc);
758 tree g_ptr = gimple_call_arg (gc, 0);
759 tree g_tag = gimple_call_arg (gc, 1);
761 rtx ptr = expand_normal (g_ptr);
762 rtx tag = expand_expr (g_tag, NULL_RTX, QImode, EXPAND_NORMAL);
763 rtx target = expand_normal (g_target);
765 rtx untagged = targetm.memtag.untagged_pointer (ptr, target);
766 rtx tagged_value = targetm.memtag.set_tag (untagged, tag, target);
767 if (tagged_value != target)
768 emit_move_insn (target, tagged_value);
771 /* This should get expanded in the sanopt pass. */
773 static void
774 expand_ASAN_CHECK (internal_fn, gcall *)
776 gcc_unreachable ();
779 /* This should get expanded in the sanopt pass. */
781 static void
782 expand_ASAN_MARK (internal_fn, gcall *)
784 gcc_unreachable ();
787 /* This should get expanded in the sanopt pass. */
789 static void
790 expand_ASAN_POISON (internal_fn, gcall *)
792 gcc_unreachable ();
795 /* This should get expanded in the sanopt pass. */
797 static void
798 expand_ASAN_POISON_USE (internal_fn, gcall *)
800 gcc_unreachable ();
803 /* This should get expanded in the tsan pass. */
805 static void
806 expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
808 gcc_unreachable ();
811 /* This should get expanded in the lower pass. */
813 static void
814 expand_FALLTHROUGH (internal_fn, gcall *call)
816 error_at (gimple_location (call),
817 "invalid use of attribute %<fallthrough%>");
820 /* Return minimum precision needed to represent all values
821 of ARG in SIGNed integral type. */
823 static int
824 get_min_precision (tree arg, signop sign)
826 int prec = TYPE_PRECISION (TREE_TYPE (arg));
827 int cnt = 0;
828 signop orig_sign = sign;
829 if (TREE_CODE (arg) == INTEGER_CST)
831 int p;
832 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
834 widest_int w = wi::to_widest (arg);
835 w = wi::ext (w, prec, sign);
836 p = wi::min_precision (w, sign);
838 else
839 p = wi::min_precision (wi::to_wide (arg), sign);
840 return MIN (p, prec);
842 while (CONVERT_EXPR_P (arg)
843 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
844 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
846 arg = TREE_OPERAND (arg, 0);
847 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
849 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
850 sign = UNSIGNED;
851 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
852 return prec + (orig_sign != sign);
853 prec = TYPE_PRECISION (TREE_TYPE (arg));
855 if (++cnt > 30)
856 return prec + (orig_sign != sign);
858 if (CONVERT_EXPR_P (arg)
859 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
860 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) > prec)
862 /* We have e.g. (unsigned short) y_2 where int y_2 = (int) x_1(D);
863 If y_2's min precision is smaller than prec, return that. */
864 int oprec = get_min_precision (TREE_OPERAND (arg, 0), sign);
865 if (oprec < prec)
866 return oprec + (orig_sign != sign);
868 if (TREE_CODE (arg) != SSA_NAME)
869 return prec + (orig_sign != sign);
870 value_range r;
871 while (!get_global_range_query ()->range_of_expr (r, arg)
872 || r.varying_p ()
873 || r.undefined_p ())
875 gimple *g = SSA_NAME_DEF_STMT (arg);
876 if (is_gimple_assign (g)
877 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
879 tree t = gimple_assign_rhs1 (g);
880 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
881 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
883 arg = t;
884 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
886 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
887 sign = UNSIGNED;
888 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
889 return prec + (orig_sign != sign);
890 prec = TYPE_PRECISION (TREE_TYPE (arg));
892 if (++cnt > 30)
893 return prec + (orig_sign != sign);
894 continue;
897 return prec + (orig_sign != sign);
899 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
901 int p1 = wi::min_precision (r.lower_bound (), sign);
902 int p2 = wi::min_precision (r.upper_bound (), sign);
903 p1 = MAX (p1, p2);
904 prec = MIN (prec, p1);
906 else if (sign == UNSIGNED && !wi::neg_p (r.lower_bound (), SIGNED))
908 int p = wi::min_precision (r.upper_bound (), UNSIGNED);
909 prec = MIN (prec, p);
911 return prec + (orig_sign != sign);
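/* Worked example (illustrative): for an unsigned SSA name whose recorded
   global range is [0, 100], the UNSIGNED branch above computes
   wi::min_precision (100, UNSIGNED) == 7, so 7 bits suffice; the trailing
   "+ (orig_sign != sign)" adds one extra bit whenever the walk had to
   switch away from the signedness the caller asked for.  */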
914 /* Helper for expand_*_overflow. Set the __imag__ part to true
915 (1 except for signed:1 type, in which case store -1). */
917 static void
918 expand_arith_set_overflow (tree lhs, rtx target)
920 if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
921 && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
922 write_complex_part (target, constm1_rtx, true, false);
923 else
924 write_complex_part (target, const1_rtx, true, false);
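/* The __real__/__imag__ split used here and in the helpers below mirrors
   how the *_OVERFLOW internal calls return their value: the real part is
   the arithmetic result and the imaginary part is the overflow flag that
   is later read back with REALPART_EXPR/IMAGPART_EXPR.  */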
927 /* Helper for expand_*_overflow. Store RES into the __real__ part
928 of TARGET. If RES has larger MODE than __real__ part of TARGET,
929 set the __imag__ part to 1 if RES doesn't fit into it. Similarly
930 if LHS has smaller precision than its mode. */
932 static void
933 expand_arith_overflow_result_store (tree lhs, rtx target,
934 scalar_int_mode mode, rtx res)
936 scalar_int_mode tgtmode
937 = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
938 rtx lres = res;
939 if (tgtmode != mode)
941 rtx_code_label *done_label = gen_label_rtx ();
942 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
943 lres = convert_modes (tgtmode, mode, res, uns);
944 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
945 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
946 EQ, true, mode, NULL_RTX, NULL, done_label,
947 profile_probability::very_likely ());
948 expand_arith_set_overflow (lhs, target);
949 emit_label (done_label);
951 int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
952 int tgtprec = GET_MODE_PRECISION (tgtmode);
953 if (prec < tgtprec)
955 rtx_code_label *done_label = gen_label_rtx ();
956 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
957 res = lres;
958 if (uns)
960 rtx mask
961 = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
962 tgtmode);
963 lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
964 true, OPTAB_LIB_WIDEN);
966 else
968 lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
969 NULL_RTX, 1);
970 lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
971 NULL_RTX, 0);
973 do_compare_rtx_and_jump (res, lres,
974 EQ, true, tgtmode, NULL_RTX, NULL, done_label,
975 profile_probability::very_likely ());
976 expand_arith_set_overflow (lhs, target);
977 emit_label (done_label);
979 write_complex_part (target, lres, false, false);
982 /* Helper for expand_*_overflow. Store RES into TARGET. */
984 static void
985 expand_ubsan_result_store (rtx target, rtx res)
987 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
988 /* If this is a scalar in a register that is stored in a wider mode
989 than the declared mode, compute the result into its declared mode
990 and then convert to the wider mode. Our value is the computed
991 expression. */
992 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
993 else
994 emit_move_insn (target, res);
997 /* Add sub/add overflow checking to the statement STMT.
998 CODE says whether the operation is + or -. */
1000 void
1001 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
1002 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
1003 bool uns1_p, bool is_ubsan, tree *datap)
1005 rtx res, target = NULL_RTX;
1006 tree fn;
1007 rtx_code_label *done_label = gen_label_rtx ();
1008 rtx_code_label *do_error = gen_label_rtx ();
1009 do_pending_stack_adjust ();
1010 rtx op0 = expand_normal (arg0);
1011 rtx op1 = expand_normal (arg1);
1012 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1013 int prec = GET_MODE_PRECISION (mode);
1014 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1015 bool do_xor = false;
1017 if (is_ubsan)
1018 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1020 if (lhs)
1022 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1023 if (!is_ubsan)
1024 write_complex_part (target, const0_rtx, true, false);
1027 /* We assume both operands and result have the same precision
1028 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1029 with that precision, U for unsigned type with that precision,
1030 sgn for unsigned most significant bit in that precision.
1031 s1 is signed first operand, u1 is unsigned first operand,
1032 s2 is signed second operand, u2 is unsigned second operand,
1033 sr is signed result, ur is unsigned result and the following
1034 rules say how to compute result (which is always result of
1035 the operands as if both were unsigned, cast to the right
1036 signedness) and how to compute whether operation overflowed.
1038 s1 + s2 -> sr
1039 res = (S) ((U) s1 + (U) s2)
1040 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
1041 s1 - s2 -> sr
1042 res = (S) ((U) s1 - (U) s2)
1043 ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
1044 u1 + u2 -> ur
1045 res = u1 + u2
1046 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
1047 u1 - u2 -> ur
1048 res = u1 - u2
1049 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
1050 s1 + u2 -> sr
1051 res = (S) ((U) s1 + u2)
1052 ovf = ((U) res ^ sgn) < u2
1053 s1 + u2 -> ur
1054 t1 = (S) (u2 ^ sgn)
1055 t2 = s1 + t1
1056 res = (U) t2 ^ sgn
1057 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
1058 s1 - u2 -> sr
1059 res = (S) ((U) s1 - u2)
1060 ovf = u2 > ((U) s1 ^ sgn)
1061 s1 - u2 -> ur
1062 res = (U) s1 - u2
1063 ovf = s1 < 0 || u2 > (U) s1
1064 u1 - s2 -> sr
1065 res = u1 - (U) s2
1066 ovf = u1 >= ((U) s2 ^ sgn)
1067 u1 - s2 -> ur
1068 t1 = u1 ^ sgn
1069 t2 = t1 - (U) s2
1070 res = t2 ^ sgn
1071 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
1072 s1 + s2 -> ur
1073 res = (U) s1 + (U) s2
1074 ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
1075 u1 + u2 -> sr
1076 res = (S) (u1 + u2)
1077 ovf = (U) res < u2 || res < 0
1078 u1 - u2 -> sr
1079 res = (S) (u1 - u2)
1080 ovf = u1 >= u2 ? res < 0 : res >= 0
1081 s1 - s2 -> ur
1082 res = (U) s1 - (U) s2
1083 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
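/* A concrete 8-bit instance of the "u1 + u2 -> ur" rule above, added for
   illustration: u1 = 200, u2 = 100 gives res = (200 + 100) mod 256 = 44,
   and res < u1 exposes the wrap-around; this is exactly the GEU/LEU
   comparison emitted for the unsigned case further down.  */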
1085 if (code == PLUS_EXPR && uns0_p && !uns1_p)
1087 /* PLUS_EXPR is commutative, if operand signedness differs,
1088 canonicalize to the first operand being signed and second
1089 unsigned to simplify following code. */
1090 std::swap (op0, op1);
1091 std::swap (arg0, arg1);
1092 uns0_p = false;
1093 uns1_p = true;
1096 /* u1 +- u2 -> ur */
1097 if (uns0_p && uns1_p && unsr_p)
1099 insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
1100 : usubv4_optab, mode);
1101 if (icode != CODE_FOR_nothing)
1103 class expand_operand ops[4];
1104 rtx_insn *last = get_last_insn ();
1106 res = gen_reg_rtx (mode);
1107 create_output_operand (&ops[0], res, mode);
1108 create_input_operand (&ops[1], op0, mode);
1109 create_input_operand (&ops[2], op1, mode);
1110 create_fixed_operand (&ops[3], do_error);
1111 if (maybe_expand_insn (icode, 4, ops))
1113 last = get_last_insn ();
1114 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1115 && JUMP_P (last)
1116 && any_condjump_p (last)
1117 && !find_reg_note (last, REG_BR_PROB, 0))
1118 add_reg_br_prob_note (last,
1119 profile_probability::very_unlikely ());
1120 emit_jump (done_label);
1121 goto do_error_label;
1124 delete_insns_since (last);
1127 /* Compute the operation. On RTL level, the addition is always
1128 unsigned. */
1129 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1130 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1131 rtx tem = op0;
1132 /* For PLUS_EXPR, the operation is commutative, so we can pick
1133 operand to compare against. For prec <= BITS_PER_WORD, I think
1134 preferring REG operand is better over CONST_INT, because
1135 the CONST_INT might enlarge the instruction or CSE would need
1136 to figure out we'd already loaded it into a register before.
1137 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
1138 as then the multi-word comparison can be perhaps simplified. */
1139 if (code == PLUS_EXPR
1140 && (prec <= BITS_PER_WORD
1141 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
1142 : CONST_SCALAR_INT_P (op1)))
1143 tem = op1;
1144 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
1145 true, mode, NULL_RTX, NULL, done_label,
1146 profile_probability::very_likely ());
1147 goto do_error_label;
1150 /* s1 +- u2 -> sr */
1151 if (!uns0_p && uns1_p && !unsr_p)
1153 /* Compute the operation. On RTL level, the addition is always
1154 unsigned. */
1155 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1156 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1157 rtx tem = expand_binop (mode, add_optab,
1158 code == PLUS_EXPR ? res : op0, sgn,
1159 NULL_RTX, false, OPTAB_LIB_WIDEN);
1160 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
1161 done_label, profile_probability::very_likely ());
1162 goto do_error_label;
1165 /* s1 + u2 -> ur */
1166 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
1168 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
1169 OPTAB_LIB_WIDEN);
1170 /* As we've changed op1, we have to avoid using the value range
1171 for the original argument. */
1172 arg1 = error_mark_node;
1173 do_xor = true;
1174 goto do_signed;
1177 /* u1 - s2 -> ur */
1178 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
1180 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
1181 OPTAB_LIB_WIDEN);
1182 /* As we've changed op0, we have to avoid using the value range
1183 for the original argument. */
1184 arg0 = error_mark_node;
1185 do_xor = true;
1186 goto do_signed;
1189 /* s1 - u2 -> ur */
1190 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
1192 /* Compute the operation. On RTL level, the addition is always
1193 unsigned. */
1194 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1195 OPTAB_LIB_WIDEN);
1196 int pos_neg = get_range_pos_neg (arg0);
1197 if (pos_neg == 2)
1198 /* If ARG0 is known to be always negative, this is always overflow. */
1199 emit_jump (do_error);
1200 else if (pos_neg == 3)
1201 /* If ARG0 is not known to be always positive, check at runtime. */
1202 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
1203 NULL, do_error, profile_probability::very_unlikely ());
1204 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
1205 done_label, profile_probability::very_likely ());
1206 goto do_error_label;
1209 /* u1 - s2 -> sr */
1210 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
1212 /* Compute the operation. On RTL level, the addition is always
1213 unsigned. */
1214 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1215 OPTAB_LIB_WIDEN);
1216 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
1217 OPTAB_LIB_WIDEN);
1218 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
1219 done_label, profile_probability::very_likely ());
1220 goto do_error_label;
1223 /* u1 + u2 -> sr */
1224 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
1226 /* Compute the operation. On RTL level, the addition is always
1227 unsigned. */
1228 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
1229 OPTAB_LIB_WIDEN);
1230 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1231 NULL, do_error, profile_probability::very_unlikely ());
1232 rtx tem = op1;
1233 /* The operation is commutative, so we can pick operand to compare
1234 against. For prec <= BITS_PER_WORD, I think preferring REG operand
1235 is better over CONST_INT, because the CONST_INT might enlarge the
1236 instruction or CSE would need to figure out we'd already loaded it
1237 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
1238 might be more beneficial, as then the multi-word comparison can be
1239 perhaps simplified. */
1240 if (prec <= BITS_PER_WORD
1241 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
1242 : CONST_SCALAR_INT_P (op0))
1243 tem = op0;
1244 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
1245 done_label, profile_probability::very_likely ());
1246 goto do_error_label;
1249 /* s1 +- s2 -> ur */
1250 if (!uns0_p && !uns1_p && unsr_p)
1252 /* Compute the operation. On RTL level, the addition is always
1253 unsigned. */
1254 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1255 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1256 int pos_neg = get_range_pos_neg (arg1);
1257 if (code == PLUS_EXPR)
1259 int pos_neg0 = get_range_pos_neg (arg0);
1260 if (pos_neg0 != 3 && pos_neg == 3)
1262 std::swap (op0, op1);
1263 pos_neg = pos_neg0;
1266 rtx tem;
1267 if (pos_neg != 3)
1269 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
1270 ? and_optab : ior_optab,
1271 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
1272 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
1273 NULL, done_label, profile_probability::very_likely ());
1275 else
1277 rtx_code_label *do_ior_label = gen_label_rtx ();
1278 do_compare_rtx_and_jump (op1, const0_rtx,
1279 code == MINUS_EXPR ? GE : LT, false, mode,
1280 NULL_RTX, NULL, do_ior_label,
1281 profile_probability::even ());
1282 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
1283 OPTAB_LIB_WIDEN);
1284 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1285 NULL, done_label, profile_probability::very_likely ());
1286 emit_jump (do_error);
1287 emit_label (do_ior_label);
1288 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
1289 OPTAB_LIB_WIDEN);
1290 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1291 NULL, done_label, profile_probability::very_likely ());
1293 goto do_error_label;
1296 /* u1 - u2 -> sr */
1297 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
1299 /* Compute the operation. On RTL level, the addition is always
1300 unsigned. */
1301 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1302 OPTAB_LIB_WIDEN);
1303 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
1304 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
1305 op0_geu_op1, profile_probability::even ());
1306 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1307 NULL, done_label, profile_probability::very_likely ());
1308 emit_jump (do_error);
1309 emit_label (op0_geu_op1);
1310 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1311 NULL, done_label, profile_probability::very_likely ());
1312 goto do_error_label;
1315 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
1317 /* s1 +- s2 -> sr */
1318 do_signed:
1320 insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
1321 : subv4_optab, mode);
1322 if (icode != CODE_FOR_nothing)
1324 class expand_operand ops[4];
1325 rtx_insn *last = get_last_insn ();
1327 res = gen_reg_rtx (mode);
1328 create_output_operand (&ops[0], res, mode);
1329 create_input_operand (&ops[1], op0, mode);
1330 create_input_operand (&ops[2], op1, mode);
1331 create_fixed_operand (&ops[3], do_error);
1332 if (maybe_expand_insn (icode, 4, ops))
1334 last = get_last_insn ();
1335 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1336 && JUMP_P (last)
1337 && any_condjump_p (last)
1338 && !find_reg_note (last, REG_BR_PROB, 0))
1339 add_reg_br_prob_note (last,
1340 profile_probability::very_unlikely ());
1341 emit_jump (done_label);
1342 goto do_error_label;
1345 delete_insns_since (last);
1348 /* Compute the operation. On RTL level, the addition is always
1349 unsigned. */
1350 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1351 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1353 /* If we can prove that one of the arguments (for MINUS_EXPR only
1354 the second operand, as subtraction is not commutative) is always
1355 non-negative or always negative, we can do just one comparison
1356 and conditional jump. */
1357 int pos_neg = get_range_pos_neg (arg1);
1358 if (code == PLUS_EXPR)
1360 int pos_neg0 = get_range_pos_neg (arg0);
1361 if (pos_neg0 != 3 && pos_neg == 3)
1363 std::swap (op0, op1);
1364 pos_neg = pos_neg0;
1368 /* Addition overflows if and only if the two operands have the same sign,
1369 and the result has the opposite sign. Subtraction overflows if and
1370 only if the two operands have opposite sign, and the subtrahend has
1371 the same sign as the result. Here 0 is counted as positive. */
1372 if (pos_neg == 3)
1374 /* Compute op0 ^ op1 (operands have opposite sign). */
1375 rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1376 OPTAB_LIB_WIDEN);
1378 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1379 rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1380 OPTAB_LIB_WIDEN);
1382 rtx tem;
1383 if (code == PLUS_EXPR)
1385 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1386 tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1387 tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1388 OPTAB_LIB_WIDEN);
1390 else
1392 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1393 tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1394 tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1395 OPTAB_LIB_WIDEN);
1398 /* No overflow if the result has bit sign cleared. */
1399 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1400 NULL, done_label, profile_probability::very_likely ());
1403 /* Compare the result of the operation with the first operand.
1404 No overflow for addition if second operand is positive and result
1405 is larger or second operand is negative and result is smaller.
1406 Likewise for subtraction with sign of second operand flipped. */
1407 else
1408 do_compare_rtx_and_jump (res, op0,
1409 (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1410 false, mode, NULL_RTX, NULL, done_label,
1411 profile_probability::very_likely ());
1414 do_error_label:
1415 emit_label (do_error);
1416 if (is_ubsan)
1418 /* Expand the ubsan builtin call. */
1419 push_temp_slots ();
1420 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1421 arg0, arg1, datap);
1422 expand_normal (fn);
1423 pop_temp_slots ();
1424 do_pending_stack_adjust ();
1426 else if (lhs)
1427 expand_arith_set_overflow (lhs, target);
1429 /* We're done. */
1430 emit_label (done_label);
1432 if (lhs)
1434 if (is_ubsan)
1435 expand_ubsan_result_store (target, res);
1436 else
1438 if (do_xor)
1439 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1440 OPTAB_LIB_WIDEN);
1442 expand_arith_overflow_result_store (lhs, target, mode, res);
1447 /* Add negate overflow checking to the statement STMT. */
1449 static void
1450 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
1451 tree *datap)
1453 rtx res, op1;
1454 tree fn;
1455 rtx_code_label *done_label, *do_error;
1456 rtx target = NULL_RTX;
1458 done_label = gen_label_rtx ();
1459 do_error = gen_label_rtx ();
1461 do_pending_stack_adjust ();
1462 op1 = expand_normal (arg1);
1464 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
1465 if (lhs)
1467 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1468 if (!is_ubsan)
1469 write_complex_part (target, const0_rtx, true, false);
1472 enum insn_code icode = optab_handler (negv3_optab, mode);
1473 if (icode != CODE_FOR_nothing)
1475 class expand_operand ops[3];
1476 rtx_insn *last = get_last_insn ();
1478 res = gen_reg_rtx (mode);
1479 create_output_operand (&ops[0], res, mode);
1480 create_input_operand (&ops[1], op1, mode);
1481 create_fixed_operand (&ops[2], do_error);
1482 if (maybe_expand_insn (icode, 3, ops))
1484 last = get_last_insn ();
1485 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1486 && JUMP_P (last)
1487 && any_condjump_p (last)
1488 && !find_reg_note (last, REG_BR_PROB, 0))
1489 add_reg_br_prob_note (last,
1490 profile_probability::very_unlikely ());
1491 emit_jump (done_label);
1493 else
1495 delete_insns_since (last);
1496 icode = CODE_FOR_nothing;
1500 if (icode == CODE_FOR_nothing)
1502 /* Compute the operation. On RTL level, the addition is always
1503 unsigned. */
1504 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1506 /* Compare the operand with the most negative value. */
1507 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
1508 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
1509 done_label, profile_probability::very_likely ());
1512 emit_label (do_error);
1513 if (is_ubsan)
1515 /* Expand the ubsan builtin call. */
1516 push_temp_slots ();
1517 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
1518 arg1, NULL_TREE, datap);
1519 expand_normal (fn);
1520 pop_temp_slots ();
1521 do_pending_stack_adjust ();
1523 else if (lhs)
1524 expand_arith_set_overflow (lhs, target);
1526 /* We're done. */
1527 emit_label (done_label);
1529 if (lhs)
1531 if (is_ubsan)
1532 expand_ubsan_result_store (target, res);
1533 else
1534 expand_arith_overflow_result_store (lhs, target, mode, res);
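/* Note: signed negation overflows only for the single most negative value
   of the type (e.g. INT_MIN for a 32-bit int), which is why the fallback
   path above needs just one comparison of OP1 against TYPE_MIN_VALUE.  */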
1538 /* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
1539 mode MODE can be expanded without using a libcall. */
1541 static bool
1542 can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
1543 rtx op0, rtx op1, bool uns)
1545 if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
1546 != CODE_FOR_nothing)
1547 return true;
1549 if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
1550 != CODE_FOR_nothing)
1551 return true;
1553 rtx_insn *last = get_last_insn ();
1554 if (CONSTANT_P (op0))
1555 op0 = convert_modes (wmode, mode, op0, uns);
1556 else
1557 op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
1558 if (CONSTANT_P (op1))
1559 op1 = convert_modes (wmode, mode, op1, uns);
1560 else
1561 op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
1562 rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
1563 delete_insns_since (last);
1564 return ret != NULL_RTX;
1567 /* Add mul overflow checking to the statement STMT. */
1569 static void
1570 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1571 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1572 tree *datap)
1574 rtx res, op0, op1;
1575 tree fn, type;
1576 rtx_code_label *done_label, *do_error;
1577 rtx target = NULL_RTX;
1578 signop sign;
1579 enum insn_code icode;
1581 done_label = gen_label_rtx ();
1582 do_error = gen_label_rtx ();
1584 do_pending_stack_adjust ();
1585 op0 = expand_normal (arg0);
1586 op1 = expand_normal (arg1);
1588 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1589 bool uns = unsr_p;
1590 if (lhs)
1592 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1593 if (!is_ubsan)
1594 write_complex_part (target, const0_rtx, true, false);
1597 if (is_ubsan)
1598 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1600 /* We assume both operands and result have the same precision
1601 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1602 with that precision, U for unsigned type with that precision,
1603 sgn for unsigned most significant bit in that precision.
1604 s1 is signed first operand, u1 is unsigned first operand,
1605 s2 is signed second operand, u2 is unsigned second operand,
1606 sr is signed result, ur is unsigned result and the following
1607 rules say how to compute result (which is always result of
1608 the operands as if both were unsigned, cast to the right
1609 signedness) and how to compute whether operation overflowed.
1610 main_ovf (false) stands for jump on signed multiplication
1611 overflow or the main algorithm with uns == false.
1612 main_ovf (true) stands for jump on unsigned multiplication
1613 overflow or the main algorithm with uns == true.
1615 s1 * s2 -> sr
1616 res = (S) ((U) s1 * (U) s2)
1617 ovf = main_ovf (false)
1618 u1 * u2 -> ur
1619 res = u1 * u2
1620 ovf = main_ovf (true)
1621 s1 * u2 -> ur
1622 res = (U) s1 * u2
1623 ovf = (s1 < 0 && u2) || main_ovf (true)
1624 u1 * u2 -> sr
1625 res = (S) (u1 * u2)
1626 ovf = res < 0 || main_ovf (true)
1627 s1 * u2 -> sr
1628 res = (S) ((U) s1 * u2)
1629 ovf = (S) u2 >= 0 ? main_ovf (false)
1630 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1631 s1 * s2 -> ur
1632 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1633 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1634 res = t1 * t2
1635 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
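/* A concrete 8-bit instance of the "u1 * u2 -> sr" rule above, added for
   illustration: u1 = 100, u2 = 2 gives an unsigned product of 200, which
   reinterpreted as signed is -56; res < 0 therefore flags the overflow
   even though the unsigned multiplication itself did not wrap.  */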
1637 if (uns0_p && !uns1_p)
1639 /* Multiplication is commutative, if operand signedness differs,
1640 canonicalize to the first operand being signed and second
1641 unsigned to simplify following code. */
1642 std::swap (op0, op1);
1643 std::swap (arg0, arg1);
1644 uns0_p = false;
1645 uns1_p = true;
1648 int pos_neg0 = get_range_pos_neg (arg0);
1649 int pos_neg1 = get_range_pos_neg (arg1);
1651 /* s1 * u2 -> ur */
1652 if (!uns0_p && uns1_p && unsr_p)
1654 switch (pos_neg0)
1656 case 1:
1657 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1658 goto do_main;
1659 case 2:
1660 /* If s1 is negative, avoid the main code, just multiply and
1661 signal overflow if op1 is not 0. */
1662 struct separate_ops ops;
1663 ops.code = MULT_EXPR;
1664 ops.type = TREE_TYPE (arg1);
1665 ops.op0 = make_tree (ops.type, op0);
1666 ops.op1 = make_tree (ops.type, op1);
1667 ops.op2 = NULL_TREE;
1668 ops.location = loc;
1669 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1670 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1671 NULL, done_label, profile_probability::very_likely ());
1672 goto do_error_label;
1673 case 3:
1674 if (get_min_precision (arg1, UNSIGNED)
1675 + get_min_precision (arg0, SIGNED) <= GET_MODE_PRECISION (mode))
1677 /* If the first operand is sign extended from narrower type, the
1678 second operand is zero extended from narrower type and
1679 the sum of the two precisions is smaller or equal to the
1680 result precision: if the first argument is at runtime
1681 non-negative, maximum result will be 0x7e81 or 0x7f..fe80..01
1682 and there will be no overflow, if the first argument is
1683 negative and the second argument zero, the result will be
1684 0 and there will be no overflow, if the first argument is
1685 negative and the second argument positive, the result when
1686 treated as signed will be negative (minimum -0x7f80 or
1687 -0x7f..f80..0) and there will always be overflow. So, do
1688 res = (U) (s1 * u2)
1689 ovf = (S) res < 0 */
1690 struct separate_ops ops;
1691 ops.code = MULT_EXPR;
1692 ops.type
1693 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1695 ops.op0 = make_tree (ops.type, op0);
1696 ops.op1 = make_tree (ops.type, op1);
1697 ops.op2 = NULL_TREE;
1698 ops.location = loc;
1699 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1700 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1701 mode, NULL_RTX, NULL, done_label,
1702 profile_probability::very_likely ());
1703 goto do_error_label;
1705 rtx_code_label *do_main_label;
1706 do_main_label = gen_label_rtx ();
1707 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1708 NULL, do_main_label, profile_probability::very_likely ());
1709 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1710 NULL, do_main_label, profile_probability::very_likely ());
1711 expand_arith_set_overflow (lhs, target);
1712 emit_label (do_main_label);
1713 goto do_main;
1714 default:
1715 gcc_unreachable ();
1719 /* u1 * u2 -> sr */
1720 if (uns0_p && uns1_p && !unsr_p)
1722 if ((pos_neg0 | pos_neg1) == 1)
1724 /* If both arguments are zero extended from narrower types,
1725 the MSB will be clear on both and so we can pretend it is
1726 a normal s1 * s2 -> sr multiplication. */
1727 uns0_p = false;
1728 uns1_p = false;
1730 else
1731 uns = true;
1732 /* Rest of handling of this case after res is computed. */
1733 goto do_main;
1736 /* s1 * u2 -> sr */
1737 if (!uns0_p && uns1_p && !unsr_p)
1739 switch (pos_neg1)
1741 case 1:
1742 goto do_main;
1743 case 2:
1744 /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1745 avoid the main code, just multiply and signal overflow
1746 unless 0 * u2 or -1 * ((U) Smin). */
1747 struct separate_ops ops;
1748 ops.code = MULT_EXPR;
1749 ops.type = TREE_TYPE (arg1);
1750 ops.op0 = make_tree (ops.type, op0);
1751 ops.op1 = make_tree (ops.type, op1);
1752 ops.op2 = NULL_TREE;
1753 ops.location = loc;
1754 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1755 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1756 NULL, done_label, profile_probability::very_likely ());
1757 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1758 NULL, do_error, profile_probability::very_unlikely ());
1759 int prec;
1760 prec = GET_MODE_PRECISION (mode);
1761 rtx sgn;
1762 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1763 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1764 NULL, done_label, profile_probability::very_likely ());
1765 goto do_error_label;
1766 case 3:
1767 /* Rest of handling of this case after res is computed. */
1768 goto do_main;
1769 default:
1770 gcc_unreachable ();
1774 /* s1 * s2 -> ur */
1775 if (!uns0_p && !uns1_p && unsr_p)
1777 rtx tem;
1778 switch (pos_neg0 | pos_neg1)
1780 case 1: /* Both operands known to be non-negative. */
1781 goto do_main;
1782 case 2: /* Both operands known to be negative. */
1783 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1784 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1785 /* Avoid looking at arg0/arg1 ranges, as we've changed
1786 the arguments. */
1787 arg0 = error_mark_node;
1788 arg1 = error_mark_node;
1789 goto do_main;
1790 case 3:
1791 if ((pos_neg0 ^ pos_neg1) == 3)
1793 /* If one operand is known to be negative and the other
1794 non-negative, this always overflows, unless the non-negative
1795 one is 0. Just do normal multiply and set overflow
1796 unless one of the operands is 0. */
1797 struct separate_ops ops;
1798 ops.code = MULT_EXPR;
1799 ops.type
1800 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1802 ops.op0 = make_tree (ops.type, op0);
1803 ops.op1 = make_tree (ops.type, op1);
1804 ops.op2 = NULL_TREE;
1805 ops.location = loc;
1806 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1807 do_compare_rtx_and_jump (pos_neg0 == 1 ? op0 : op1, const0_rtx, EQ,
1808 true, mode, NULL_RTX, NULL, done_label,
1809 profile_probability::very_likely ());
1810 goto do_error_label;
1812 if (get_min_precision (arg0, SIGNED)
1813 + get_min_precision (arg1, SIGNED) <= GET_MODE_PRECISION (mode))
1815 /* If both operands are sign extended from narrower types and
1816 the sum of the two precisions is smaller or equal to the
1817 result precision: if both arguments are at runtime
1818 non-negative, maximum result will be 0x3f01 or 0x3f..f0..01
1819 and there will be no overflow, if both arguments are negative,
1820 maximum result will be 0x40..00 and there will be no overflow
1821 either, if one argument is positive and the other argument
1822 negative, the result when treated as signed will be negative
1823 and there will always be overflow, and if one argument is
1824 zero and the other negative the result will be zero and no
1825 overflow. So, do
1826 res = (U) (s1 * s2)
1827 ovf = (S) res < 0 */
1828 struct separate_ops ops;
1829 ops.code = MULT_EXPR;
1830 ops.type
1831 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1833 ops.op0 = make_tree (ops.type, op0);
1834 ops.op1 = make_tree (ops.type, op1);
1835 ops.op2 = NULL_TREE;
1836 ops.location = loc;
1837 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1838 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1839 mode, NULL_RTX, NULL, done_label,
1840 profile_probability::very_likely ());
1841 goto do_error_label;
1843 /* The general case, do all the needed comparisons at runtime. */
1844 rtx_code_label *do_main_label, *after_negate_label;
1845 rtx rop0, rop1;
1846 rop0 = gen_reg_rtx (mode);
1847 rop1 = gen_reg_rtx (mode);
1848 emit_move_insn (rop0, op0);
1849 emit_move_insn (rop1, op1);
1850 op0 = rop0;
1851 op1 = rop1;
1852 do_main_label = gen_label_rtx ();
1853 after_negate_label = gen_label_rtx ();
1854 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1855 OPTAB_LIB_WIDEN);
1856 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1857 NULL, after_negate_label, profile_probability::very_likely ());
1858 /* Both arguments negative here, negate them and continue with
1859 normal unsigned overflow checking multiplication. */
1860 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1861 NULL_RTX, false));
1862 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1863 NULL_RTX, false));
1864 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1865 the arguments. */
1866 arg0 = error_mark_node;
1867 arg1 = error_mark_node;
1868 emit_jump (do_main_label);
1869 emit_label (after_negate_label);
1870 tem = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1871 OPTAB_LIB_WIDEN);
1872 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1873 NULL, do_main_label,
1874 profile_probability::very_likely ());
1875 /* One argument is negative here, the other positive. This
1876 always overflows, unless one of the arguments is 0. But
1877 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1878 is, so we can keep the do_main code ORing in the overflow as is. */
1879 if (pos_neg0 != 2)
1880 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1881 NULL, do_main_label,
1882 profile_probability::very_unlikely ());
1883 if (pos_neg1 != 2)
1884 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1885 NULL, do_main_label,
1886 profile_probability::very_unlikely ());
1887 expand_arith_set_overflow (lhs, target);
1888 emit_label (do_main_label);
1889 goto do_main;
1890 default:
1891 gcc_unreachable ();
1895 do_main:
1896 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1897 sign = uns ? UNSIGNED : SIGNED;
1898 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1899 if (uns
1900 && (integer_pow2p (arg0) || integer_pow2p (arg1))
1901 && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
1903 /* Optimize unsigned multiplication by a power of 2 constant
1904 using 2 shifts, one for the result, one to extract the shifted
1905 out bits to see if they are all zero.
1906 Don't do this if optimizing for size and we have umulv4_optab,
1907 in that case assume the multiplication will be shorter.
1908 This is a heuristic based on the single target that currently
1909 provides umulv4 (i?86/x86_64); if further targets add it, this
1910 might need to be revisited.
1911 Cases where both operands are constant should already be folded
1912 during GIMPLE, and cases where one operand is constant but not
1913 a power of 2 are questionable: either the WIDEN_MULT_EXPR case
1914 below can be done without multiplication, just by shifts and adds,
1915 or we'd need to divide the result (and hope it doesn't actually
1916 divide or multiply) and compare the result of the division
1917 with the original operand. */
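/* Illustratively (hypothetical names, not part of this file): for an
   unsigned operand A multiplied by the constant C == 1 << CNT,
     res   = A << CNT;
     upper = A >> (prec - CNT);
   and the multiplication overflowed iff UPPER is non-zero, i.e. iff any
   bit shifted out of RES was set.  */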
1918 rtx opn0 = op0;
1919 rtx opn1 = op1;
1920 tree argn0 = arg0;
1921 tree argn1 = arg1;
1922 if (integer_pow2p (arg0))
1924 std::swap (opn0, opn1);
1925 std::swap (argn0, argn1);
1927 int cnt = tree_log2 (argn1);
1928 if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
1930 rtx upper = const0_rtx;
1931 res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
1932 if (cnt != 0)
1933 upper = expand_shift (RSHIFT_EXPR, mode, opn0,
1934 GET_MODE_PRECISION (mode) - cnt,
1935 NULL_RTX, uns);
1936 do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
1937 NULL_RTX, NULL, done_label,
1938 profile_probability::very_likely ());
1939 goto do_error_label;
1942 if (icode != CODE_FOR_nothing)
1944 class expand_operand ops[4];
1945 rtx_insn *last = get_last_insn ();
1947 res = gen_reg_rtx (mode);
1948 create_output_operand (&ops[0], res, mode);
1949 create_input_operand (&ops[1], op0, mode);
1950 create_input_operand (&ops[2], op1, mode);
1951 create_fixed_operand (&ops[3], do_error);
1952 if (maybe_expand_insn (icode, 4, ops))
1954 last = get_last_insn ();
1955 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1956 && JUMP_P (last)
1957 && any_condjump_p (last)
1958 && !find_reg_note (last, REG_BR_PROB, 0))
1959 add_reg_br_prob_note (last,
1960 profile_probability::very_unlikely ());
1961 emit_jump (done_label);
1963 else
1965 delete_insns_since (last);
1966 icode = CODE_FOR_nothing;
1970 if (icode == CODE_FOR_nothing)
1972 struct separate_ops ops;
1973 int prec = GET_MODE_PRECISION (mode);
1974 scalar_int_mode hmode, wmode;
1975 ops.op0 = make_tree (type, op0);
1976 ops.op1 = make_tree (type, op1);
1977 ops.op2 = NULL_TREE;
1978 ops.location = loc;
1980 /* Optimize unsigned overflow check where we don't use the
1981 multiplication result, just whether overflow happened.
1982 If we can do MULT_HIGHPART_EXPR, that followed by
1983 comparison of the result against zero is cheapest.
1984 We'll still compute res, but it should be DCEd later. */
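/* For example (illustrative GIMPLE, not taken from a testcase):
     _1 = .MUL_OVERFLOW (a_2, b_3);
     _4 = IMAGPART_EXPR <_1>;
   where only the overflow flag _4 is consumed, which is the situation
   the single_imm_use / IMAGPART_EXPR test below looks for.  */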
1985 use_operand_p use;
1986 gimple *use_stmt;
1987 if (!is_ubsan
1988 && lhs
1989 && uns
1990 && !(uns0_p && uns1_p && !unsr_p)
1991 && can_mult_highpart_p (mode, uns) == 1
1992 && single_imm_use (lhs, &use, &use_stmt)
1993 && is_gimple_assign (use_stmt)
1994 && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
1995 goto highpart;
1997 if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1998 && targetm.scalar_mode_supported_p (wmode)
1999 && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
2001 twoxwider:
2002 ops.code = WIDEN_MULT_EXPR;
2003 ops.type
2004 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
2006 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
2007 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
2008 NULL_RTX, uns);
2009 hipart = convert_modes (mode, wmode, hipart, uns);
2010 res = convert_modes (mode, wmode, res, uns);
2011 if (uns)
2012 /* For the unsigned multiplication, there was overflow if
2013 HIPART is non-zero. */
2014 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
2015 NULL_RTX, NULL, done_label,
2016 profile_probability::very_likely ());
2017 else
2019 /* RES is used more than once, place it in a pseudo. */
2020 res = force_reg (mode, res);
2022 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
2023 NULL_RTX, 0);
2024 /* RES is low half of the double width result, HIPART
2025 the high half. There was overflow if
2026 HIPART is different from RES < 0 ? -1 : 0. */
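/* In C terms this signed check is, illustratively (32-bit operands with
   a 64-bit widening multiply; names are hypothetical):
     int64_t w = (int64_t) op0 * op1;
     int32_t res = (int32_t) w;
     int32_t hipart = (int32_t) (w >> 32);
     int ovf = hipart != (res >> 31);
   where res >> 31 yields the required -1 / 0 sign mask.  */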
2027 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
2028 NULL_RTX, NULL, done_label,
2029 profile_probability::very_likely ());
2032 else if (can_mult_highpart_p (mode, uns) == 1)
2034 highpart:
2035 ops.code = MULT_HIGHPART_EXPR;
2036 ops.type = type;
2038 rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
2039 EXPAND_NORMAL);
2040 ops.code = MULT_EXPR;
2041 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2042 if (uns)
2043 /* For the unsigned multiplication, there was overflow if
2044 HIPART is non-zero. */
2045 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
2046 NULL_RTX, NULL, done_label,
2047 profile_probability::very_likely ());
2048 else
2050 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
2051 NULL_RTX, 0);
2052 /* RES is low half of the double width result, HIPART
2053 the high half. There was overflow if
2054 HIPART is different from RES < 0 ? -1 : 0. */
2055 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
2056 NULL_RTX, NULL, done_label,
2057 profile_probability::very_likely ());
2061 else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
2062 && 2 * GET_MODE_PRECISION (hmode) == prec)
2064 rtx_code_label *large_op0 = gen_label_rtx ();
2065 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
2066 rtx_code_label *one_small_one_large = gen_label_rtx ();
2067 rtx_code_label *both_ops_large = gen_label_rtx ();
2068 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
2069 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
2070 rtx_code_label *do_overflow = gen_label_rtx ();
2071 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
2073 unsigned int hprec = GET_MODE_PRECISION (hmode);
2074 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
2075 NULL_RTX, uns);
2076 hipart0 = convert_modes (hmode, mode, hipart0, uns);
2077 rtx lopart0 = convert_modes (hmode, mode, op0, uns);
2078 rtx signbit0 = const0_rtx;
2079 if (!uns)
2080 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
2081 NULL_RTX, 0);
2082 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
2083 NULL_RTX, uns);
2084 hipart1 = convert_modes (hmode, mode, hipart1, uns);
2085 rtx lopart1 = convert_modes (hmode, mode, op1, uns);
2086 rtx signbit1 = const0_rtx;
2087 if (!uns)
2088 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
2089 NULL_RTX, 0);
2091 res = gen_reg_rtx (mode);
2093 /* True if op0 resp. op1 are known to be in the range of
2094 halfstype. */
2095 bool op0_small_p = false;
2096 bool op1_small_p = false;
2097 /* True if op0 resp. op1 are known to have all zeros or all ones
2098 in the upper half of bits, but are not known to be
2099 op{0,1}_small_p. */
2100 bool op0_medium_p = false;
2101 bool op1_medium_p = false;
2102 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
2103 nonnegative, 1 if unknown. */
2104 int op0_sign = 1;
2105 int op1_sign = 1;
2107 if (pos_neg0 == 1)
2108 op0_sign = 0;
2109 else if (pos_neg0 == 2)
2110 op0_sign = -1;
2111 if (pos_neg1 == 1)
2112 op1_sign = 0;
2113 else if (pos_neg1 == 2)
2114 op1_sign = -1;
2116 unsigned int mprec0 = prec;
2117 if (arg0 != error_mark_node)
2118 mprec0 = get_min_precision (arg0, sign);
2119 if (mprec0 <= hprec)
2120 op0_small_p = true;
2121 else if (!uns && mprec0 <= hprec + 1)
2122 op0_medium_p = true;
2123 unsigned int mprec1 = prec;
2124 if (arg1 != error_mark_node)
2125 mprec1 = get_min_precision (arg1, sign);
2126 if (mprec1 <= hprec)
2127 op1_small_p = true;
2128 else if (!uns && mprec1 <= hprec + 1)
2129 op1_medium_p = true;
2131 int smaller_sign = 1;
2132 int larger_sign = 1;
2133 if (op0_small_p)
2135 smaller_sign = op0_sign;
2136 larger_sign = op1_sign;
2138 else if (op1_small_p)
2140 smaller_sign = op1_sign;
2141 larger_sign = op0_sign;
2143 else if (op0_sign == op1_sign)
2145 smaller_sign = op0_sign;
2146 larger_sign = op0_sign;
2149 if (!op0_small_p)
2150 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
2151 NULL_RTX, NULL, large_op0,
2152 profile_probability::unlikely ());
2154 if (!op1_small_p)
2155 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
2156 NULL_RTX, NULL, small_op0_large_op1,
2157 profile_probability::unlikely ());
2159 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
2160 hmode to mode, the multiplication will never overflow. We can
2161 do just one hmode x hmode => mode widening multiplication. */
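/* (A product of two values each representable in HPREC bits always fits
   in 2 * HPREC == PREC bits, in both the signed and the unsigned case.)  */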
2162 tree halfstype = build_nonstandard_integer_type (hprec, uns);
2163 ops.op0 = make_tree (halfstype, lopart0);
2164 ops.op1 = make_tree (halfstype, lopart1);
2165 ops.code = WIDEN_MULT_EXPR;
2166 ops.type = type;
2167 rtx thisres
2168 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2169 emit_move_insn (res, thisres);
2170 emit_jump (done_label);
2172 emit_label (small_op0_large_op1);
2174 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
2175 but op1 is not, just swap the arguments and handle it as op1
2176 sign/zero extended, op0 not. */
2177 rtx larger = gen_reg_rtx (mode);
2178 rtx hipart = gen_reg_rtx (hmode);
2179 rtx lopart = gen_reg_rtx (hmode);
2180 emit_move_insn (larger, op1);
2181 emit_move_insn (hipart, hipart1);
2182 emit_move_insn (lopart, lopart0);
2183 emit_jump (one_small_one_large);
2185 emit_label (large_op0);
2187 if (!op1_small_p)
2188 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
2189 NULL_RTX, NULL, both_ops_large,
2190 profile_probability::unlikely ());
2192 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
2193 but op0 is not, prepare larger, hipart and lopart pseudos and
2194 handle it together with small_op0_large_op1. */
2195 emit_move_insn (larger, op0);
2196 emit_move_insn (hipart, hipart0);
2197 emit_move_insn (lopart, lopart1);
2199 emit_label (one_small_one_large);
2201 /* lopart is the low part of the operand that is sign extended
2202 to mode, larger is the other operand, hipart is the
2203 high part of larger and lopart0 and lopart1 are the low parts
2204 of both operands.
2205 We perform lopart0 * lopart1 and lopart * hipart widening
2206 multiplications. */
2207 tree halfutype = build_nonstandard_integer_type (hprec, 1);
2208 ops.op0 = make_tree (halfutype, lopart0);
2209 ops.op1 = make_tree (halfutype, lopart1);
2210 rtx lo0xlo1
2211 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2213 ops.op0 = make_tree (halfutype, lopart);
2214 ops.op1 = make_tree (halfutype, hipart);
2215 rtx loxhi = gen_reg_rtx (mode);
2216 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2217 emit_move_insn (loxhi, tem);
2219 if (!uns)
2221 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
2222 if (larger_sign == 0)
2223 emit_jump (after_hipart_neg);
2224 else if (larger_sign != -1)
2225 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
2226 NULL_RTX, NULL, after_hipart_neg,
2227 profile_probability::even ());
2229 tem = convert_modes (mode, hmode, lopart, 1);
2230 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
2231 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
2232 1, OPTAB_WIDEN);
2233 emit_move_insn (loxhi, tem);
2235 emit_label (after_hipart_neg);
2237 /* if (lopart < 0) loxhi -= larger; */
2238 if (smaller_sign == 0)
2239 emit_jump (after_lopart_neg);
2240 else if (smaller_sign != -1)
2241 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
2242 NULL_RTX, NULL, after_lopart_neg,
2243 profile_probability::even ());
2245 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
2246 1, OPTAB_WIDEN);
2247 emit_move_insn (loxhi, tem);
2249 emit_label (after_lopart_neg);
2252 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
2253 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
2254 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
2255 1, OPTAB_WIDEN);
2256 emit_move_insn (loxhi, tem);
2258 /* if (loxhi >> (bitsize / 2)
2259 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
2260 if (loxhi >> (bitsize / 2) == 0) (if uns). */
2261 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
2262 NULL_RTX, 0);
2263 hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
2264 rtx signbitloxhi = const0_rtx;
2265 if (!uns)
2266 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
2267 convert_modes (hmode, mode,
2268 loxhi, 0),
2269 hprec - 1, NULL_RTX, 0);
2271 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
2272 NULL_RTX, NULL, do_overflow,
2273 profile_probability::very_unlikely ());
2275 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
2276 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
2277 NULL_RTX, 1);
2278 tem = convert_modes (mode, hmode,
2279 convert_modes (hmode, mode, lo0xlo1, 1), 1);
2281 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
2282 1, OPTAB_WIDEN);
2283 if (tem != res)
2284 emit_move_insn (res, tem);
2285 emit_jump (done_label);
2287 emit_label (both_ops_large);
2289 /* If both operands are large (not sign (!uns) or zero (uns)
2290 extended from hmode), then perform the full multiplication
2291 which will be the result of the operation.
2292 The only cases which don't overflow are, for signed multiplication,
2293 some cases where both hipart0 and hipart1 are 0 or -1.
2294 For unsigned multiplication, when the high parts are both non-zero
2295 this always overflows. */
2296 ops.code = MULT_EXPR;
2297 ops.op0 = make_tree (type, op0);
2298 ops.op1 = make_tree (type, op1);
2299 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2300 emit_move_insn (res, tem);
2302 if (!uns)
2304 if (!op0_medium_p)
2306 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
2307 NULL_RTX, 1, OPTAB_WIDEN);
2308 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2309 NULL_RTX, NULL, do_error,
2310 profile_probability::very_unlikely ());
2313 if (!op1_medium_p)
2315 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
2316 NULL_RTX, 1, OPTAB_WIDEN);
2317 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2318 NULL_RTX, NULL, do_error,
2319 profile_probability::very_unlikely ());
2322 /* At this point hipart{0,1} are both in [-1, 0]. If they are
2323 the same, overflow happened if res is non-positive; if they
2324 are different, overflow happened if res is positive. */
2325 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
2326 emit_jump (hipart_different);
2327 else if (op0_sign == 1 || op1_sign == 1)
2328 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
2329 NULL_RTX, NULL, hipart_different,
2330 profile_probability::even ());
2332 do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
2333 NULL_RTX, NULL, do_error,
2334 profile_probability::very_unlikely ());
2335 emit_jump (done_label);
2337 emit_label (hipart_different);
2339 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
2340 NULL_RTX, NULL, do_error,
2341 profile_probability::very_unlikely ());
2342 emit_jump (done_label);
2345 emit_label (do_overflow);
2347 /* Overflow, do full multiplication and fallthru into do_error. */
2348 ops.op0 = make_tree (type, op0);
2349 ops.op1 = make_tree (type, op1);
2350 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2351 emit_move_insn (res, tem);
2353 else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
2354 && targetm.scalar_mode_supported_p (wmode))
2355 /* Even emitting a libcall is better than not detecting overflow
2356 at all. */
2357 goto twoxwider;
2358 else
2360 gcc_assert (!is_ubsan);
2361 ops.code = MULT_EXPR;
2362 ops.type = type;
2363 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2364 emit_jump (done_label);
2368 do_error_label:
2369 emit_label (do_error);
2370 if (is_ubsan)
2372 /* Expand the ubsan builtin call. */
2373 push_temp_slots ();
2374 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
2375 arg0, arg1, datap);
2376 expand_normal (fn);
2377 pop_temp_slots ();
2378 do_pending_stack_adjust ();
2380 else if (lhs)
2381 expand_arith_set_overflow (lhs, target);
2383 /* We're done. */
2384 emit_label (done_label);
2386 /* u1 * u2 -> sr */
2387 if (uns0_p && uns1_p && !unsr_p)
2389 rtx_code_label *all_done_label = gen_label_rtx ();
2390 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
2391 NULL, all_done_label, profile_probability::very_likely ());
2392 expand_arith_set_overflow (lhs, target);
2393 emit_label (all_done_label);
2396 /* s1 * u2 -> sr */
2397 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
2399 rtx_code_label *all_done_label = gen_label_rtx ();
2400 rtx_code_label *set_noovf = gen_label_rtx ();
2401 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
2402 NULL, all_done_label, profile_probability::very_likely ());
2403 expand_arith_set_overflow (lhs, target);
2404 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
2405 NULL, set_noovf, profile_probability::very_likely ());
2406 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
2407 NULL, all_done_label, profile_probability::very_unlikely ());
2408 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
2409 all_done_label, profile_probability::very_unlikely ());
2410 emit_label (set_noovf);
2411 write_complex_part (target, const0_rtx, true, false);
2412 emit_label (all_done_label);
2415 if (lhs)
2417 if (is_ubsan)
2418 expand_ubsan_result_store (target, res);
2419 else
2420 expand_arith_overflow_result_store (lhs, target, mode, res);
2424 /* Expand UBSAN_CHECK_* internal function if it has vector operands. */
2426 static void
2427 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
2428 tree arg0, tree arg1)
2430 poly_uint64 cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
2431 rtx_code_label *loop_lab = NULL;
2432 rtx cntvar = NULL_RTX;
2433 tree cntv = NULL_TREE;
2434 tree eltype = TREE_TYPE (TREE_TYPE (arg0));
2435 tree sz = TYPE_SIZE (eltype);
2436 tree data = NULL_TREE;
2437 tree resv = NULL_TREE;
2438 rtx lhsr = NULL_RTX;
2439 rtx resvr = NULL_RTX;
2440 unsigned HOST_WIDE_INT const_cnt = 0;
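/* For variable-length vectors, or constant-length vectors with more than
   4 elements, emit a loop over the elements rather than unrolling the
   per-element overflow checks.  */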
2441 bool use_loop_p = (!cnt.is_constant (&const_cnt) || const_cnt > 4);
2443 if (lhs)
2445 optab op;
2446 lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2447 if (!VECTOR_MODE_P (GET_MODE (lhsr))
2448 || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
2449 optab_default)) == unknown_optab
2450 || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
2451 == CODE_FOR_nothing))
2453 if (MEM_P (lhsr))
2454 resv = make_tree (TREE_TYPE (lhs), lhsr);
2455 else
2457 resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
2458 resv = make_tree (TREE_TYPE (lhs), resvr);
2462 if (use_loop_p)
2464 do_pending_stack_adjust ();
2465 loop_lab = gen_label_rtx ();
2466 cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
2467 cntv = make_tree (sizetype, cntvar);
2468 emit_move_insn (cntvar, const0_rtx);
2469 emit_label (loop_lab);
2471 if (TREE_CODE (arg0) != VECTOR_CST)
2473 rtx arg0r = expand_normal (arg0);
2474 arg0 = make_tree (TREE_TYPE (arg0), arg0r);
2476 if (TREE_CODE (arg1) != VECTOR_CST)
2478 rtx arg1r = expand_normal (arg1);
2479 arg1 = make_tree (TREE_TYPE (arg1), arg1r);
2481 for (unsigned int i = 0; i < (use_loop_p ? 1 : const_cnt); i++)
2483 tree op0, op1, res = NULL_TREE;
2484 if (use_loop_p)
2486 tree atype = build_array_type_nelts (eltype, cnt);
2487 op0 = uniform_vector_p (arg0);
2488 if (op0 == NULL_TREE)
2490 op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
2491 op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
2492 NULL_TREE, NULL_TREE);
2494 op1 = uniform_vector_p (arg1);
2495 if (op1 == NULL_TREE)
2497 op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
2498 op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
2499 NULL_TREE, NULL_TREE);
2501 if (resv)
2503 res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
2504 res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
2505 NULL_TREE, NULL_TREE);
2508 else
2510 tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
2511 op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
2512 op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
2513 if (resv)
2514 res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
2515 bitpos);
2517 switch (code)
2519 case PLUS_EXPR:
2520 expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
2521 false, false, false, true, &data);
2522 break;
2523 case MINUS_EXPR:
2524 if (use_loop_p ? integer_zerop (arg0) : integer_zerop (op0))
2525 expand_neg_overflow (loc, res, op1, true, &data);
2526 else
2527 expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
2528 false, false, false, true, &data);
2529 break;
2530 case MULT_EXPR:
2531 expand_mul_overflow (loc, res, op0, op1, false, false, false,
2532 true, &data);
2533 break;
2534 default:
2535 gcc_unreachable ();
2538 if (use_loop_p)
2540 struct separate_ops ops;
2541 ops.code = PLUS_EXPR;
2542 ops.type = TREE_TYPE (cntv);
2543 ops.op0 = cntv;
2544 ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
2545 ops.op2 = NULL_TREE;
2546 ops.location = loc;
2547 rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
2548 EXPAND_NORMAL);
2549 if (ret != cntvar)
2550 emit_move_insn (cntvar, ret);
2551 rtx cntrtx = gen_int_mode (cnt, TYPE_MODE (sizetype));
2552 do_compare_rtx_and_jump (cntvar, cntrtx, NE, false,
2553 TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
2554 profile_probability::very_likely ());
2556 if (lhs && resv == NULL_TREE)
2558 struct separate_ops ops;
2559 ops.code = code;
2560 ops.type = TREE_TYPE (arg0);
2561 ops.op0 = arg0;
2562 ops.op1 = arg1;
2563 ops.op2 = NULL_TREE;
2564 ops.location = loc;
2565 rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
2566 EXPAND_NORMAL);
2567 if (ret != lhsr)
2568 emit_move_insn (lhsr, ret);
2570 else if (resvr)
2571 emit_move_insn (lhsr, resvr);
2574 /* Expand UBSAN_CHECK_ADD call STMT. */
2576 static void
2577 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2579 location_t loc = gimple_location (stmt);
2580 tree lhs = gimple_call_lhs (stmt);
2581 tree arg0 = gimple_call_arg (stmt, 0);
2582 tree arg1 = gimple_call_arg (stmt, 1);
2583 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2584 expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2585 else
2586 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2587 false, false, false, true, NULL);
2590 /* Expand UBSAN_CHECK_SUB call STMT. */
2592 static void
2593 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2595 location_t loc = gimple_location (stmt);
2596 tree lhs = gimple_call_lhs (stmt);
2597 tree arg0 = gimple_call_arg (stmt, 0);
2598 tree arg1 = gimple_call_arg (stmt, 1);
2599 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2600 expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2601 else if (integer_zerop (arg0))
2602 expand_neg_overflow (loc, lhs, arg1, true, NULL);
2603 else
2604 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2605 false, false, false, true, NULL);
2608 /* Expand UBSAN_CHECK_MUL call STMT. */
2610 static void
2611 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2613 location_t loc = gimple_location (stmt);
2614 tree lhs = gimple_call_lhs (stmt);
2615 tree arg0 = gimple_call_arg (stmt, 0);
2616 tree arg1 = gimple_call_arg (stmt, 1);
2617 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2618 expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2619 else
2620 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2621 NULL);
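/* The UBSAN_CHECK_{ADD,SUB,MUL} expanders above handle calls emitted by
   -fsanitize=signed-integer-overflow instrumentation, e.g. (illustrative
   GIMPLE, not taken from a testcase)
     _3 = .UBSAN_CHECK_ADD (a_1, b_2);
   where _3 receives the arithmetic result and the error path calls into
   the ubsan runtime via ubsan_build_overflow_builtin.  */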
2624 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
2626 static void
2627 expand_arith_overflow (enum tree_code code, gimple *stmt)
2629 tree lhs = gimple_call_lhs (stmt);
2630 if (lhs == NULL_TREE)
2631 return;
2632 tree arg0 = gimple_call_arg (stmt, 0);
2633 tree arg1 = gimple_call_arg (stmt, 1);
2634 tree type = TREE_TYPE (TREE_TYPE (lhs));
2635 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2636 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2637 int unsr_p = TYPE_UNSIGNED (type);
2638 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2639 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2640 int precres = TYPE_PRECISION (type);
2641 location_t loc = gimple_location (stmt);
2642 if (!uns0_p && get_range_pos_neg (arg0) == 1)
2643 uns0_p = true;
2644 if (!uns1_p && get_range_pos_neg (arg1) == 1)
2645 uns1_p = true;
2646 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2647 prec0 = MIN (prec0, pr);
2648 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2649 prec1 = MIN (prec1, pr);
2651 /* If uns0_p && uns1_p, precop is minimum needed precision
2652 of unsigned type to hold the exact result, otherwise
2653 precop is minimum needed precision of signed type to
2654 hold the exact result. */
2655 int precop;
2656 if (code == MULT_EXPR)
2657 precop = prec0 + prec1 + (uns0_p != uns1_p);
2658 else
2660 if (uns0_p == uns1_p)
2661 precop = MAX (prec0, prec1) + 1;
2662 else if (uns0_p)
2663 precop = MAX (prec0 + 1, prec1) + 1;
2664 else
2665 precop = MAX (prec0, prec1 + 1) + 1;
2667 int orig_precres = precres;
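/* For example (assuming no range refinement above): for
   __builtin_add_overflow with an int operand and an unsigned int operand,
   prec0 == prec1 == 32 and uns0_p != uns1_p, so precop is
   MAX (32, 32 + 1) + 1 == 34, and for a 32-bit result the loop below
   typically retries with a wider internal type before the checks can be
   expanded.  */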
2671 if ((uns0_p && uns1_p)
2672 ? ((precop + !unsr_p) <= precres
2673 /* u1 - u2 -> ur can overflow, no matter what precision
2674 the result has. */
2675 && (code != MINUS_EXPR || !unsr_p))
2676 : (!unsr_p && precop <= precres))
2678 /* The infinite precision result will always fit into the result. */
2679 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2680 write_complex_part (target, const0_rtx, true, false);
2681 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2682 struct separate_ops ops;
2683 ops.code = code;
2684 ops.type = type;
2685 ops.op0 = fold_convert_loc (loc, type, arg0);
2686 ops.op1 = fold_convert_loc (loc, type, arg1);
2687 ops.op2 = NULL_TREE;
2688 ops.location = loc;
2689 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2690 expand_arith_overflow_result_store (lhs, target, mode, tem);
2691 return;
2694 /* For operations with low precision, if the target doesn't have them, start
2695 with precres widening right away, otherwise do it only if the
2696 simplest cases can't be used. */
2697 const int min_precision = targetm.min_arithmetic_precision ();
2698 if (orig_precres == precres && precres < min_precision)
2700 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2701 && prec1 <= precres)
2702 || ((!uns0_p || !uns1_p) && !unsr_p
2703 && prec0 + uns0_p <= precres
2704 && prec1 + uns1_p <= precres))
2706 arg0 = fold_convert_loc (loc, type, arg0);
2707 arg1 = fold_convert_loc (loc, type, arg1);
2708 switch (code)
2710 case MINUS_EXPR:
2711 if (integer_zerop (arg0) && !unsr_p)
2713 expand_neg_overflow (loc, lhs, arg1, false, NULL);
2714 return;
2716 /* FALLTHRU */
2717 case PLUS_EXPR:
2718 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2719 unsr_p, unsr_p, false, NULL);
2720 return;
2721 case MULT_EXPR:
2722 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2723 unsr_p, unsr_p, false, NULL);
2724 return;
2725 default:
2726 gcc_unreachable ();
2730 /* For sub-word operations, retry with a wider type first. */
2731 if (orig_precres == precres && precop <= BITS_PER_WORD)
2733 int p = MAX (min_precision, precop);
2734 scalar_int_mode m = smallest_int_mode_for_size (p);
2735 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2736 uns0_p && uns1_p
2737 && unsr_p);
2738 p = TYPE_PRECISION (optype);
2739 if (p > precres)
2741 precres = p;
2742 unsr_p = TYPE_UNSIGNED (optype);
2743 type = optype;
2744 continue;
2748 if (prec0 <= precres && prec1 <= precres)
2750 tree types[2];
2751 if (unsr_p)
2753 types[0] = build_nonstandard_integer_type (precres, 0);
2754 types[1] = type;
2756 else
2758 types[0] = type;
2759 types[1] = build_nonstandard_integer_type (precres, 1);
2761 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2762 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2763 if (code != MULT_EXPR)
2764 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2765 uns0_p, uns1_p, false, NULL);
2766 else
2767 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2768 uns0_p, uns1_p, false, NULL);
2769 return;
2772 /* Retry with a wider type. */
2773 if (orig_precres == precres)
2775 int p = MAX (prec0, prec1);
2776 scalar_int_mode m = smallest_int_mode_for_size (p);
2777 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2778 uns0_p && uns1_p
2779 && unsr_p);
2780 p = TYPE_PRECISION (optype);
2781 if (p > precres)
2783 precres = p;
2784 unsr_p = TYPE_UNSIGNED (optype);
2785 type = optype;
2786 continue;
2790 gcc_unreachable ();
2792 while (1);
2795 /* Expand ADD_OVERFLOW STMT. */
2797 static void
2798 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2800 expand_arith_overflow (PLUS_EXPR, stmt);
2803 /* Expand SUB_OVERFLOW STMT. */
2805 static void
2806 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2808 expand_arith_overflow (MINUS_EXPR, stmt);
2811 /* Expand MUL_OVERFLOW STMT. */
2813 static void
2814 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2816 expand_arith_overflow (MULT_EXPR, stmt);
2819 /* Expand UADDC STMT. */
2821 static void
2822 expand_UADDC (internal_fn ifn, gcall *stmt)
2824 tree lhs = gimple_call_lhs (stmt);
2825 tree arg1 = gimple_call_arg (stmt, 0);
2826 tree arg2 = gimple_call_arg (stmt, 1);
2827 tree arg3 = gimple_call_arg (stmt, 2);
2828 tree type = TREE_TYPE (arg1);
2829 machine_mode mode = TYPE_MODE (type);
2830 insn_code icode = optab_handler (ifn == IFN_UADDC
2831 ? uaddc5_optab : usubc5_optab, mode);
2832 rtx op1 = expand_normal (arg1);
2833 rtx op2 = expand_normal (arg2);
2834 rtx op3 = expand_normal (arg3);
2835 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2836 rtx re = gen_reg_rtx (mode);
2837 rtx im = gen_reg_rtx (mode);
2838 class expand_operand ops[5];
2839 create_output_operand (&ops[0], re, mode);
2840 create_output_operand (&ops[1], im, mode);
2841 create_input_operand (&ops[2], op1, mode);
2842 create_input_operand (&ops[3], op2, mode);
2843 create_input_operand (&ops[4], op3, mode);
2844 expand_insn (icode, 5, ops);
2845 write_complex_part (target, re, false, false);
2846 write_complex_part (target, im, true, false);
2849 /* Expand USUBC STMT. */
2851 static void
2852 expand_USUBC (internal_fn ifn, gcall *stmt)
2854 expand_UADDC (ifn, stmt);
2857 /* This should get folded in tree-vectorizer.cc. */
2859 static void
2860 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2862 gcc_unreachable ();
2865 /* This should get folded in tree-vectorizer.cc. */
2867 static void
2868 expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
2870 gcc_unreachable ();
2873 /* Return a memory reference of type TYPE for argument INDEX of STMT.
2874 Use argument INDEX + 1 to derive the second (TBAA) operand. */
2876 static tree
2877 expand_call_mem_ref (tree type, gcall *stmt, int index)
2879 tree addr = gimple_call_arg (stmt, index);
2880 tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
2881 unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
2882 if (TYPE_ALIGN (type) != align)
2883 type = build_aligned_type (type, align);
2885 tree tmp = addr;
2886 if (TREE_CODE (tmp) == SSA_NAME)
2888 gimple *def = SSA_NAME_DEF_STMT (tmp);
2889 if (gimple_assign_single_p (def))
2890 tmp = gimple_assign_rhs1 (def);
2893 if (TREE_CODE (tmp) == ADDR_EXPR)
2895 tree mem = TREE_OPERAND (tmp, 0);
2896 if (TREE_CODE (mem) == TARGET_MEM_REF
2897 && types_compatible_p (TREE_TYPE (mem), type))
2899 tree offset = TMR_OFFSET (mem);
2900 if (type != TREE_TYPE (mem)
2901 || alias_ptr_type != TREE_TYPE (offset)
2902 || !integer_zerop (offset))
2904 mem = copy_node (mem);
2905 TMR_OFFSET (mem) = wide_int_to_tree (alias_ptr_type,
2906 wi::to_poly_wide (offset));
2907 TREE_TYPE (mem) = type;
2909 return mem;
2913 return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
2916 /* Expand MASK_LOAD{,_LANES}, MASK_LEN_LOAD or LEN_LOAD call STMT using
2917 optab OPTAB. */
2919 static void
2920 expand_partial_load_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
2922 int i = 0;
2923 class expand_operand ops[5];
2924 tree type, lhs, rhs, maskt;
2925 rtx mem, target;
2926 insn_code icode;
2928 maskt = gimple_call_arg (stmt, internal_fn_mask_index (ifn));
2929 lhs = gimple_call_lhs (stmt);
2930 if (lhs == NULL_TREE)
2931 return;
2932 type = TREE_TYPE (lhs);
2933 rhs = expand_call_mem_ref (type, stmt, 0);
2935 if (optab == vec_mask_load_lanes_optab
2936 || optab == vec_mask_len_load_lanes_optab)
2937 icode = get_multi_vector_move (type, optab);
2938 else if (optab == len_load_optab)
2939 icode = direct_optab_handler (optab, TYPE_MODE (type));
2940 else
2941 icode = convert_optab_handler (optab, TYPE_MODE (type),
2942 TYPE_MODE (TREE_TYPE (maskt)));
2944 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2945 gcc_assert (MEM_P (mem));
2946 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2947 create_output_operand (&ops[i++], target, TYPE_MODE (type));
2948 create_fixed_operand (&ops[i++], mem);
2949 i = add_mask_and_len_args (ops, i, stmt);
2950 expand_insn (icode, i, ops);
2952 if (!rtx_equal_p (target, ops[0].value))
2953 emit_move_insn (target, ops[0].value);
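/* In GIMPLE dumps such a masked load appears roughly as (illustrative)
     vect__1.6_12 = .MASK_LOAD (_10, 32B, mask_11);
   where the second operand carries the alias pointer type and the
   alignment decoded by expand_call_mem_ref above.  */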
2956 #define expand_mask_load_optab_fn expand_partial_load_optab_fn
2957 #define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
2958 #define expand_len_load_optab_fn expand_partial_load_optab_fn
2959 #define expand_mask_len_load_optab_fn expand_partial_load_optab_fn
2961 /* Expand MASK_STORE{,_LANES}, MASK_LEN_STORE or LEN_STORE call STMT using
2962 optab OPTAB. */
2964 static void
2965 expand_partial_store_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
2967 int i = 0;
2968 class expand_operand ops[5];
2969 tree type, lhs, rhs, maskt;
2970 rtx mem, reg;
2971 insn_code icode;
2973 maskt = gimple_call_arg (stmt, internal_fn_mask_index (ifn));
2974 rhs = gimple_call_arg (stmt, internal_fn_stored_value_index (ifn));
2975 type = TREE_TYPE (rhs);
2976 lhs = expand_call_mem_ref (type, stmt, 0);
2978 if (optab == vec_mask_store_lanes_optab
2979 || optab == vec_mask_len_store_lanes_optab)
2980 icode = get_multi_vector_move (type, optab);
2981 else if (optab == len_store_optab)
2982 icode = direct_optab_handler (optab, TYPE_MODE (type));
2983 else
2984 icode = convert_optab_handler (optab, TYPE_MODE (type),
2985 TYPE_MODE (TREE_TYPE (maskt)));
2987 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2988 gcc_assert (MEM_P (mem));
2989 reg = expand_normal (rhs);
2990 create_fixed_operand (&ops[i++], mem);
2991 create_input_operand (&ops[i++], reg, TYPE_MODE (type));
2992 i = add_mask_and_len_args (ops, i, stmt);
2993 expand_insn (icode, i, ops);
2996 #define expand_mask_store_optab_fn expand_partial_store_optab_fn
2997 #define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
2998 #define expand_len_store_optab_fn expand_partial_store_optab_fn
2999 #define expand_mask_len_store_optab_fn expand_partial_store_optab_fn
3001 /* Expand VCOND, VCONDU and VCONDEQ optab internal functions.
3002 The expansion of STMT is based on the associated OPTAB. */
3004 static void
3005 expand_vec_cond_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3007 class expand_operand ops[6];
3008 insn_code icode;
3009 tree lhs = gimple_call_lhs (stmt);
3010 tree op0a = gimple_call_arg (stmt, 0);
3011 tree op0b = gimple_call_arg (stmt, 1);
3012 tree op1 = gimple_call_arg (stmt, 2);
3013 tree op2 = gimple_call_arg (stmt, 3);
3014 enum tree_code tcode = (tree_code) int_cst_value (gimple_call_arg (stmt, 4));
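/* The call represents, element-wise, lhs = (op0a TCODE op0b) ? op1 : op2.  */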
3016 tree vec_cond_type = TREE_TYPE (lhs);
3017 tree op_mode = TREE_TYPE (op0a);
3018 bool unsignedp = TYPE_UNSIGNED (op_mode);
3020 machine_mode mode = TYPE_MODE (vec_cond_type);
3021 machine_mode cmp_op_mode = TYPE_MODE (op_mode);
3023 icode = convert_optab_handler (optab, mode, cmp_op_mode);
3024 rtx comparison
3025 = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp, icode, 4);
3026 /* vector_compare_rtx legitimizes operands, preserve equality when
3027 expanding op1/op2. */
3028 rtx rtx_op1, rtx_op2;
3029 if (operand_equal_p (op1, op0a))
3030 rtx_op1 = XEXP (comparison, 0);
3031 else if (operand_equal_p (op1, op0b))
3032 rtx_op1 = XEXP (comparison, 1);
3033 else
3034 rtx_op1 = expand_normal (op1);
3035 if (operand_equal_p (op2, op0a))
3036 rtx_op2 = XEXP (comparison, 0);
3037 else if (operand_equal_p (op2, op0b))
3038 rtx_op2 = XEXP (comparison, 1);
3039 else
3040 rtx_op2 = expand_normal (op2);
3042 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3043 create_output_operand (&ops[0], target, mode);
3044 create_input_operand (&ops[1], rtx_op1, mode);
3045 create_input_operand (&ops[2], rtx_op2, mode);
3046 create_fixed_operand (&ops[3], comparison);
3047 create_fixed_operand (&ops[4], XEXP (comparison, 0));
3048 create_fixed_operand (&ops[5], XEXP (comparison, 1));
3049 expand_insn (icode, 6, ops);
3050 if (!rtx_equal_p (ops[0].value, target))
3051 emit_move_insn (target, ops[0].value);
3054 /* Expand VCOND_MASK optab internal function.
3055 The expansion of STMT is based on the associated OPTAB. */
3057 static void
3058 expand_vec_cond_mask_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3060 class expand_operand ops[4];
3062 tree lhs = gimple_call_lhs (stmt);
3063 tree op0 = gimple_call_arg (stmt, 0);
3064 tree op1 = gimple_call_arg (stmt, 1);
3065 tree op2 = gimple_call_arg (stmt, 2);
3066 tree vec_cond_type = TREE_TYPE (lhs);
3068 machine_mode mode = TYPE_MODE (vec_cond_type);
3069 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
3070 enum insn_code icode = convert_optab_handler (optab, mode, mask_mode);
3071 rtx mask, rtx_op1, rtx_op2;
3073 gcc_assert (icode != CODE_FOR_nothing);
3075 mask = expand_normal (op0);
3076 rtx_op1 = expand_normal (op1);
3077 rtx_op2 = expand_normal (op2);
3079 mask = force_reg (mask_mode, mask);
3080 rtx_op1 = force_reg (mode, rtx_op1);
3082 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3083 create_output_operand (&ops[0], target, mode);
3084 create_input_operand (&ops[1], rtx_op1, mode);
3085 create_input_operand (&ops[2], rtx_op2, mode);
3086 create_input_operand (&ops[3], mask, mask_mode);
3087 expand_insn (icode, 4, ops);
3088 if (!rtx_equal_p (ops[0].value, target))
3089 emit_move_insn (target, ops[0].value);
3092 /* Expand VEC_SET internal functions. */
3094 static void
3095 expand_vec_set_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3097 tree lhs = gimple_call_lhs (stmt);
3098 tree op0 = gimple_call_arg (stmt, 0);
3099 tree op1 = gimple_call_arg (stmt, 1);
3100 tree op2 = gimple_call_arg (stmt, 2);
3101 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3102 rtx src = expand_normal (op0);
3104 machine_mode outermode = TYPE_MODE (TREE_TYPE (op0));
3105 scalar_mode innermode = GET_MODE_INNER (outermode);
3107 rtx value = expand_normal (op1);
3108 rtx pos = expand_normal (op2);
3110 class expand_operand ops[3];
3111 enum insn_code icode = optab_handler (optab, outermode);
3113 if (icode != CODE_FOR_nothing)
3115 rtx temp = gen_reg_rtx (outermode);
3116 emit_move_insn (temp, src);
3118 create_fixed_operand (&ops[0], temp);
3119 create_input_operand (&ops[1], value, innermode);
3120 create_convert_operand_from (&ops[2], pos, TYPE_MODE (TREE_TYPE (op2)),
3121 true);
3122 if (maybe_expand_insn (icode, 3, ops))
3124 emit_move_insn (target, temp);
3125 return;
3128 gcc_unreachable ();
3131 static void
3132 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
3136 static void
3137 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
3139 /* When guessing was done, the hints should already have been stripped away. */
3140 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
3142 rtx target;
3143 tree lhs = gimple_call_lhs (stmt);
3144 if (lhs)
3145 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3146 else
3147 target = const0_rtx;
3148 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
3149 if (lhs && val != target)
3150 emit_move_insn (target, val);
3153 /* IFN_VA_ARG is supposed to be expanded at pass_stdarg. So this dummy function
3154 should never be called. */
3156 static void
3157 expand_VA_ARG (internal_fn, gcall *)
3159 gcc_unreachable ();
3162 /* IFN_VEC_CONVERT is supposed to be expanded at pass_lower_vector. So this
3163 dummy function should never be called. */
3165 static void
3166 expand_VEC_CONVERT (internal_fn, gcall *)
3168 gcc_unreachable ();
3171 /* Expand IFN_RAWMEMCHAR internal function. */
3173 void
3174 expand_RAWMEMCHR (internal_fn, gcall *stmt)
3176 expand_operand ops[3];
3178 tree lhs = gimple_call_lhs (stmt);
3179 if (!lhs)
3180 return;
3181 machine_mode lhs_mode = TYPE_MODE (TREE_TYPE (lhs));
3182 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3183 create_output_operand (&ops[0], lhs_rtx, lhs_mode);
3185 tree mem = gimple_call_arg (stmt, 0);
3186 rtx mem_rtx = get_memory_rtx (mem, NULL);
3187 create_fixed_operand (&ops[1], mem_rtx);
3189 tree pattern = gimple_call_arg (stmt, 1);
3190 machine_mode mode = TYPE_MODE (TREE_TYPE (pattern));
3191 rtx pattern_rtx = expand_normal (pattern);
3192 create_input_operand (&ops[2], pattern_rtx, mode);
3194 insn_code icode = direct_optab_handler (rawmemchr_optab, mode);
3196 expand_insn (icode, 3, ops);
3197 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3198 emit_move_insn (lhs_rtx, ops[0].value);
3201 /* Expand the IFN_UNIQUE function according to its first argument. */
3203 static void
3204 expand_UNIQUE (internal_fn, gcall *stmt)
3206 rtx pattern = NULL_RTX;
3207 enum ifn_unique_kind kind
3208 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
3210 switch (kind)
3212 default:
3213 gcc_unreachable ();
3215 case IFN_UNIQUE_UNSPEC:
3216 if (targetm.have_unique ())
3217 pattern = targetm.gen_unique ();
3218 break;
3220 case IFN_UNIQUE_OACC_FORK:
3221 case IFN_UNIQUE_OACC_JOIN:
3222 if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
3224 tree lhs = gimple_call_lhs (stmt);
3225 rtx target = const0_rtx;
3227 if (lhs)
3228 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3230 rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
3231 rtx axis = expand_normal (gimple_call_arg (stmt, 2));
3233 if (kind == IFN_UNIQUE_OACC_FORK)
3234 pattern = targetm.gen_oacc_fork (target, data_dep, axis);
3235 else
3236 pattern = targetm.gen_oacc_join (target, data_dep, axis);
3238 else
3239 gcc_unreachable ();
3240 break;
3243 if (pattern)
3244 emit_insn (pattern);
3247 /* Expand the IFN_DEFERRED_INIT function:
3248 LHS = DEFERRED_INIT (SIZE of the DECL, INIT_TYPE, NAME of the DECL);
3250 Initialize the LHS with zero/pattern according to its second argument
3251 INIT_TYPE:
3252 if INIT_TYPE is AUTO_INIT_ZERO, use zeroes to initialize;
3253 if INIT_TYPE is AUTO_INIT_PATTERN, use the 0xFE byte-repeatable pattern
3254 to initialize;
3255 The LHS variable is initialized including its padding.
3256 The reasons to choose 0xFE for pattern initialization are:
3257 1. It is a non-canonical virtual address on x86_64, and at the
3258 high end of the i386 kernel address space.
3259 2. It is a very large float value (-1.694739530317379e+38).
3260 3. It is also an unusual number for integers. */
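/* For example (an illustrative sketch, not a dumped testcase): with
   -ftrivial-auto-var-init=pattern an uninitialized 'char buf[32];' is
   expanded roughly like memset (&buf, 0xFE, 32), while variables that
   live in registers take the expand_assignment path below instead.  */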
3261 #define INIT_PATTERN_VALUE 0xFE
3262 static void
3263 expand_DEFERRED_INIT (internal_fn, gcall *stmt)
3265 tree lhs = gimple_call_lhs (stmt);
3266 tree var_size = gimple_call_arg (stmt, 0);
3267 enum auto_init_type init_type
3268 = (enum auto_init_type) TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
3269 bool reg_lhs = true;
3271 tree var_type = TREE_TYPE (lhs);
3272 gcc_assert (init_type > AUTO_INIT_UNINITIALIZED);
3274 if (TREE_CODE (lhs) == SSA_NAME)
3275 reg_lhs = true;
3276 else
3278 tree lhs_base = lhs;
3279 while (handled_component_p (lhs_base))
3280 lhs_base = TREE_OPERAND (lhs_base, 0);
3281 reg_lhs = (mem_ref_refers_to_non_mem_p (lhs_base)
3282 || non_mem_decl_p (lhs_base));
3283 /* If this expands to a register and the underlying decl is wrapped in
3284 a MEM_REF that just serves as an access type change, expose the decl
3285 if it is of correct size. This avoids a situation as in PR103271
3286 if the target does not support a direct move to the registers mode. */
3287 if (reg_lhs
3288 && TREE_CODE (lhs_base) == MEM_REF
3289 && TREE_CODE (TREE_OPERAND (lhs_base, 0)) == ADDR_EXPR
3290 && DECL_P (TREE_OPERAND (TREE_OPERAND (lhs_base, 0), 0))
3291 && integer_zerop (TREE_OPERAND (lhs_base, 1))
3292 && tree_fits_uhwi_p (var_size)
3293 && tree_int_cst_equal
3294 (var_size,
3295 DECL_SIZE_UNIT (TREE_OPERAND (TREE_OPERAND (lhs_base, 0), 0))))
3297 lhs = TREE_OPERAND (TREE_OPERAND (lhs_base, 0), 0);
3298 var_type = TREE_TYPE (lhs);
3302 if (!reg_lhs)
3304 /* If the variable is not in a register, expand to a memset
3305 to initialize it. */
3306 mark_addressable (lhs);
3307 tree var_addr = build_fold_addr_expr (lhs);
3309 tree value = (init_type == AUTO_INIT_PATTERN)
3310 ? build_int_cst (integer_type_node,
3311 INIT_PATTERN_VALUE)
3312 : integer_zero_node;
3313 tree m_call = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMSET),
3314 3, var_addr, value, var_size);
3315 /* Expand this memset call. */
3316 expand_builtin_memset (m_call, NULL_RTX, TYPE_MODE (var_type));
3318 else
3320 /* If this variable is in a register, use expand_assignment.
3321 For boolean scalars force zero-init. */
3322 tree init;
3323 scalar_int_mode var_mode;
3324 if (TREE_CODE (TREE_TYPE (lhs)) != BOOLEAN_TYPE
3325 && tree_fits_uhwi_p (var_size)
3326 && (init_type == AUTO_INIT_PATTERN
3327 || !is_gimple_reg_type (var_type))
3328 && int_mode_for_size (tree_to_uhwi (var_size) * BITS_PER_UNIT,
3329 0).exists (&var_mode)
3330 && have_insn_for (SET, var_mode))
3332 unsigned HOST_WIDE_INT total_bytes = tree_to_uhwi (var_size);
3333 unsigned char *buf = XALLOCAVEC (unsigned char, total_bytes);
3334 memset (buf, (init_type == AUTO_INIT_PATTERN
3335 ? INIT_PATTERN_VALUE : 0), total_bytes);
3336 tree itype = build_nonstandard_integer_type
3337 (total_bytes * BITS_PER_UNIT, 1);
3338 wide_int w = wi::from_buffer (buf, total_bytes);
3339 init = wide_int_to_tree (itype, w);
3340 /* Pun the LHS to make sure its type has constant size
3341 unless it is an SSA name where that's already known. */
3342 if (TREE_CODE (lhs) != SSA_NAME)
3343 lhs = build1 (VIEW_CONVERT_EXPR, itype, lhs);
3344 else
3345 init = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), init);
3347 else
3348 /* Use zero-init also for variable-length sizes. */
3349 init = build_zero_cst (var_type);
3351 expand_assignment (lhs, init, false);
3355 /* The size of an OpenACC compute dimension. */
3357 static void
3358 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
3360 tree lhs = gimple_call_lhs (stmt);
3362 if (!lhs)
3363 return;
3365 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3366 if (targetm.have_oacc_dim_size ())
3368 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
3369 VOIDmode, EXPAND_NORMAL);
3370 emit_insn (targetm.gen_oacc_dim_size (target, dim));
3372 else
3373 emit_move_insn (target, GEN_INT (1));
3376 /* The position of an OpenACC execution engine along one compute axis. */
3378 static void
3379 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
3381 tree lhs = gimple_call_lhs (stmt);
3383 if (!lhs)
3384 return;
3386 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3387 if (targetm.have_oacc_dim_pos ())
3389 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
3390 VOIDmode, EXPAND_NORMAL);
3391 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
3393 else
3394 emit_move_insn (target, const0_rtx);
3397 /* This is expanded by oacc_device_lower pass. */
3399 static void
3400 expand_GOACC_LOOP (internal_fn, gcall *)
3402 gcc_unreachable ();
3405 /* This is expanded by oacc_device_lower pass. */
3407 static void
3408 expand_GOACC_REDUCTION (internal_fn, gcall *)
3410 gcc_unreachable ();
3413 /* This is expanded by oacc_device_lower pass. */
3415 static void
3416 expand_GOACC_TILE (internal_fn, gcall *)
3418 gcc_unreachable ();
3421 /* Set errno to EDOM. */
3423 static void
3424 expand_SET_EDOM (internal_fn, gcall *)
3426 #ifdef TARGET_EDOM
3427 #ifdef GEN_ERRNO_RTX
3428 rtx errno_rtx = GEN_ERRNO_RTX;
3429 #else
3430 rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
3431 #endif
3432 emit_move_insn (errno_rtx,
3433 gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
3434 #else
3435 gcc_unreachable ();
3436 #endif
3439 /* Expand atomic bit test and set. */
3441 static void
3442 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
3444 expand_ifn_atomic_bit_test_and (call);
3447 /* Expand atomic bit test and complement. */
3449 static void
3450 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
3452 expand_ifn_atomic_bit_test_and (call);
3455 /* Expand atomic bit test and reset. */
3457 static void
3458 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
3460 expand_ifn_atomic_bit_test_and (call);
3463 /* Expand atomic compare and exchange. */
3465 static void
3466 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
3468 expand_ifn_atomic_compare_exchange (call);
3471 /* Expand atomic add fetch and cmp with 0. */
3473 static void
3474 expand_ATOMIC_ADD_FETCH_CMP_0 (internal_fn, gcall *call)
3476 expand_ifn_atomic_op_fetch_cmp_0 (call);
3479 /* Expand atomic sub fetch and cmp with 0. */
3481 static void
3482 expand_ATOMIC_SUB_FETCH_CMP_0 (internal_fn, gcall *call)
3484 expand_ifn_atomic_op_fetch_cmp_0 (call);
3487 /* Expand atomic and fetch and cmp with 0. */
3489 static void
3490 expand_ATOMIC_AND_FETCH_CMP_0 (internal_fn, gcall *call)
3492 expand_ifn_atomic_op_fetch_cmp_0 (call);
3495 /* Expand atomic or fetch and cmp with 0. */
3497 static void
3498 expand_ATOMIC_OR_FETCH_CMP_0 (internal_fn, gcall *call)
3500 expand_ifn_atomic_op_fetch_cmp_0 (call);
3503 /* Expand atomic xor fetch and cmp with 0. */
3505 static void
3506 expand_ATOMIC_XOR_FETCH_CMP_0 (internal_fn, gcall *call)
3508 expand_ifn_atomic_op_fetch_cmp_0 (call);
3511 /* Expand LAUNDER to assignment, lhs = arg0. */
3513 static void
3514 expand_LAUNDER (internal_fn, gcall *call)
3516 tree lhs = gimple_call_lhs (call);
3518 if (!lhs)
3519 return;
3521 expand_assignment (lhs, gimple_call_arg (call, 0), false);
3524 /* Expand {MASK_,}SCATTER_STORE{S,U} call CALL using optab OPTAB. */
3526 static void
3527 expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3529 internal_fn ifn = gimple_call_internal_fn (stmt);
3530 int rhs_index = internal_fn_stored_value_index (ifn);
3531 tree base = gimple_call_arg (stmt, 0);
3532 tree offset = gimple_call_arg (stmt, 1);
3533 tree scale = gimple_call_arg (stmt, 2);
3534 tree rhs = gimple_call_arg (stmt, rhs_index);
3536 rtx base_rtx = expand_normal (base);
3537 rtx offset_rtx = expand_normal (offset);
3538 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3539 rtx rhs_rtx = expand_normal (rhs);
3541 class expand_operand ops[8];
3542 int i = 0;
3543 create_address_operand (&ops[i++], base_rtx);
3544 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3545 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3546 create_integer_operand (&ops[i++], scale_int);
3547 create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
3548 i = add_mask_and_len_args (ops, i, stmt);
3550 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)),
3551 TYPE_MODE (TREE_TYPE (offset)));
3552 expand_insn (icode, i, ops);
3555 /* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB. */
3557 static void
3558 expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3560 tree lhs = gimple_call_lhs (stmt);
3561 tree base = gimple_call_arg (stmt, 0);
3562 tree offset = gimple_call_arg (stmt, 1);
3563 tree scale = gimple_call_arg (stmt, 2);
3565 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3566 rtx base_rtx = expand_normal (base);
3567 rtx offset_rtx = expand_normal (offset);
3568 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3570 int i = 0;
3571 class expand_operand ops[8];
3572 create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
3573 create_address_operand (&ops[i++], base_rtx);
3574 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3575 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3576 create_integer_operand (&ops[i++], scale_int);
3577 i = add_mask_and_len_args (ops, i, stmt);
3578 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)),
3579 TYPE_MODE (TREE_TYPE (offset)));
3580 expand_insn (icode, i, ops);
3581 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3582 emit_move_insn (lhs_rtx, ops[0].value);
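/* (Editorial sketch, hedged.)  An unmasked gather such as

       vect_5 = .GATHER_LOAD (base_1, offsets_2, 4);

   maps onto the operands created above: output vect_5, address base_1,
   offset vector offsets_2, offset signedness, scale 4.  The masked and
   length-predicated variants carry extra trailing arguments, which
   add_mask_and_len_args turns into the remaining operands.  */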
3585 /* Helper for expand_DIVMOD. Return true if the sequence starting with
3586 INSN contains any call insns or insns with {,U}{DIV,MOD} rtxes. */
3588 static bool
3589 contains_call_div_mod (rtx_insn *insn)
3591 subrtx_iterator::array_type array;
3592 for (; insn; insn = NEXT_INSN (insn))
3593 if (CALL_P (insn))
3594 return true;
3595 else if (INSN_P (insn))
3596 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3597 switch (GET_CODE (*iter))
3599 case CALL:
3600 case DIV:
3601 case UDIV:
3602 case MOD:
3603 case UMOD:
3604 return true;
3605 default:
3606 break;
3608 return false;
3611 /* Expand DIVMOD () using:
3612 a) the optab handler for udivmod/sdivmod, if it is available;
3613 b) if no optab handler exists, a call to the target-specific
3614 divmod libfunc. */
3616 static void
3617 expand_DIVMOD (internal_fn, gcall *call_stmt)
3619 tree lhs = gimple_call_lhs (call_stmt);
3620 tree arg0 = gimple_call_arg (call_stmt, 0);
3621 tree arg1 = gimple_call_arg (call_stmt, 1);
3623 gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
3624 tree type = TREE_TYPE (TREE_TYPE (lhs));
3625 machine_mode mode = TYPE_MODE (type);
3626 bool unsignedp = TYPE_UNSIGNED (type);
3627 optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
3629 rtx op0 = expand_normal (arg0);
3630 rtx op1 = expand_normal (arg1);
3631 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3633 rtx quotient = NULL_RTX, remainder = NULL_RTX;
3634 rtx_insn *insns = NULL;
3636 if (TREE_CODE (arg1) == INTEGER_CST)
3638 /* For DIVMOD by an integral constant, efficient code may be
3639 expanded inline, e.g. using shifts and plus/minus. Try to expand
3640 the division and modulo; if that emits any library calls or any
3641 {,U}{DIV,MOD} rtxes, throw the sequence away and use a divmod
3642 optab or divmod libcall instead. */
3643 scalar_int_mode int_mode;
3644 if (remainder == NULL_RTX
3645 && optimize
3646 && CONST_INT_P (op1)
3647 && !pow2p_hwi (INTVAL (op1))
3648 && is_int_mode (TYPE_MODE (type), &int_mode)
3649 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3650 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
3651 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
3652 && optimize_insn_for_speed_p ())
3654 rtx_insn *last = get_last_insn ();
3655 remainder = NULL_RTX;
3656 quotient = expand_doubleword_divmod (int_mode, op0, op1, &remainder,
3657 TYPE_UNSIGNED (type));
3658 if (quotient != NULL_RTX)
3660 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
3662 rtx_insn *move = emit_move_insn (quotient, quotient);
3663 set_dst_reg_note (move, REG_EQUAL,
3664 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3665 ? UDIV : DIV, int_mode,
3666 copy_rtx (op0), op1),
3667 quotient);
3668 move = emit_move_insn (remainder, remainder);
3669 set_dst_reg_note (move, REG_EQUAL,
3670 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3671 ? UMOD : MOD, int_mode,
3672 copy_rtx (op0), op1),
3673 remainder);
3676 else
3677 delete_insns_since (last);
3680 if (remainder == NULL_RTX)
3682 struct separate_ops ops;
3683 ops.code = TRUNC_DIV_EXPR;
3684 ops.type = type;
3685 ops.op0 = make_tree (ops.type, op0);
3686 ops.op1 = arg1;
3687 ops.op2 = NULL_TREE;
3688 ops.location = gimple_location (call_stmt);
3689 start_sequence ();
3690 quotient = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
3691 if (contains_call_div_mod (get_insns ()))
3692 quotient = NULL_RTX;
3693 else
3695 ops.code = TRUNC_MOD_EXPR;
3696 remainder = expand_expr_real_2 (&ops, NULL_RTX, mode,
3697 EXPAND_NORMAL);
3698 if (contains_call_div_mod (get_insns ()))
3699 remainder = NULL_RTX;
3701 if (remainder)
3702 insns = get_insns ();
3703 end_sequence ();
3707 if (remainder)
3708 emit_insn (insns);
3710 /* Check if optab_handler exists for divmod_optab for given mode. */
3711 else if (optab_handler (tab, mode) != CODE_FOR_nothing)
3713 quotient = gen_reg_rtx (mode);
3714 remainder = gen_reg_rtx (mode);
3715 expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
3718 /* Generate call to divmod libfunc if it exists. */
3719 else if (rtx libfunc = optab_libfunc (tab, mode))
3720 targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
3721 &quotient, &remainder);
3723 else
3724 gcc_unreachable ();
3726 /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR. */
3727 expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
3728 make_tree (TREE_TYPE (arg0), quotient),
3729 make_tree (TREE_TYPE (arg1), remainder)),
3730 target, VOIDmode, EXPAND_NORMAL);
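/* (Editorial sketch, hedged.)  At the source level this corresponds to a
   division and modulo sharing operands, e.g.

       int q = x / y, r = x % y;

   which an earlier GIMPLE pass can merge into

       _1 = .DIVMOD (x_2, y_3);
       q_4 = REALPART_EXPR <_1>;
       r_5 = IMAGPART_EXPR <_1>;

   so the COMPLEX_EXPR built above returns both results in one value.  */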
3733 /* Expand a NOP. */
3735 static void
3736 expand_NOP (internal_fn, gcall *)
3738 /* Nothing. But it shouldn't really prevail. */
3741 /* Coroutines, all should have been processed at this stage. */
3743 static void
3744 expand_CO_FRAME (internal_fn, gcall *)
3746 gcc_unreachable ();
3749 static void
3750 expand_CO_YIELD (internal_fn, gcall *)
3752 gcc_unreachable ();
3755 static void
3756 expand_CO_SUSPN (internal_fn, gcall *)
3758 gcc_unreachable ();
3761 static void
3762 expand_CO_ACTOR (internal_fn, gcall *)
3764 gcc_unreachable ();
3767 /* Expand a call to FN using the operands in STMT. FN has a single
3768 output operand and NARGS input operands. */
3770 static void
3771 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
3772 unsigned int nargs)
3774 tree_pair types = direct_internal_fn_types (fn, stmt);
3775 insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
3776 expand_fn_using_insn (stmt, icode, 1, nargs);
3779 /* Expand WHILE_ULT call STMT using optab OPTAB. */
3781 static void
3782 expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3784 expand_operand ops[4];
3785 tree rhs_type[2];
3787 tree lhs = gimple_call_lhs (stmt);
3788 tree lhs_type = TREE_TYPE (lhs);
3789 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3790 create_output_operand (&ops[0], lhs_rtx, TYPE_MODE (lhs_type));
3792 for (unsigned int i = 0; i < 2; ++i)
3794 tree rhs = gimple_call_arg (stmt, i);
3795 rhs_type[i] = TREE_TYPE (rhs);
3796 rtx rhs_rtx = expand_normal (rhs);
3797 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type[i]));
3800 int opcnt;
3801 if (!VECTOR_MODE_P (TYPE_MODE (lhs_type)))
3803 /* When the mask is an integer mode the exact vector length may not
3804 be clear to the backend, so we pass it in operand[3].
3805 Use the vector in arg2 for the most reliable intended size. */
3806 tree type = TREE_TYPE (gimple_call_arg (stmt, 2));
3807 create_integer_operand (&ops[3], TYPE_VECTOR_SUBPARTS (type));
3808 opcnt = 4;
3810 else
3811 /* The mask has a vector type so the length operand is unnecessary. */
3812 opcnt = 3;
3814 insn_code icode = convert_optab_handler (optab, TYPE_MODE (rhs_type[0]),
3815 TYPE_MODE (lhs_type));
3817 expand_insn (icode, opcnt, ops);
3818 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3819 emit_move_insn (lhs_rtx, ops[0].value);
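/* (Editorial sketch, hedged.)  WHILE_ULT produces a loop mask whose element
   I is set iff ARG0 + I < ARG1, roughly

       for (int i = 0; i < NUNITS; i++)
         mask[i] = (arg0 + i < arg1);

   which is why only the two scalar bounds are real inputs and, for integer
   mask modes, the vector in arg2 is consulted only for its element count.  */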
3822 /* Expand a call to a convert-like optab using the operands in STMT.
3823 FN has a single output operand and NARGS input operands. */
3825 static void
3826 expand_convert_optab_fn (internal_fn fn, gcall *stmt, convert_optab optab,
3827 unsigned int nargs)
3829 tree_pair types = direct_internal_fn_types (fn, stmt);
3830 insn_code icode = convert_optab_handler (optab, TYPE_MODE (types.first),
3831 TYPE_MODE (types.second));
3832 expand_fn_using_insn (stmt, icode, 1, nargs);
3835 /* Expanders for optabs that can use expand_direct_optab_fn. */
3837 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
3838 expand_direct_optab_fn (FN, STMT, OPTAB, 1)
3840 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
3841 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3843 #define expand_ternary_optab_fn(FN, STMT, OPTAB) \
3844 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3846 #define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
3847 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3849 #define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
3850 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3852 #define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
3853 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3855 #define expand_cond_len_unary_optab_fn(FN, STMT, OPTAB) \
3856 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3858 #define expand_cond_len_binary_optab_fn(FN, STMT, OPTAB) \
3859 expand_direct_optab_fn (FN, STMT, OPTAB, 6)
3861 #define expand_cond_len_ternary_optab_fn(FN, STMT, OPTAB) \
3862 expand_direct_optab_fn (FN, STMT, OPTAB, 7)
3864 #define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
3865 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3867 #define expand_fold_len_extract_optab_fn(FN, STMT, OPTAB) \
3868 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3870 #define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
3871 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3873 #define expand_mask_fold_left_optab_fn(FN, STMT, OPTAB) \
3874 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3876 #define expand_mask_len_fold_left_optab_fn(FN, STMT, OPTAB) \
3877 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3879 #define expand_check_ptrs_optab_fn(FN, STMT, OPTAB) \
3880 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3882 /* Expanders for optabs that can use expand_convert_optab_fn. */
3884 #define expand_unary_convert_optab_fn(FN, STMT, OPTAB) \
3885 expand_convert_optab_fn (FN, STMT, OPTAB, 1)
3887 #define expand_vec_extract_optab_fn(FN, STMT, OPTAB) \
3888 expand_convert_optab_fn (FN, STMT, OPTAB, 2)
3890 /* RETURN_TYPE and ARGS are a return type and argument list that are
3891 in principle compatible with FN (which satisfies direct_internal_fn_p).
3892 Return the types that should be used to determine whether the
3893 target supports FN. */
3895 tree_pair
3896 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
3898 const direct_internal_fn_info &info = direct_internal_fn (fn);
3899 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
3900 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
3901 return tree_pair (type0, type1);
3904 /* CALL is a call whose return type and arguments are in principle
3905 compatible with FN (which satisfies direct_internal_fn_p). Return the
3906 types that should be used to determine whether the target supports FN. */
3908 tree_pair
3909 direct_internal_fn_types (internal_fn fn, gcall *call)
3911 const direct_internal_fn_info &info = direct_internal_fn (fn);
3912 tree op0 = (info.type0 < 0
3913 ? gimple_call_lhs (call)
3914 : gimple_call_arg (call, info.type0));
3915 tree op1 = (info.type1 < 0
3916 ? gimple_call_lhs (call)
3917 : gimple_call_arg (call, info.type1));
3918 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
3921 /* Return true if OPTAB is supported for TYPES (whose modes should be
3922 the same) when the optimization type is OPT_TYPE. Used for simple
3923 direct optabs. */
3925 static bool
3926 direct_optab_supported_p (direct_optab optab, tree_pair types,
3927 optimization_type opt_type)
3929 machine_mode mode = TYPE_MODE (types.first);
3930 gcc_checking_assert (mode == TYPE_MODE (types.second));
3931 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
3934 /* Return true if OPTAB is supported for TYPES, where the first type
3935 is the destination and the second type is the source. Used for
3936 convert optabs. */
3938 static bool
3939 convert_optab_supported_p (convert_optab optab, tree_pair types,
3940 optimization_type opt_type)
3942 return (convert_optab_handler (optab, TYPE_MODE (types.first),
3943 TYPE_MODE (types.second), opt_type)
3944 != CODE_FOR_nothing);
3947 /* Return true if load/store lanes optab OPTAB is supported for
3948 array type TYPES.first when the optimization type is OPT_TYPE. */
3950 static bool
3951 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
3952 optimization_type opt_type)
3954 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
3955 machine_mode imode = TYPE_MODE (types.first);
3956 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
3957 return (convert_optab_handler (optab, imode, vmode, opt_type)
3958 != CODE_FOR_nothing);
3961 #define direct_unary_optab_supported_p direct_optab_supported_p
3962 #define direct_unary_convert_optab_supported_p convert_optab_supported_p
3963 #define direct_binary_optab_supported_p direct_optab_supported_p
3964 #define direct_ternary_optab_supported_p direct_optab_supported_p
3965 #define direct_cond_unary_optab_supported_p direct_optab_supported_p
3966 #define direct_cond_binary_optab_supported_p direct_optab_supported_p
3967 #define direct_cond_ternary_optab_supported_p direct_optab_supported_p
3968 #define direct_cond_len_unary_optab_supported_p direct_optab_supported_p
3969 #define direct_cond_len_binary_optab_supported_p direct_optab_supported_p
3970 #define direct_cond_len_ternary_optab_supported_p direct_optab_supported_p
3971 #define direct_mask_load_optab_supported_p convert_optab_supported_p
3972 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
3973 #define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
3974 #define direct_gather_load_optab_supported_p convert_optab_supported_p
3975 #define direct_len_load_optab_supported_p direct_optab_supported_p
3976 #define direct_mask_len_load_optab_supported_p convert_optab_supported_p
3977 #define direct_mask_store_optab_supported_p convert_optab_supported_p
3978 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
3979 #define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
3980 #define direct_vec_cond_mask_optab_supported_p convert_optab_supported_p
3981 #define direct_vec_cond_optab_supported_p convert_optab_supported_p
3982 #define direct_scatter_store_optab_supported_p convert_optab_supported_p
3983 #define direct_len_store_optab_supported_p direct_optab_supported_p
3984 #define direct_mask_len_store_optab_supported_p convert_optab_supported_p
3985 #define direct_while_optab_supported_p convert_optab_supported_p
3986 #define direct_fold_extract_optab_supported_p direct_optab_supported_p
3987 #define direct_fold_len_extract_optab_supported_p direct_optab_supported_p
3988 #define direct_fold_left_optab_supported_p direct_optab_supported_p
3989 #define direct_mask_fold_left_optab_supported_p direct_optab_supported_p
3990 #define direct_mask_len_fold_left_optab_supported_p direct_optab_supported_p
3991 #define direct_check_ptrs_optab_supported_p direct_optab_supported_p
3992 #define direct_vec_set_optab_supported_p direct_optab_supported_p
3993 #define direct_vec_extract_optab_supported_p convert_optab_supported_p
3995 /* Return the optab used by internal function FN. */
3997 optab
3998 direct_internal_fn_optab (internal_fn fn, tree_pair types)
4000 switch (fn)
4002 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
4003 case IFN_##CODE: break;
4004 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
4005 case IFN_##CODE: return OPTAB##_optab;
4006 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
4007 UNSIGNED_OPTAB, TYPE) \
4008 case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR) \
4009 ? UNSIGNED_OPTAB ## _optab \
4010 : SIGNED_OPTAB ## _optab);
4011 #include "internal-fn.def"
4013 case IFN_LAST:
4014 break;
4016 gcc_unreachable ();
4019 /* Return the optab used by internal function FN. */
4021 static optab
4022 direct_internal_fn_optab (internal_fn fn)
4024 switch (fn)
4026 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
4027 case IFN_##CODE: break;
4028 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
4029 case IFN_##CODE: return OPTAB##_optab;
4030 #include "internal-fn.def"
4032 case IFN_LAST:
4033 break;
4035 gcc_unreachable ();
4038 /* Return true if FN is supported for the types in TYPES when the
4039 optimization type is OPT_TYPE. The types are those associated with
4040 the "type0" and "type1" fields of FN's direct_internal_fn_info
4041 structure. */
4043 bool
4044 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
4045 optimization_type opt_type)
4047 switch (fn)
4049 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
4050 case IFN_##CODE: break;
4051 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
4052 case IFN_##CODE: \
4053 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
4054 opt_type);
4055 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
4056 UNSIGNED_OPTAB, TYPE) \
4057 case IFN_##CODE: \
4059 optab which_optab = (TYPE_UNSIGNED (types.SELECTOR) \
4060 ? UNSIGNED_OPTAB ## _optab \
4061 : SIGNED_OPTAB ## _optab); \
4062 return direct_##TYPE##_optab_supported_p (which_optab, types, \
4063 opt_type); \
4065 #include "internal-fn.def"
4067 case IFN_LAST:
4068 break;
4070 gcc_unreachable ();
4073 /* Return true if FN is supported for type TYPE when the optimization
4074 type is OPT_TYPE. The caller knows that the "type0" and "type1"
4075 fields of FN's direct_internal_fn_info structure are the same. */
4077 bool
4078 direct_internal_fn_supported_p (internal_fn fn, tree type,
4079 optimization_type opt_type)
4081 const direct_internal_fn_info &info = direct_internal_fn (fn);
4082 gcc_checking_assert (info.type0 == info.type1);
4083 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
4086 /* Return true if STMT is supported when the optimization type is OPT_TYPE,
4087 given that STMT is a call to a direct internal function. */
4089 bool
4090 direct_internal_fn_supported_p (gcall *stmt, optimization_type opt_type)
4092 internal_fn fn = gimple_call_internal_fn (stmt);
4093 tree_pair types = direct_internal_fn_types (fn, stmt);
4094 return direct_internal_fn_supported_p (fn, types, opt_type);
4097 /* Return true if FN is a binary operation and if FN is commutative. */
4099 bool
4100 commutative_binary_fn_p (internal_fn fn)
4102 switch (fn)
4104 case IFN_AVG_FLOOR:
4105 case IFN_AVG_CEIL:
4106 case IFN_MULH:
4107 case IFN_MULHS:
4108 case IFN_MULHRS:
4109 case IFN_FMIN:
4110 case IFN_FMAX:
4111 case IFN_COMPLEX_MUL:
4112 case IFN_UBSAN_CHECK_ADD:
4113 case IFN_UBSAN_CHECK_MUL:
4114 case IFN_ADD_OVERFLOW:
4115 case IFN_MUL_OVERFLOW:
4116 case IFN_VEC_WIDEN_PLUS:
4117 case IFN_VEC_WIDEN_PLUS_LO:
4118 case IFN_VEC_WIDEN_PLUS_HI:
4119 case IFN_VEC_WIDEN_PLUS_EVEN:
4120 case IFN_VEC_WIDEN_PLUS_ODD:
4121 return true;
4123 default:
4124 return false;
4128 /* Return true if FN is a ternary operation and if its first two arguments
4129 are commutative. */
4131 bool
4132 commutative_ternary_fn_p (internal_fn fn)
4134 switch (fn)
4136 case IFN_FMA:
4137 case IFN_FMS:
4138 case IFN_FNMA:
4139 case IFN_FNMS:
4140 case IFN_UADDC:
4141 return true;
4143 default:
4144 return false;
4148 /* Return true if FN is an associative binary operation. */
4150 bool
4151 associative_binary_fn_p (internal_fn fn)
4153 switch (fn)
4155 case IFN_FMIN:
4156 case IFN_FMAX:
4157 return true;
4159 default:
4160 return false;
4164 /* If FN is commutative in two consecutive arguments, return the
4165 index of the first, otherwise return -1. */
4168 first_commutative_argument (internal_fn fn)
4170 switch (fn)
4172 case IFN_COND_ADD:
4173 case IFN_COND_MUL:
4174 case IFN_COND_MIN:
4175 case IFN_COND_MAX:
4176 case IFN_COND_FMIN:
4177 case IFN_COND_FMAX:
4178 case IFN_COND_AND:
4179 case IFN_COND_IOR:
4180 case IFN_COND_XOR:
4181 case IFN_COND_FMA:
4182 case IFN_COND_FMS:
4183 case IFN_COND_FNMA:
4184 case IFN_COND_FNMS:
4185 case IFN_COND_LEN_ADD:
4186 case IFN_COND_LEN_MUL:
4187 case IFN_COND_LEN_MIN:
4188 case IFN_COND_LEN_MAX:
4189 case IFN_COND_LEN_FMIN:
4190 case IFN_COND_LEN_FMAX:
4191 case IFN_COND_LEN_AND:
4192 case IFN_COND_LEN_IOR:
4193 case IFN_COND_LEN_XOR:
4194 case IFN_COND_LEN_FMA:
4195 case IFN_COND_LEN_FMS:
4196 case IFN_COND_LEN_FNMA:
4197 case IFN_COND_LEN_FNMS:
4198 return 1;
4200 default:
4201 if (commutative_binary_fn_p (fn)
4202 || commutative_ternary_fn_p (fn))
4203 return 0;
4204 return -1;
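/* (Editorial example, derived from the switch above.)  For IFN_COND_ADD the
   mask is argument 0, so the interchangeable operands are arguments 1 and 2
   and the function returns 1; for a plain commutative binary function such
   as IFN_AVG_FLOOR it returns 0.  */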
4208 /* Return true if CODE describes an internal_fn that returns a vector whose
4209 elements are twice as wide as those of its input vectors. */
4211 bool
4212 widening_fn_p (code_helper code)
4214 if (!code.is_fn_code ())
4215 return false;
4217 if (!internal_fn_p ((combined_fn) code))
4218 return false;
4220 internal_fn fn = as_internal_fn ((combined_fn) code);
4221 switch (fn)
4223 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
4224 #define DEF_INTERNAL_WIDENING_OPTAB_FN(NAME, F, S, SO, UO, T) \
4225 case IFN_##NAME: \
4226 case IFN_##NAME##_HI: \
4227 case IFN_##NAME##_LO: \
4228 case IFN_##NAME##_EVEN: \
4229 case IFN_##NAME##_ODD: \
4230 return true;
4231 #include "internal-fn.def"
4232 #undef DEF_INTERNAL_WIDENING_OPTAB_FN
4234 default:
4235 return false;
4239 /* Return true if IFN_SET_EDOM is supported. */
4241 bool
4242 set_edom_supported_p (void)
4244 #ifdef TARGET_EDOM
4245 return true;
4246 #else
4247 return false;
4248 #endif
4251 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
4252 static void \
4253 expand_##CODE (internal_fn fn, gcall *stmt) \
4255 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
4257 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
4258 UNSIGNED_OPTAB, TYPE) \
4259 static void \
4260 expand_##CODE (internal_fn fn, gcall *stmt) \
4262 tree_pair types = direct_internal_fn_types (fn, stmt); \
4263 optab which_optab = direct_internal_fn_optab (fn, types); \
4264 expand_##TYPE##_optab_fn (fn, stmt, which_optab); \
4266 #include "internal-fn.def"
4267 #undef DEF_INTERNAL_OPTAB_FN
4268 #undef DEF_INTERNAL_SIGNED_OPTAB_FN
4270 /* Routines to expand each internal function, indexed by function number.
4271 Each routine has the prototype:
4273 expand_<NAME> (internal_fn fn, gcall *stmt)
4275 where FN identifies the function and STMT is the statement that performs the call. */
4276 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
4278 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
4279 #include "internal-fn.def"
4283 /* Invoke T(CODE, SUFFIX) for each conditional function IFN_COND_##SUFFIX
4284 that maps to a tree code CODE. There is also an IFN_COND_LEN_##SUFFIX
4285 for each such IFN_COND_##SUFFIX. */
4286 #define FOR_EACH_CODE_MAPPING(T) \
4287 T (PLUS_EXPR, ADD) \
4288 T (MINUS_EXPR, SUB) \
4289 T (MULT_EXPR, MUL) \
4290 T (TRUNC_DIV_EXPR, DIV) \
4291 T (TRUNC_MOD_EXPR, MOD) \
4292 T (RDIV_EXPR, RDIV) \
4293 T (MIN_EXPR, MIN) \
4294 T (MAX_EXPR, MAX) \
4295 T (BIT_AND_EXPR, AND) \
4296 T (BIT_IOR_EXPR, IOR) \
4297 T (BIT_XOR_EXPR, XOR) \
4298 T (LSHIFT_EXPR, SHL) \
4299 T (RSHIFT_EXPR, SHR) \
4300 T (NEGATE_EXPR, NEG)
4302 /* Return a function that only performs CODE when a certain condition is met
4303 and that uses a given fallback value otherwise. For example, if CODE is
4304 a binary operation associated with conditional function FN:
4306 LHS = FN (COND, A, B, ELSE)
4308 is equivalent to the C expression:
4310 LHS = COND ? A CODE B : ELSE;
4312 operating elementwise if the operands are vectors.
4314 Return IFN_LAST if no such function exists. */
4316 internal_fn
4317 get_conditional_internal_fn (tree_code code)
4319 switch (code)
4321 #define CASE(CODE, IFN) case CODE: return IFN_COND_##IFN;
4322 FOR_EACH_CODE_MAPPING(CASE)
4323 #undef CASE
4324 default:
4325 return IFN_LAST;
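/* (Editorial example, derived from FOR_EACH_CODE_MAPPING.)  For instance,
   get_conditional_internal_fn (PLUS_EXPR) returns IFN_COND_ADD, the function
   used in

       lhs = .COND_ADD (cond, a, b, else);

   which adds where COND is true and copies ELSE elsewhere, as described in
   the comment above.  */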
4329 /* If IFN implements the conditional form of a tree code, return that
4330 tree code, otherwise return ERROR_MARK. */
4332 tree_code
4333 conditional_internal_fn_code (internal_fn ifn)
4335 switch (ifn)
4337 #define CASE(CODE, IFN) \
4338 case IFN_COND_##IFN: \
4339 case IFN_COND_LEN_##IFN: \
4340 return CODE;
4341 FOR_EACH_CODE_MAPPING (CASE)
4342 #undef CASE
4343 default:
4344 return ERROR_MARK;
4348 /* Like get_conditional_internal_fn, but return a function that
4349 additionally restricts the operation to the leading elements
4350 of a vector. The number of elements to process is given by a length
4351 and bias pair, as for IFN_LEN_LOAD. The values of the remaining
4352 elements are taken from the fallback ("else") argument.
4354 For example, if CODE is a binary operation associated with FN:
4356 LHS = FN (COND, A, B, ELSE, LEN, BIAS)
4358 is equivalent to the C code:
4360 for (int i = 0; i < NUNITS; i++)
4362 if (i < LEN + BIAS && COND[i])
4363 LHS[i] = A[i] CODE B[i];
4364 else
4365 LHS[i] = ELSE[i];
4369 internal_fn
4370 get_conditional_len_internal_fn (tree_code code)
4372 switch (code)
4374 #define CASE(CODE, IFN) case CODE: return IFN_COND_LEN_##IFN;
4375 FOR_EACH_CODE_MAPPING(CASE)
4376 #undef CASE
4377 default:
4378 return IFN_LAST;
4382 /* Invoke T(IFN) for each internal function IFN that also has an
4383 IFN_COND_* form. */
4384 #define FOR_EACH_COND_FN_PAIR(T) \
4385 T (FMAX) \
4386 T (FMIN) \
4387 T (FMA) \
4388 T (FMS) \
4389 T (FNMA) \
4390 T (FNMS)
4392 /* Return a function that only performs internal function FN when a
4393 certain condition is met and that uses a given fallback value otherwise.
4394 In other words, the returned function FN' is such that:
4396 LHS = FN' (COND, A1, ... An, ELSE)
4398 is equivalent to the C expression:
4400 LHS = COND ? FN (A1, ..., An) : ELSE;
4402 operating elementwise if the operands are vectors.
4404 Return IFN_LAST if no such function exists. */
4406 internal_fn
4407 get_conditional_internal_fn (internal_fn fn)
4409 switch (fn)
4411 #define CASE(NAME) case IFN_##NAME: return IFN_COND_##NAME;
4412 FOR_EACH_COND_FN_PAIR(CASE)
4413 #undef CASE
4414 default:
4415 return IFN_LAST;
4419 /* If there exists an internal function like IFN that operates on vectors,
4420 but with additional length and bias parameters, return the internal_fn
4421 for that function, otherwise return IFN_LAST. */
4422 internal_fn
4423 get_len_internal_fn (internal_fn fn)
4425 switch (fn)
4427 #undef DEF_INTERNAL_COND_FN
4428 #undef DEF_INTERNAL_SIGNED_COND_FN
4429 #define DEF_INTERNAL_COND_FN(NAME, ...) \
4430 case IFN_COND_##NAME: \
4431 return IFN_COND_LEN_##NAME;
4432 #define DEF_INTERNAL_SIGNED_COND_FN(NAME, ...) \
4433 case IFN_COND_##NAME: \
4434 return IFN_COND_LEN_##NAME;
4435 #include "internal-fn.def"
4436 #undef DEF_INTERNAL_COND_FN
4437 #undef DEF_INTERNAL_SIGNED_COND_FN
4438 default:
4439 return IFN_LAST;
4443 /* If IFN implements the conditional form of an unconditional internal
4444 function, return that unconditional function, otherwise return IFN_LAST. */
4446 internal_fn
4447 get_unconditional_internal_fn (internal_fn ifn)
4449 switch (ifn)
4451 #define CASE(NAME) \
4452 case IFN_COND_##NAME: \
4453 case IFN_COND_LEN_##NAME: \
4454 return IFN_##NAME;
4455 FOR_EACH_COND_FN_PAIR (CASE)
4456 #undef CASE
4457 default:
4458 return IFN_LAST;
4462 /* Return true if STMT can be interpreted as a conditional tree code
4463 operation of the form:
4465 LHS = COND ? OP (RHS1, ...) : ELSE;
4467 operating elementwise if the operands are vectors. This includes
4468 the case of an all-true COND, so that the operation always happens.
4470 There is an alternative way to interpret STMT when the operands are
4471 vectors: the operation predicated by both a conditional mask and a
4472 loop control length, equivalent to the C code:
4474 for (int i = 0; i < NUNITS; i++)
4476 if (i < LEN + BIAS && COND[i])
4477 LHS[i] = A[i] CODE B[i];
4478 else
4479 LHS[i] = ELSE[i];
4482 When returning true, set:
4484 - *COND_OUT to the condition COND, or to NULL_TREE if the condition
4485 is known to be all-true
4486 - *CODE_OUT to the tree code
4487 - OPS[I] to operand I of *CODE_OUT
4488 - *ELSE_OUT to the fallback value ELSE, or to NULL_TREE if the
4489 condition is known to be all true.
4490 - *LEN to the len argument if it is a COND_LEN_* operation, otherwise to NULL_TREE.
4491 - *BIAS to the bias argument if it is a COND_LEN_* operation, otherwise to NULL_TREE. */
4493 bool
4494 can_interpret_as_conditional_op_p (gimple *stmt, tree *cond_out,
4495 tree_code *code_out,
4496 tree (&ops)[3], tree *else_out,
4497 tree *len, tree *bias)
4499 *len = NULL_TREE;
4500 *bias = NULL_TREE;
4501 if (gassign *assign = dyn_cast <gassign *> (stmt))
4503 *cond_out = NULL_TREE;
4504 *code_out = gimple_assign_rhs_code (assign);
4505 ops[0] = gimple_assign_rhs1 (assign);
4506 ops[1] = gimple_assign_rhs2 (assign);
4507 ops[2] = gimple_assign_rhs3 (assign);
4508 *else_out = NULL_TREE;
4509 return true;
4511 if (gcall *call = dyn_cast <gcall *> (stmt))
4512 if (gimple_call_internal_p (call))
4514 internal_fn ifn = gimple_call_internal_fn (call);
4515 tree_code code = conditional_internal_fn_code (ifn);
4516 int len_index = internal_fn_len_index (ifn);
4517 int cond_nargs = len_index >= 0 ? 4 : 2;
4518 if (code != ERROR_MARK)
4520 *cond_out = gimple_call_arg (call, 0);
4521 *code_out = code;
4522 unsigned int nops = gimple_call_num_args (call) - cond_nargs;
4523 for (unsigned int i = 0; i < 3; ++i)
4524 ops[i] = i < nops ? gimple_call_arg (call, i + 1) : NULL_TREE;
4525 *else_out = gimple_call_arg (call, nops + 1);
4526 if (len_index < 0)
4528 if (integer_truep (*cond_out))
4530 *cond_out = NULL_TREE;
4531 *else_out = NULL_TREE;
4534 else
4536 *len = gimple_call_arg (call, len_index);
4537 *bias = gimple_call_arg (call, len_index + 1);
4539 return true;
4542 return false;
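/* (Editorial usage sketch, hedged; the caller below is hypothetical.)  For a
   statement lhs = .COND_ADD (c_1, a_2, b_3, e_4):

       tree cond, els, len, bias, ops[3];
       tree_code code;
       if (can_interpret_as_conditional_op_p (stmt, &cond, &code, ops,
                                              &els, &len, &bias))
         // code == PLUS_EXPR, cond == c_1, ops[0] == a_2, ops[1] == b_3,
         // els == e_4, len == bias == NULL_TREE for this non-LEN form.
         handle_conditional_op (code, cond, ops, els);

   where handle_conditional_op stands in for whatever the caller does.  */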
4545 /* Return true if IFN is some form of load from memory. */
4547 bool
4548 internal_load_fn_p (internal_fn fn)
4550 switch (fn)
4552 case IFN_MASK_LOAD:
4553 case IFN_LOAD_LANES:
4554 case IFN_MASK_LOAD_LANES:
4555 case IFN_MASK_LEN_LOAD_LANES:
4556 case IFN_GATHER_LOAD:
4557 case IFN_MASK_GATHER_LOAD:
4558 case IFN_MASK_LEN_GATHER_LOAD:
4559 case IFN_LEN_LOAD:
4560 case IFN_MASK_LEN_LOAD:
4561 return true;
4563 default:
4564 return false;
4568 /* Return true if IFN is some form of store to memory. */
4570 bool
4571 internal_store_fn_p (internal_fn fn)
4573 switch (fn)
4575 case IFN_MASK_STORE:
4576 case IFN_STORE_LANES:
4577 case IFN_MASK_STORE_LANES:
4578 case IFN_MASK_LEN_STORE_LANES:
4579 case IFN_SCATTER_STORE:
4580 case IFN_MASK_SCATTER_STORE:
4581 case IFN_MASK_LEN_SCATTER_STORE:
4582 case IFN_LEN_STORE:
4583 case IFN_MASK_LEN_STORE:
4584 return true;
4586 default:
4587 return false;
4591 /* Return true if IFN is some form of gather load or scatter store. */
4593 bool
4594 internal_gather_scatter_fn_p (internal_fn fn)
4596 switch (fn)
4598 case IFN_GATHER_LOAD:
4599 case IFN_MASK_GATHER_LOAD:
4600 case IFN_MASK_LEN_GATHER_LOAD:
4601 case IFN_SCATTER_STORE:
4602 case IFN_MASK_SCATTER_STORE:
4603 case IFN_MASK_LEN_SCATTER_STORE:
4604 return true;
4606 default:
4607 return false;
4611 /* If FN takes a vector len argument, return the index of that argument,
4612 otherwise return -1. */
4615 internal_fn_len_index (internal_fn fn)
4617 switch (fn)
4619 case IFN_LEN_LOAD:
4620 case IFN_LEN_STORE:
4621 return 2;
4623 case IFN_MASK_LEN_GATHER_LOAD:
4624 case IFN_MASK_LEN_SCATTER_STORE:
4625 case IFN_COND_LEN_FMA:
4626 case IFN_COND_LEN_FMS:
4627 case IFN_COND_LEN_FNMA:
4628 case IFN_COND_LEN_FNMS:
4629 return 5;
4631 case IFN_COND_LEN_ADD:
4632 case IFN_COND_LEN_SUB:
4633 case IFN_COND_LEN_MUL:
4634 case IFN_COND_LEN_DIV:
4635 case IFN_COND_LEN_MOD:
4636 case IFN_COND_LEN_RDIV:
4637 case IFN_COND_LEN_MIN:
4638 case IFN_COND_LEN_MAX:
4639 case IFN_COND_LEN_FMIN:
4640 case IFN_COND_LEN_FMAX:
4641 case IFN_COND_LEN_AND:
4642 case IFN_COND_LEN_IOR:
4643 case IFN_COND_LEN_XOR:
4644 case IFN_COND_LEN_SHL:
4645 case IFN_COND_LEN_SHR:
4646 return 4;
4648 case IFN_COND_LEN_NEG:
4649 case IFN_MASK_LEN_LOAD:
4650 case IFN_MASK_LEN_STORE:
4651 case IFN_MASK_LEN_LOAD_LANES:
4652 case IFN_MASK_LEN_STORE_LANES:
4653 return 3;
4655 default:
4656 return -1;
4660 /* If FN takes a vector mask argument, return the index of that argument,
4661 otherwise return -1. */
4664 internal_fn_mask_index (internal_fn fn)
4666 switch (fn)
4668 case IFN_MASK_LOAD:
4669 case IFN_MASK_LOAD_LANES:
4670 case IFN_MASK_LEN_LOAD_LANES:
4671 case IFN_MASK_STORE:
4672 case IFN_MASK_STORE_LANES:
4673 case IFN_MASK_LEN_STORE_LANES:
4674 case IFN_MASK_LEN_LOAD:
4675 case IFN_MASK_LEN_STORE:
4676 return 2;
4678 case IFN_MASK_GATHER_LOAD:
4679 case IFN_MASK_SCATTER_STORE:
4680 case IFN_MASK_LEN_GATHER_LOAD:
4681 case IFN_MASK_LEN_SCATTER_STORE:
4682 return 4;
4684 default:
4685 return (conditional_internal_fn_code (fn) != ERROR_MARK
4686 || get_unconditional_internal_fn (fn) != IFN_LAST ? 0 : -1);
4690 /* If FN takes a value that should be stored to memory, return the index
4691 of that argument, otherwise return -1. */
4694 internal_fn_stored_value_index (internal_fn fn)
4696 switch (fn)
4698 case IFN_MASK_STORE:
4699 case IFN_MASK_STORE_LANES:
4700 case IFN_SCATTER_STORE:
4701 case IFN_MASK_SCATTER_STORE:
4702 case IFN_MASK_LEN_SCATTER_STORE:
4703 return 3;
4705 case IFN_LEN_STORE:
4706 return 4;
4708 case IFN_MASK_LEN_STORE:
4709 case IFN_MASK_LEN_STORE_LANES:
4710 return 5;
4712 default:
4713 return -1;
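/* (Editorial example, hedged.)  For a masked store such as

       .MASK_STORE (ptr_1, align_2, mask_3, data_4);

   internal_fn_mask_index returns 2 and internal_fn_stored_value_index
   returns 3, matching the positions shown; align_2 stands for the
   pointer-typed operand that conveys alignment information.  */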
4717 /* Return true if the target supports gather load or scatter store function
4718 IFN. For loads, VECTOR_TYPE is the vector type of the load result,
4719 while for stores it is the vector type of the stored data argument.
4720 MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
4721 or stored. OFFSET_VECTOR_TYPE is the vector type that holds the
4722 offset from the shared base address of each loaded or stored element.
4723 SCALE is the amount by which these offsets should be multiplied
4724 *after* they have been extended to address width. */
4726 bool
4727 internal_gather_scatter_fn_supported_p (internal_fn ifn, tree vector_type,
4728 tree memory_element_type,
4729 tree offset_vector_type, int scale)
4731 if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type)),
4732 TYPE_SIZE (memory_element_type)))
4733 return false;
4734 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vector_type),
4735 TYPE_VECTOR_SUBPARTS (offset_vector_type)))
4736 return false;
4737 optab optab = direct_internal_fn_optab (ifn);
4738 insn_code icode = convert_optab_handler (optab, TYPE_MODE (vector_type),
4739 TYPE_MODE (offset_vector_type));
4740 int output_ops = internal_load_fn_p (ifn) ? 1 : 0;
4741 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (offset_vector_type));
4742 return (icode != CODE_FOR_nothing
4743 && insn_operand_matches (icode, 2 + output_ops, GEN_INT (unsigned_p))
4744 && insn_operand_matches (icode, 3 + output_ops, GEN_INT (scale)));
4747 /* Return true if the target supports IFN_CHECK_{RAW,WAR}_PTRS function IFN
4748 for pointers of type TYPE when the accesses have LENGTH bytes and their
4749 common byte alignment is ALIGN. */
4751 bool
4752 internal_check_ptrs_fn_supported_p (internal_fn ifn, tree type,
4753 poly_uint64 length, unsigned int align)
4755 machine_mode mode = TYPE_MODE (type);
4756 optab optab = direct_internal_fn_optab (ifn);
4757 insn_code icode = direct_optab_handler (optab, mode);
4758 if (icode == CODE_FOR_nothing)
4759 return false;
4760 rtx length_rtx = immed_wide_int_const (length, mode);
4761 return (insn_operand_matches (icode, 3, length_rtx)
4762 && insn_operand_matches (icode, 4, GEN_INT (align)));
4765 /* Return the supported bias for IFN which is either IFN_{LEN_,MASK_LEN_,}LOAD
4766 or IFN_{LEN_,MASK_LEN_,}STORE. For now we only support the biases of 0 and
4767 -1 (in case 0 is not an allowable length for {len_,mask_len_}load or
4768 {len_,mask_len_}store). If none of the biases match what the backend
4769 provides, return VECT_PARTIAL_BIAS_UNSUPPORTED. */
4771 signed char
4772 internal_len_load_store_bias (internal_fn ifn, machine_mode mode)
4774 optab optab = direct_internal_fn_optab (ifn);
4775 insn_code icode = direct_optab_handler (optab, mode);
4776 int bias_no = 3;
4778 if (icode == CODE_FOR_nothing)
4780 machine_mode mask_mode;
4781 if (!targetm.vectorize.get_mask_mode (mode).exists (&mask_mode))
4782 return VECT_PARTIAL_BIAS_UNSUPPORTED;
4783 if (ifn == IFN_LEN_LOAD)
4785 /* Try MASK_LEN_LOAD. */
4786 optab = direct_internal_fn_optab (IFN_MASK_LEN_LOAD);
4788 else
4790 /* Try MASK_LEN_STORE. */
4791 optab = direct_internal_fn_optab (IFN_MASK_LEN_STORE);
4793 icode = convert_optab_handler (optab, mode, mask_mode);
4794 bias_no = 4;
4797 if (icode != CODE_FOR_nothing)
4799 /* For now we only support biases of 0 or -1. Try both of them. */
4800 if (insn_operand_matches (icode, bias_no, GEN_INT (0)))
4801 return 0;
4802 if (insn_operand_matches (icode, bias_no, GEN_INT (-1)))
4803 return -1;
4806 return VECT_PARTIAL_BIAS_UNSUPPORTED;
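/* (Editorial note, hedged.)  The bias is added to the length operand, i.e.
   the number of active elements is LEN + BIAS, so a target reporting a bias
   of -1 expects a caller that wants N lanes to emit something like

       .MASK_LEN_LOAD (ptr, align, mask, N + 1, -1)

   whereas the usual bias of 0 passes N directly.  */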
4809 /* Expand STMT as though it were a call to internal function FN. */
4811 void
4812 expand_internal_call (internal_fn fn, gcall *stmt)
4814 internal_fn_expanders[fn] (fn, stmt);
4817 /* Expand STMT, which is a call to internal function FN. */
4819 void
4820 expand_internal_call (gcall *stmt)
4822 expand_internal_call (gimple_call_internal_fn (stmt), stmt);
4825 /* If TYPE is a vector type, return true if IFN is a direct internal
4826 function that is supported for that type. If TYPE is a scalar type,
4827 return true if IFN is a direct internal function that is supported for
4828 the target's preferred vector version of TYPE. */
4830 bool
4831 vectorized_internal_fn_supported_p (internal_fn ifn, tree type)
4833 if (VECTOR_MODE_P (TYPE_MODE (type)))
4834 return direct_internal_fn_supported_p (ifn, type, OPTIMIZE_FOR_SPEED);
4836 scalar_mode smode;
4837 if (VECTOR_TYPE_P (type)
4838 || !is_a <scalar_mode> (TYPE_MODE (type), &smode))
4839 return false;
4841 machine_mode vmode = targetm.vectorize.preferred_simd_mode (smode);
4842 if (VECTOR_MODE_P (vmode))
4844 tree vectype = build_vector_type_for_mode (type, vmode);
4845 if (direct_internal_fn_supported_p (ifn, vectype, OPTIMIZE_FOR_SPEED))
4846 return true;
4849 auto_vector_modes vector_modes;
4850 targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
4851 for (machine_mode base_mode : vector_modes)
4852 if (related_vector_mode (base_mode, smode).exists (&vmode))
4854 tree vectype = build_vector_type_for_mode (type, vmode);
4855 if (direct_internal_fn_supported_p (ifn, vectype, OPTIMIZE_FOR_SPEED))
4856 return true;
4859 return false;
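/* (Editorial usage sketch, hedged.)  A pass asking whether, say, IFN_SQRT
   can be vectorized for double might call

       vectorized_internal_fn_supported_p (IFN_SQRT, double_type_node);

   letting this routine try the target's preferred vector mode for the
   scalar mode and then the autovectorize_vector_modes list before giving
   up.  */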
4862 void
4863 expand_SHUFFLEVECTOR (internal_fn, gcall *)
4865 gcc_unreachable ();
4868 void
4869 expand_PHI (internal_fn, gcall *)
4871 gcc_unreachable ();
4874 void
4875 expand_SPACESHIP (internal_fn, gcall *stmt)
4877 tree lhs = gimple_call_lhs (stmt);
4878 tree rhs1 = gimple_call_arg (stmt, 0);
4879 tree rhs2 = gimple_call_arg (stmt, 1);
4880 tree type = TREE_TYPE (rhs1);
4882 do_pending_stack_adjust ();
4884 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
4885 rtx op1 = expand_normal (rhs1);
4886 rtx op2 = expand_normal (rhs2);
4888 class expand_operand ops[3];
4889 create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (lhs)));
4890 create_input_operand (&ops[1], op1, TYPE_MODE (type));
4891 create_input_operand (&ops[2], op2, TYPE_MODE (type));
4892 insn_code icode = optab_handler (spaceship_optab, TYPE_MODE (type));
4893 expand_insn (icode, 3, ops);
4894 if (!rtx_equal_p (target, ops[0].value))
4895 emit_move_insn (target, ops[0].value);
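/* (Editorial sketch, hedged.)  .SPACESHIP (a, b) is the internal form used
   for a floating-point three-way comparison a <=> b when spaceship_optab is
   available; the expansion above simply feeds both operands to that pattern
   and copies its result into the lhs.  */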
4898 void
4899 expand_ASSUME (internal_fn, gcall *)
4903 void
4904 expand_MASK_CALL (internal_fn, gcall *)
4906 /* This IFN should only exist between ifcvt and vect passes. */
4907 gcc_unreachable ();