gcc/internal-fn.c
1 /* Internal functions.
2 Copyright (C) 2011-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "stringpool.h"
43 #include "attribs.h"
44 #include "asan.h"
45 #include "ubsan.h"
46 #include "recog.h"
47 #include "builtins.h"
48 #include "optabs-tree.h"
49 #include "gimple-ssa.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "explow.h"
53 #include "rtl-iter.h"
54 #include "gimple-range.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* The names of each internal function, indexed by function number. */
60 const char *const internal_fn_name_array[] = {
61 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
62 #include "internal-fn.def"
63 "<invalid-fn>"
66 /* The ECF_* flags of each internal function, indexed by function number. */
67 const int internal_fn_flags_array[] = {
68 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
69 #include "internal-fn.def"
73 /* Return the internal function called NAME, or IFN_LAST if there's
74 no such function. */
76 internal_fn
77 lookup_internal_fn (const char *name)
79 typedef hash_map<nofree_string_hash, internal_fn> name_to_fn_map_type;
80 static name_to_fn_map_type *name_to_fn_map;
82 if (!name_to_fn_map)
84 name_to_fn_map = new name_to_fn_map_type (IFN_LAST);
85 for (unsigned int i = 0; i < IFN_LAST; ++i)
86 name_to_fn_map->put (internal_fn_name (internal_fn (i)),
87 internal_fn (i));
89 internal_fn *entry = name_to_fn_map->get (name);
90 return entry ? *entry : IFN_LAST;
93 /* Fnspec of each internal function, indexed by function number. */
94 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
96 void
97 init_internal_fns ()
99 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
100 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
101 build_string ((int) sizeof (FNSPEC) - 1, FNSPEC ? FNSPEC : "");
102 #include "internal-fn.def"
103 internal_fn_fnspec_array[IFN_LAST] = 0;
106 /* Create static initializers for the information returned by
107 direct_internal_fn. */
108 #define not_direct { -2, -2, false }
109 #define mask_load_direct { -1, 2, false }
110 #define load_lanes_direct { -1, -1, false }
111 #define mask_load_lanes_direct { -1, -1, false }
112 #define gather_load_direct { 3, 1, false }
113 #define len_load_direct { -1, -1, false }
114 #define mask_store_direct { 3, 2, false }
115 #define store_lanes_direct { 0, 0, false }
116 #define mask_store_lanes_direct { 0, 0, false }
117 #define vec_cond_mask_direct { 1, 0, false }
118 #define vec_cond_direct { 2, 0, false }
119 #define scatter_store_direct { 3, 1, false }
120 #define len_store_direct { 3, 3, false }
121 #define vec_set_direct { 3, 3, false }
122 #define unary_direct { 0, 0, true }
123 #define binary_direct { 0, 0, true }
124 #define ternary_direct { 0, 0, true }
125 #define cond_unary_direct { 1, 1, true }
126 #define cond_binary_direct { 1, 1, true }
127 #define cond_ternary_direct { 1, 1, true }
128 #define while_direct { 0, 2, false }
129 #define fold_extract_direct { 2, 2, false }
130 #define fold_left_direct { 1, 1, false }
131 #define mask_fold_left_direct { 1, 1, false }
132 #define check_ptrs_direct { 0, 0, false }
134 const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
135 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
136 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
137 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
138 UNSIGNED_OPTAB, TYPE) TYPE##_direct,
139 #include "internal-fn.def"
140 not_direct
143 /* ARRAY_TYPE is an array of vector modes. Return the associated insn
144 for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none. */
146 static enum insn_code
147 get_multi_vector_move (tree array_type, convert_optab optab)
149 machine_mode imode;
150 machine_mode vmode;
152 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
153 imode = TYPE_MODE (array_type);
154 vmode = TYPE_MODE (TREE_TYPE (array_type));
156 return convert_optab_handler (optab, imode, vmode);
159 /* Expand LOAD_LANES call STMT using optab OPTAB. */
161 static void
162 expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
164 class expand_operand ops[2];
165 tree type, lhs, rhs;
166 rtx target, mem;
168 lhs = gimple_call_lhs (stmt);
169 rhs = gimple_call_arg (stmt, 0);
170 type = TREE_TYPE (lhs);
172 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
173 mem = expand_normal (rhs);
175 gcc_assert (MEM_P (mem));
176 PUT_MODE (mem, TYPE_MODE (type));
178 create_output_operand (&ops[0], target, TYPE_MODE (type));
179 create_fixed_operand (&ops[1], mem);
180 expand_insn (get_multi_vector_move (type, optab), 2, ops);
181 if (!rtx_equal_p (target, ops[0].value))
182 emit_move_insn (target, ops[0].value);
185 /* Expand STORE_LANES call STMT using optab OPTAB. */
187 static void
188 expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
190 class expand_operand ops[2];
191 tree type, lhs, rhs;
192 rtx target, reg;
194 lhs = gimple_call_lhs (stmt);
195 rhs = gimple_call_arg (stmt, 0);
196 type = TREE_TYPE (rhs);
198 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
199 reg = expand_normal (rhs);
201 gcc_assert (MEM_P (target));
202 PUT_MODE (target, TYPE_MODE (type));
204 create_fixed_operand (&ops[0], target);
205 create_input_operand (&ops[1], reg, TYPE_MODE (type));
206 expand_insn (get_multi_vector_move (type, optab), 2, ops);
209 static void
210 expand_ANNOTATE (internal_fn, gcall *)
212 gcc_unreachable ();
215 /* This should get expanded in omp_device_lower pass. */
217 static void
218 expand_GOMP_USE_SIMT (internal_fn, gcall *)
220 gcc_unreachable ();
223 /* This should get expanded in omp_device_lower pass. */
225 static void
226 expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
228 gcc_unreachable ();
231 /* Allocate per-lane storage and begin non-uniform execution region. */
233 static void
234 expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
236 rtx target;
237 tree lhs = gimple_call_lhs (stmt);
238 if (lhs)
239 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
240 else
241 target = gen_reg_rtx (Pmode);
242 rtx size = expand_normal (gimple_call_arg (stmt, 0));
243 rtx align = expand_normal (gimple_call_arg (stmt, 1));
244 class expand_operand ops[3];
245 create_output_operand (&ops[0], target, Pmode);
246 create_input_operand (&ops[1], size, Pmode);
247 create_input_operand (&ops[2], align, Pmode);
248 gcc_assert (targetm.have_omp_simt_enter ());
249 expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
250 if (!rtx_equal_p (target, ops[0].value))
251 emit_move_insn (target, ops[0].value);
254 /* Deallocate per-lane storage and leave non-uniform execution region. */
256 static void
257 expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
259 gcc_checking_assert (!gimple_call_lhs (stmt));
260 rtx arg = expand_normal (gimple_call_arg (stmt, 0));
261 class expand_operand ops[1];
262 create_input_operand (&ops[0], arg, Pmode);
263 gcc_assert (targetm.have_omp_simt_exit ());
264 expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
267 /* Lane index on SIMT targets: thread index in the warp on NVPTX. On targets
268 without SIMT execution this should be expanded in omp_device_lower pass. */
270 static void
271 expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
273 tree lhs = gimple_call_lhs (stmt);
274 if (!lhs)
275 return;
277 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
278 gcc_assert (targetm.have_omp_simt_lane ());
279 emit_insn (targetm.gen_omp_simt_lane (target));
282 /* This should get expanded in omp_device_lower pass. */
284 static void
285 expand_GOMP_SIMT_VF (internal_fn, gcall *)
287 gcc_unreachable ();
290 /* Lane index of the first SIMT lane that supplies a non-zero argument.
291 This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
292 lane that executed the last iteration for handling OpenMP lastprivate. */
294 static void
295 expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
297 tree lhs = gimple_call_lhs (stmt);
298 if (!lhs)
299 return;
301 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
302 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
303 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
304 class expand_operand ops[2];
305 create_output_operand (&ops[0], target, mode);
306 create_input_operand (&ops[1], cond, mode);
307 gcc_assert (targetm.have_omp_simt_last_lane ());
308 expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
309 if (!rtx_equal_p (target, ops[0].value))
310 emit_move_insn (target, ops[0].value);
313 /* Non-transparent predicate used in SIMT lowering of OpenMP "ordered". */
315 static void
316 expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
318 tree lhs = gimple_call_lhs (stmt);
319 if (!lhs)
320 return;
322 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
323 rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
324 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
325 class expand_operand ops[2];
326 create_output_operand (&ops[0], target, mode);
327 create_input_operand (&ops[1], ctr, mode);
328 gcc_assert (targetm.have_omp_simt_ordered ());
329 expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
330 if (!rtx_equal_p (target, ops[0].value))
331 emit_move_insn (target, ops[0].value);
334 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
335 any lane supplies a non-zero argument. */
337 static void
338 expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
340 tree lhs = gimple_call_lhs (stmt);
341 if (!lhs)
342 return;
344 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
345 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
346 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
347 class expand_operand ops[2];
348 create_output_operand (&ops[0], target, mode);
349 create_input_operand (&ops[1], cond, mode);
350 gcc_assert (targetm.have_omp_simt_vote_any ());
351 expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
352 if (!rtx_equal_p (target, ops[0].value))
353 emit_move_insn (target, ops[0].value);
356 /* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
357 is destination lane index XOR given offset. */
359 static void
360 expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
362 tree lhs = gimple_call_lhs (stmt);
363 if (!lhs)
364 return;
366 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
367 rtx src = expand_normal (gimple_call_arg (stmt, 0));
368 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
369 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
370 class expand_operand ops[3];
371 create_output_operand (&ops[0], target, mode);
372 create_input_operand (&ops[1], src, mode);
373 create_input_operand (&ops[2], idx, SImode);
374 gcc_assert (targetm.have_omp_simt_xchg_bfly ());
375 expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
376 if (!rtx_equal_p (target, ops[0].value))
377 emit_move_insn (target, ops[0].value);
380 /* Exchange between SIMT lanes according to given source lane index. */
382 static void
383 expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
385 tree lhs = gimple_call_lhs (stmt);
386 if (!lhs)
387 return;
389 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
390 rtx src = expand_normal (gimple_call_arg (stmt, 0));
391 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
392 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
393 class expand_operand ops[3];
394 create_output_operand (&ops[0], target, mode);
395 create_input_operand (&ops[1], src, mode);
396 create_input_operand (&ops[2], idx, SImode);
397 gcc_assert (targetm.have_omp_simt_xchg_idx ());
398 expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
399 if (!rtx_equal_p (target, ops[0].value))
400 emit_move_insn (target, ops[0].value);
403 /* This should get expanded in adjust_simduid_builtins. */
405 static void
406 expand_GOMP_SIMD_LANE (internal_fn, gcall *)
408 gcc_unreachable ();
411 /* This should get expanded in adjust_simduid_builtins. */
413 static void
414 expand_GOMP_SIMD_VF (internal_fn, gcall *)
416 gcc_unreachable ();
419 /* This should get expanded in adjust_simduid_builtins. */
421 static void
422 expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
424 gcc_unreachable ();
427 /* This should get expanded in adjust_simduid_builtins. */
429 static void
430 expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
432 gcc_unreachable ();
435 /* This should get expanded in adjust_simduid_builtins. */
437 static void
438 expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
440 gcc_unreachable ();
443 /* This should get expanded in the sanopt pass. */
445 static void
446 expand_UBSAN_NULL (internal_fn, gcall *)
448 gcc_unreachable ();
451 /* This should get expanded in the sanopt pass. */
453 static void
454 expand_UBSAN_BOUNDS (internal_fn, gcall *)
456 gcc_unreachable ();
459 /* This should get expanded in the sanopt pass. */
461 static void
462 expand_UBSAN_VPTR (internal_fn, gcall *)
464 gcc_unreachable ();
467 /* This should get expanded in the sanopt pass. */
469 static void
470 expand_UBSAN_PTR (internal_fn, gcall *)
472 gcc_unreachable ();
475 /* This should get expanded in the sanopt pass. */
477 static void
478 expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
480 gcc_unreachable ();
483 /* This should get expanded in the sanopt pass. */
485 static void
486 expand_HWASAN_CHECK (internal_fn, gcall *)
488 gcc_unreachable ();
491 /* For hwasan stack tagging:
492 Clear tags on the dynamically allocated space.
493 For use after an object dynamically allocated on the stack goes out of
494 scope. */
495 static void
496 expand_HWASAN_ALLOCA_UNPOISON (internal_fn, gcall *gc)
498 gcc_assert (Pmode == ptr_mode);
499 tree restored_position = gimple_call_arg (gc, 0);
500 rtx restored_rtx = expand_expr (restored_position, NULL_RTX, VOIDmode,
501 EXPAND_NORMAL);
502 rtx func = init_one_libfunc ("__hwasan_tag_memory");
503 rtx off = expand_simple_binop (Pmode, MINUS, restored_rtx,
504 stack_pointer_rtx, NULL_RTX, 0,
505 OPTAB_WIDEN);
506 emit_library_call_value (func, NULL_RTX, LCT_NORMAL, VOIDmode,
507 virtual_stack_dynamic_rtx, Pmode,
508 HWASAN_STACK_BACKGROUND, QImode,
509 off, Pmode);
512 /* For hwasan stack tagging:
513 Return a tag to be used for a dynamic allocation. */
514 static void
515 expand_HWASAN_CHOOSE_TAG (internal_fn, gcall *gc)
517 tree tag = gimple_call_lhs (gc);
518 rtx target = expand_expr (tag, NULL_RTX, VOIDmode, EXPAND_NORMAL);
519 machine_mode mode = GET_MODE (target);
520 gcc_assert (mode == QImode);
522 rtx base_tag = targetm.memtag.extract_tag (hwasan_frame_base (), NULL_RTX);
523 gcc_assert (base_tag);
524 rtx tag_offset = gen_int_mode (hwasan_current_frame_tag (), QImode);
525 rtx chosen_tag = expand_simple_binop (QImode, PLUS, base_tag, tag_offset,
526 target, /* unsignedp = */1,
527 OPTAB_WIDEN);
528 chosen_tag = hwasan_truncate_to_tag_size (chosen_tag, target);
530 /* Really need to put the tag into the `target` RTX. */
531 if (chosen_tag != target)
533 rtx temp = chosen_tag;
534 gcc_assert (GET_MODE (chosen_tag) == mode);
535 emit_move_insn (target, temp);
538 hwasan_increment_frame_tag ();
541 /* For hwasan stack tagging:
542 Tag a region of space in the shadow stack according to the base pointer of
543 an object on the stack. N.b. the length provided in the internal call is
544 required to be aligned to HWASAN_TAG_GRANULE_SIZE. */
545 static void
546 expand_HWASAN_MARK (internal_fn, gcall *gc)
548 gcc_assert (ptr_mode == Pmode);
549 HOST_WIDE_INT flag = tree_to_shwi (gimple_call_arg (gc, 0));
550 bool is_poison = ((asan_mark_flags)flag) == ASAN_MARK_POISON;
552 tree base = gimple_call_arg (gc, 1);
553 gcc_checking_assert (TREE_CODE (base) == ADDR_EXPR);
554 rtx base_rtx = expand_normal (base);
556 rtx tag = is_poison ? HWASAN_STACK_BACKGROUND
557 : targetm.memtag.extract_tag (base_rtx, NULL_RTX);
558 rtx address = targetm.memtag.untagged_pointer (base_rtx, NULL_RTX);
560 tree len = gimple_call_arg (gc, 2);
561 rtx r_len = expand_normal (len);
563 rtx func = init_one_libfunc ("__hwasan_tag_memory");
564 emit_library_call (func, LCT_NORMAL, VOIDmode, address, Pmode,
565 tag, QImode, r_len, Pmode);
568 /* For hwasan stack tagging:
569 Store a tag into a pointer. */
570 static void
571 expand_HWASAN_SET_TAG (internal_fn, gcall *gc)
573 gcc_assert (ptr_mode == Pmode);
574 tree g_target = gimple_call_lhs (gc);
575 tree g_ptr = gimple_call_arg (gc, 0);
576 tree g_tag = gimple_call_arg (gc, 1);
578 rtx ptr = expand_normal (g_ptr);
579 rtx tag = expand_expr (g_tag, NULL_RTX, QImode, EXPAND_NORMAL);
580 rtx target = expand_normal (g_target);
582 rtx untagged = targetm.memtag.untagged_pointer (ptr, target);
583 rtx tagged_value = targetm.memtag.set_tag (untagged, tag, target);
584 if (tagged_value != target)
585 emit_move_insn (target, tagged_value);
588 /* This should get expanded in the sanopt pass. */
590 static void
591 expand_ASAN_CHECK (internal_fn, gcall *)
593 gcc_unreachable ();
596 /* This should get expanded in the sanopt pass. */
598 static void
599 expand_ASAN_MARK (internal_fn, gcall *)
601 gcc_unreachable ();
604 /* This should get expanded in the sanopt pass. */
606 static void
607 expand_ASAN_POISON (internal_fn, gcall *)
609 gcc_unreachable ();
612 /* This should get expanded in the sanopt pass. */
614 static void
615 expand_ASAN_POISON_USE (internal_fn, gcall *)
617 gcc_unreachable ();
620 /* This should get expanded in the tsan pass. */
622 static void
623 expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
625 gcc_unreachable ();
628 /* This should get expanded in the lower pass. */
630 static void
631 expand_FALLTHROUGH (internal_fn, gcall *call)
633 error_at (gimple_location (call),
634 "invalid use of attribute %<fallthrough%>");
637 /* Return minimum precision needed to represent all values
638 of ARG in SIGNed integral type. */
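/* For example, the constant 5 needs 4 bits as a SIGNED value (a sign bit
   plus 101) but only 3 bits as an UNSIGNED value, and -3 needs 3 bits as a
   SIGNED value, since the 3-bit signed range is [-4, 3].  */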
640 static int
641 get_min_precision (tree arg, signop sign)
643 int prec = TYPE_PRECISION (TREE_TYPE (arg));
644 int cnt = 0;
645 signop orig_sign = sign;
646 if (TREE_CODE (arg) == INTEGER_CST)
648 int p;
649 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
651 widest_int w = wi::to_widest (arg);
652 w = wi::ext (w, prec, sign);
653 p = wi::min_precision (w, sign);
655 else
656 p = wi::min_precision (wi::to_wide (arg), sign);
657 return MIN (p, prec);
659 while (CONVERT_EXPR_P (arg)
660 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
661 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
663 arg = TREE_OPERAND (arg, 0);
664 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
666 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
667 sign = UNSIGNED;
668 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
669 return prec + (orig_sign != sign);
670 prec = TYPE_PRECISION (TREE_TYPE (arg));
672 if (++cnt > 30)
673 return prec + (orig_sign != sign);
675 if (CONVERT_EXPR_P (arg)
676 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
677 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) > prec)
679 /* We have e.g. (unsigned short) y_2 where int y_2 = (int) x_1(D);
680 If y_2's min precision is smaller than prec, return that. */
681 int oprec = get_min_precision (TREE_OPERAND (arg, 0), sign);
682 if (oprec < prec)
683 return oprec + (orig_sign != sign);
685 if (TREE_CODE (arg) != SSA_NAME)
686 return prec + (orig_sign != sign);
687 value_range r;
688 while (!get_global_range_query ()->range_of_expr (r, arg)
689 || r.kind () != VR_RANGE)
691 gimple *g = SSA_NAME_DEF_STMT (arg);
692 if (is_gimple_assign (g)
693 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
695 tree t = gimple_assign_rhs1 (g);
696 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
697 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
699 arg = t;
700 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
702 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
703 sign = UNSIGNED;
704 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
705 return prec + (orig_sign != sign);
706 prec = TYPE_PRECISION (TREE_TYPE (arg));
708 if (++cnt > 30)
709 return prec + (orig_sign != sign);
710 continue;
713 return prec + (orig_sign != sign);
715 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
717 int p1 = wi::min_precision (r.lower_bound (), sign);
718 int p2 = wi::min_precision (r.upper_bound (), sign);
719 p1 = MAX (p1, p2);
720 prec = MIN (prec, p1);
722 else if (sign == UNSIGNED && !wi::neg_p (r.lower_bound (), SIGNED))
724 int p = wi::min_precision (r.upper_bound (), UNSIGNED);
725 prec = MIN (prec, p);
727 return prec + (orig_sign != sign);
730 /* Helper for expand_*_overflow. Set the __imag__ part to true
731 (1 except for signed:1 type, in which case store -1). */
733 static void
734 expand_arith_set_overflow (tree lhs, rtx target)
736 if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
737 && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
738 write_complex_part (target, constm1_rtx, true);
739 else
740 write_complex_part (target, const1_rtx, true);
743 /* Helper for expand_*_overflow. Store RES into the __real__ part
744 of TARGET. If RES has larger MODE than __real__ part of TARGET,
745 set the __imag__ part to 1 if RES doesn't fit into it. Similarly
746 if LHS has smaller precision than its mode. */
748 static void
749 expand_arith_overflow_result_store (tree lhs, rtx target,
750 scalar_int_mode mode, rtx res)
752 scalar_int_mode tgtmode
753 = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
754 rtx lres = res;
755 if (tgtmode != mode)
757 rtx_code_label *done_label = gen_label_rtx ();
758 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
759 lres = convert_modes (tgtmode, mode, res, uns);
760 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
761 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
762 EQ, true, mode, NULL_RTX, NULL, done_label,
763 profile_probability::very_likely ());
764 expand_arith_set_overflow (lhs, target);
765 emit_label (done_label);
767 int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
768 int tgtprec = GET_MODE_PRECISION (tgtmode);
769 if (prec < tgtprec)
771 rtx_code_label *done_label = gen_label_rtx ();
772 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
773 res = lres;
774 if (uns)
776 rtx mask
777 = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
778 tgtmode);
779 lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
780 true, OPTAB_LIB_WIDEN);
782 else
784 lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
785 NULL_RTX, 1);
786 lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
787 NULL_RTX, 0);
789 do_compare_rtx_and_jump (res, lres,
790 EQ, true, tgtmode, NULL_RTX, NULL, done_label,
791 profile_probability::very_likely ());
792 expand_arith_set_overflow (lhs, target);
793 emit_label (done_label);
795 write_complex_part (target, lres, false);
798 /* Helper for expand_*_overflow. Store RES into TARGET. */
800 static void
801 expand_ubsan_result_store (rtx target, rtx res)
803 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
804 /* If this is a scalar in a register that is stored in a wider mode
805 than the declared mode, compute the result into its declared mode
806 and then convert to the wider mode. Our value is the computed
807 expression. */
808 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
809 else
810 emit_move_insn (target, res);
813 /* Add sub/add overflow checking to the statement STMT.
814 CODE says whether the operation is + or -. */
816 void
817 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
818 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
819 bool uns1_p, bool is_ubsan, tree *datap)
821 rtx res, target = NULL_RTX;
822 tree fn;
823 rtx_code_label *done_label = gen_label_rtx ();
824 rtx_code_label *do_error = gen_label_rtx ();
825 do_pending_stack_adjust ();
826 rtx op0 = expand_normal (arg0);
827 rtx op1 = expand_normal (arg1);
828 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
829 int prec = GET_MODE_PRECISION (mode);
830 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
831 bool do_xor = false;
833 if (is_ubsan)
834 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
836 if (lhs)
838 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
839 if (!is_ubsan)
840 write_complex_part (target, const0_rtx, true);
843 /* We assume both operands and result have the same precision
844 here (GET_MODE_BITSIZE (mode)), S stands for signed type
845 with that precision, U for unsigned type with that precision,
846 sgn for unsigned most significant bit in that precision.
847 s1 is signed first operand, u1 is unsigned first operand,
848 s2 is signed second operand, u2 is unsigned second operand,
849 sr is signed result, ur is unsigned result and the following
850 rules say how to compute result (which is always result of
851 the operands as if both were unsigned, cast to the right
852 signedness) and how to compute whether operation overflowed.
854 s1 + s2 -> sr
855 res = (S) ((U) s1 + (U) s2)
856 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
857 s1 - s2 -> sr
858 res = (S) ((U) s1 - (U) s2)
859 ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
860 u1 + u2 -> ur
861 res = u1 + u2
862 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
863 u1 - u2 -> ur
864 res = u1 - u2
865 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
866 s1 + u2 -> sr
867 res = (S) ((U) s1 + u2)
868 ovf = ((U) res ^ sgn) < u2
869 s1 + u2 -> ur
870 t1 = (S) (u2 ^ sgn)
871 t2 = s1 + t1
872 res = (U) t2 ^ sgn
873 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
874 s1 - u2 -> sr
875 res = (S) ((U) s1 - u2)
876 ovf = u2 > ((U) s1 ^ sgn)
877 s1 - u2 -> ur
878 res = (U) s1 - u2
879 ovf = s1 < 0 || u2 > (U) s1
880 u1 - s2 -> sr
881 res = u1 - (U) s2
882 ovf = u1 >= ((U) s2 ^ sgn)
883 u1 - s2 -> ur
884 t1 = u1 ^ sgn
885 t2 = t1 - (U) s2
886 res = t2 ^ sgn
887 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
888 s1 + s2 -> ur
889 res = (U) s1 + (U) s2
890 ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
891 u1 + u2 -> sr
892 res = (S) (u1 + u2)
893 ovf = (U) res < u2 || res < 0
894 u1 - u2 -> sr
895 res = (S) (u1 - u2)
896 ovf = u1 >= u2 ? res < 0 : res >= 0
897 s1 - s2 -> ur
898 res = (U) s1 - (U) s2
899 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
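/* As a concrete check of the table above, take 8-bit operands, so sgn is
   0x80.  For s1 + u2 -> sr with s1 = 100, u2 = 50 the true sum 150 exceeds
   127; indeed res = (S) 150 = -106 and ((U) res ^ sgn) = 22 is < u2, so
   overflow is reported.  With s1 = -20, u2 = 50 the sum is 30,
   ((U) res ^ sgn) = 158 is not < u2, and no overflow is reported.  */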
901 if (code == PLUS_EXPR && uns0_p && !uns1_p)
903 /* PLUS_EXPR is commutative, if operand signedness differs,
904 canonicalize to the first operand being signed and second
905 unsigned to simplify following code. */
906 std::swap (op0, op1);
907 std::swap (arg0, arg1);
908 uns0_p = false;
909 uns1_p = true;
912 /* u1 +- u2 -> ur */
913 if (uns0_p && uns1_p && unsr_p)
915 insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
916 : usubv4_optab, mode);
917 if (icode != CODE_FOR_nothing)
919 class expand_operand ops[4];
920 rtx_insn *last = get_last_insn ();
922 res = gen_reg_rtx (mode);
923 create_output_operand (&ops[0], res, mode);
924 create_input_operand (&ops[1], op0, mode);
925 create_input_operand (&ops[2], op1, mode);
926 create_fixed_operand (&ops[3], do_error);
927 if (maybe_expand_insn (icode, 4, ops))
929 last = get_last_insn ();
930 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
931 && JUMP_P (last)
932 && any_condjump_p (last)
933 && !find_reg_note (last, REG_BR_PROB, 0))
934 add_reg_br_prob_note (last,
935 profile_probability::very_unlikely ());
936 emit_jump (done_label);
937 goto do_error_label;
940 delete_insns_since (last);
943 /* Compute the operation. On RTL level, the addition is always
944 unsigned. */
945 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
946 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
947 rtx tem = op0;
948 /* For PLUS_EXPR, the operation is commutative, so we can pick
949 operand to compare against. For prec <= BITS_PER_WORD, I think
950 preferring REG operand is better over CONST_INT, because
951 the CONST_INT might enlarge the instruction or CSE would need
952 to figure out we'd already loaded it into a register before.
953 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
954 as then the multi-word comparison can be perhaps simplified. */
955 if (code == PLUS_EXPR
956 && (prec <= BITS_PER_WORD
957 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
958 : CONST_SCALAR_INT_P (op1)))
959 tem = op1;
960 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
961 true, mode, NULL_RTX, NULL, done_label,
962 profile_probability::very_likely ());
963 goto do_error_label;
966 /* s1 +- u2 -> sr */
967 if (!uns0_p && uns1_p && !unsr_p)
969 /* Compute the operation. On RTL level, the addition is always
970 unsigned. */
971 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
972 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
973 rtx tem = expand_binop (mode, add_optab,
974 code == PLUS_EXPR ? res : op0, sgn,
975 NULL_RTX, false, OPTAB_LIB_WIDEN);
976 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
977 done_label, profile_probability::very_likely ());
978 goto do_error_label;
981 /* s1 + u2 -> ur */
982 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
984 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
985 OPTAB_LIB_WIDEN);
986 /* As we've changed op1, we have to avoid using the value range
987 for the original argument. */
988 arg1 = error_mark_node;
989 do_xor = true;
990 goto do_signed;
993 /* u1 - s2 -> ur */
994 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
996 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
997 OPTAB_LIB_WIDEN);
998 /* As we've changed op0, we have to avoid using the value range
999 for the original argument. */
1000 arg0 = error_mark_node;
1001 do_xor = true;
1002 goto do_signed;
1005 /* s1 - u2 -> ur */
1006 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
1008 /* Compute the operation. On RTL level, the subtraction is always
1009 unsigned. */
1010 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1011 OPTAB_LIB_WIDEN);
1012 int pos_neg = get_range_pos_neg (arg0);
1013 if (pos_neg == 2)
1014 /* If ARG0 is known to be always negative, this is always overflow. */
1015 emit_jump (do_error);
1016 else if (pos_neg == 3)
1017 /* If ARG0 is not known to be always positive, check at runtime. */
1018 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
1019 NULL, do_error, profile_probability::very_unlikely ());
1020 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
1021 done_label, profile_probability::very_likely ());
1022 goto do_error_label;
1025 /* u1 - s2 -> sr */
1026 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
1028 /* Compute the operation. On RTL level, the subtraction is always
1029 unsigned. */
1030 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1031 OPTAB_LIB_WIDEN);
1032 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
1033 OPTAB_LIB_WIDEN);
1034 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
1035 done_label, profile_probability::very_likely ());
1036 goto do_error_label;
1039 /* u1 + u2 -> sr */
1040 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
1042 /* Compute the operation. On RTL level, the addition is always
1043 unsigned. */
1044 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
1045 OPTAB_LIB_WIDEN);
1046 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1047 NULL, do_error, profile_probability::very_unlikely ());
1048 rtx tem = op1;
1049 /* The operation is commutative, so we can pick operand to compare
1050 against. For prec <= BITS_PER_WORD, I think preferring REG operand
1051 is better over CONST_INT, because the CONST_INT might enlarge the
1052 instruction or CSE would need to figure out we'd already loaded it
1053 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
1054 might be more beneficial, as then the multi-word comparison can be
1055 perhaps simplified. */
1056 if (prec <= BITS_PER_WORD
1057 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
1058 : CONST_SCALAR_INT_P (op0))
1059 tem = op0;
1060 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
1061 done_label, profile_probability::very_likely ());
1062 goto do_error_label;
1065 /* s1 +- s2 -> ur */
1066 if (!uns0_p && !uns1_p && unsr_p)
1068 /* Compute the operation. On RTL level, the addition is always
1069 unsigned. */
1070 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1071 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1072 int pos_neg = get_range_pos_neg (arg1);
1073 if (code == PLUS_EXPR)
1075 int pos_neg0 = get_range_pos_neg (arg0);
1076 if (pos_neg0 != 3 && pos_neg == 3)
1078 std::swap (op0, op1);
1079 pos_neg = pos_neg0;
1082 rtx tem;
1083 if (pos_neg != 3)
1085 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
1086 ? and_optab : ior_optab,
1087 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
1088 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
1089 NULL, done_label, profile_probability::very_likely ());
1091 else
1093 rtx_code_label *do_ior_label = gen_label_rtx ();
1094 do_compare_rtx_and_jump (op1, const0_rtx,
1095 code == MINUS_EXPR ? GE : LT, false, mode,
1096 NULL_RTX, NULL, do_ior_label,
1097 profile_probability::even ());
1098 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
1099 OPTAB_LIB_WIDEN);
1100 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1101 NULL, done_label, profile_probability::very_likely ());
1102 emit_jump (do_error);
1103 emit_label (do_ior_label);
1104 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
1105 OPTAB_LIB_WIDEN);
1106 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1107 NULL, done_label, profile_probability::very_likely ());
1109 goto do_error_label;
1112 /* u1 - u2 -> sr */
1113 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
1115 /* Compute the operation. On RTL level, the subtraction is always
1116 unsigned. */
1117 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1118 OPTAB_LIB_WIDEN);
1119 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
1120 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
1121 op0_geu_op1, profile_probability::even ());
1122 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1123 NULL, done_label, profile_probability::very_likely ());
1124 emit_jump (do_error);
1125 emit_label (op0_geu_op1);
1126 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1127 NULL, done_label, profile_probability::very_likely ());
1128 goto do_error_label;
1131 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
1133 /* s1 +- s2 -> sr */
1134 do_signed:
1136 insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
1137 : subv4_optab, mode);
1138 if (icode != CODE_FOR_nothing)
1140 class expand_operand ops[4];
1141 rtx_insn *last = get_last_insn ();
1143 res = gen_reg_rtx (mode);
1144 create_output_operand (&ops[0], res, mode);
1145 create_input_operand (&ops[1], op0, mode);
1146 create_input_operand (&ops[2], op1, mode);
1147 create_fixed_operand (&ops[3], do_error);
1148 if (maybe_expand_insn (icode, 4, ops))
1150 last = get_last_insn ();
1151 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1152 && JUMP_P (last)
1153 && any_condjump_p (last)
1154 && !find_reg_note (last, REG_BR_PROB, 0))
1155 add_reg_br_prob_note (last,
1156 profile_probability::very_unlikely ());
1157 emit_jump (done_label);
1158 goto do_error_label;
1161 delete_insns_since (last);
1164 /* Compute the operation. On RTL level, the addition is always
1165 unsigned. */
1166 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1167 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1169 /* If we can prove that one of the arguments (for MINUS_EXPR only
1170 the second operand, as subtraction is not commutative) is always
1171 non-negative or always negative, we can do just one comparison
1172 and conditional jump. */
1173 int pos_neg = get_range_pos_neg (arg1);
1174 if (code == PLUS_EXPR)
1176 int pos_neg0 = get_range_pos_neg (arg0);
1177 if (pos_neg0 != 3 && pos_neg == 3)
1179 std::swap (op0, op1);
1180 pos_neg = pos_neg0;
1184 /* Addition overflows if and only if the two operands have the same sign,
1185 and the result has the opposite sign. Subtraction overflows if and
1186 only if the two operands have opposite sign, and the subtrahend has
1187 the same sign as the result. Here 0 is counted as positive. */
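/* For example, in 8-bit addition with op0 = 100 and op1 = 50, giving
   res = (S) 150 = -106: op0 ^ op1 = 0x56 has the sign bit clear (same signs)
   while res ^ op1 = 0xa4 has it set (result sign differs), so the mask
   (res ^ op1) & ~(op0 ^ op1) = 0xa0 computed below is negative and the
   overflow path is taken.  */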
1188 if (pos_neg == 3)
1190 /* Compute op0 ^ op1 (operands have opposite sign). */
1191 rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1192 OPTAB_LIB_WIDEN);
1194 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1195 rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1196 OPTAB_LIB_WIDEN);
1198 rtx tem;
1199 if (code == PLUS_EXPR)
1201 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1202 tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1203 tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1204 OPTAB_LIB_WIDEN);
1206 else
1208 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1209 tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1210 tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1211 OPTAB_LIB_WIDEN);
1214 /* No overflow if the result has bit sign cleared. */
1215 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1216 NULL, done_label, profile_probability::very_likely ());
1219 /* Compare the result of the operation with the first operand.
1220 No overflow for addition if second operand is positive and result
1221 is larger or second operand is negative and result is smaller.
1222 Likewise for subtraction with sign of second operand flipped. */
1223 else
1224 do_compare_rtx_and_jump (res, op0,
1225 (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1226 false, mode, NULL_RTX, NULL, done_label,
1227 profile_probability::very_likely ());
1230 do_error_label:
1231 emit_label (do_error);
1232 if (is_ubsan)
1234 /* Expand the ubsan builtin call. */
1235 push_temp_slots ();
1236 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1237 arg0, arg1, datap);
1238 expand_normal (fn);
1239 pop_temp_slots ();
1240 do_pending_stack_adjust ();
1242 else if (lhs)
1243 expand_arith_set_overflow (lhs, target);
1245 /* We're done. */
1246 emit_label (done_label);
1248 if (lhs)
1250 if (is_ubsan)
1251 expand_ubsan_result_store (target, res);
1252 else
1254 if (do_xor)
1255 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1256 OPTAB_LIB_WIDEN);
1258 expand_arith_overflow_result_store (lhs, target, mode, res);
1263 /* Add negate overflow checking to the statement STMT. */
1265 static void
1266 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
1267 tree *datap)
1269 rtx res, op1;
1270 tree fn;
1271 rtx_code_label *done_label, *do_error;
1272 rtx target = NULL_RTX;
1274 done_label = gen_label_rtx ();
1275 do_error = gen_label_rtx ();
1277 do_pending_stack_adjust ();
1278 op1 = expand_normal (arg1);
1280 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
1281 if (lhs)
1283 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1284 if (!is_ubsan)
1285 write_complex_part (target, const0_rtx, true);
1288 enum insn_code icode = optab_handler (negv3_optab, mode);
1289 if (icode != CODE_FOR_nothing)
1291 class expand_operand ops[3];
1292 rtx_insn *last = get_last_insn ();
1294 res = gen_reg_rtx (mode);
1295 create_output_operand (&ops[0], res, mode);
1296 create_input_operand (&ops[1], op1, mode);
1297 create_fixed_operand (&ops[2], do_error);
1298 if (maybe_expand_insn (icode, 3, ops))
1300 last = get_last_insn ();
1301 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1302 && JUMP_P (last)
1303 && any_condjump_p (last)
1304 && !find_reg_note (last, REG_BR_PROB, 0))
1305 add_reg_br_prob_note (last,
1306 profile_probability::very_unlikely ());
1307 emit_jump (done_label);
1309 else
1311 delete_insns_since (last);
1312 icode = CODE_FOR_nothing;
1316 if (icode == CODE_FOR_nothing)
1318 /* Compute the operation. On RTL level, the negation is always
1319 unsigned. */
1320 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1322 /* Compare the operand with the most negative value. */
1323 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
1324 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
1325 done_label, profile_probability::very_likely ());
1328 emit_label (do_error);
1329 if (is_ubsan)
1331 /* Expand the ubsan builtin call. */
1332 push_temp_slots ();
1333 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
1334 arg1, NULL_TREE, datap);
1335 expand_normal (fn);
1336 pop_temp_slots ();
1337 do_pending_stack_adjust ();
1339 else if (lhs)
1340 expand_arith_set_overflow (lhs, target);
1342 /* We're done. */
1343 emit_label (done_label);
1345 if (lhs)
1347 if (is_ubsan)
1348 expand_ubsan_result_store (target, res);
1349 else
1350 expand_arith_overflow_result_store (lhs, target, mode, res);
1354 /* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
1355 mode MODE can be expanded without using a libcall. */
1357 static bool
1358 can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
1359 rtx op0, rtx op1, bool uns)
1361 if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
1362 != CODE_FOR_nothing)
1363 return true;
1365 if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
1366 != CODE_FOR_nothing)
1367 return true;
1369 rtx_insn *last = get_last_insn ();
1370 if (CONSTANT_P (op0))
1371 op0 = convert_modes (wmode, mode, op0, uns);
1372 else
1373 op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
1374 if (CONSTANT_P (op1))
1375 op1 = convert_modes (wmode, mode, op1, uns);
1376 else
1377 op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
1378 rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
1379 delete_insns_since (last);
1380 return ret != NULL_RTX;
1383 /* Add mul overflow checking to the statement STMT. */
1385 static void
1386 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1387 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1388 tree *datap)
1390 rtx res, op0, op1;
1391 tree fn, type;
1392 rtx_code_label *done_label, *do_error;
1393 rtx target = NULL_RTX;
1394 signop sign;
1395 enum insn_code icode;
1397 done_label = gen_label_rtx ();
1398 do_error = gen_label_rtx ();
1400 do_pending_stack_adjust ();
1401 op0 = expand_normal (arg0);
1402 op1 = expand_normal (arg1);
1404 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1405 bool uns = unsr_p;
1406 if (lhs)
1408 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1409 if (!is_ubsan)
1410 write_complex_part (target, const0_rtx, true);
1413 if (is_ubsan)
1414 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1416 /* We assume both operands and result have the same precision
1417 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1418 with that precision, U for unsigned type with that precision,
1419 sgn for unsigned most significant bit in that precision.
1420 s1 is signed first operand, u1 is unsigned first operand,
1421 s2 is signed second operand, u2 is unsigned second operand,
1422 sr is signed result, ur is unsigned result and the following
1423 rules say how to compute result (which is always result of
1424 the operands as if both were unsigned, cast to the right
1425 signedness) and how to compute whether operation overflowed.
1426 main_ovf (false) stands for jump on signed multiplication
1427 overflow or the main algorithm with uns == false.
1428 main_ovf (true) stands for jump on unsigned multiplication
1429 overflow or the main algorithm with uns == true.
1431 s1 * s2 -> sr
1432 res = (S) ((U) s1 * (U) s2)
1433 ovf = main_ovf (false)
1434 u1 * u2 -> ur
1435 res = u1 * u2
1436 ovf = main_ovf (true)
1437 s1 * u2 -> ur
1438 res = (U) s1 * u2
1439 ovf = (s1 < 0 && u2) || main_ovf (true)
1440 u1 * u2 -> sr
1441 res = (S) (u1 * u2)
1442 ovf = res < 0 || main_ovf (true)
1443 s1 * u2 -> sr
1444 res = (S) ((U) s1 * u2)
1445 ovf = (S) u2 >= 0 ? main_ovf (false)
1446 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1447 s1 * s2 -> ur
1448 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1449 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1450 res = t1 * t2
1451 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
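/* For instance, in the u1 * u2 -> sr case with 8-bit operands u1 = 16 and
   u2 = 12, the product 192 fits in 8 unsigned bits, so main_ovf (true) does
   not trigger, but (S) 192 = -64 is negative, so the res < 0 test correctly
   reports overflow of the signed result.  */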
1453 if (uns0_p && !uns1_p)
1455 /* Multiplication is commutative, if operand signedness differs,
1456 canonicalize to the first operand being signed and second
1457 unsigned to simplify following code. */
1458 std::swap (op0, op1);
1459 std::swap (arg0, arg1);
1460 uns0_p = false;
1461 uns1_p = true;
1464 int pos_neg0 = get_range_pos_neg (arg0);
1465 int pos_neg1 = get_range_pos_neg (arg1);
1467 /* s1 * u2 -> ur */
1468 if (!uns0_p && uns1_p && unsr_p)
1470 switch (pos_neg0)
1472 case 1:
1473 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1474 goto do_main;
1475 case 2:
1476 /* If s1 is negative, avoid the main code, just multiply and
1477 signal overflow if op1 is not 0. */
1478 struct separate_ops ops;
1479 ops.code = MULT_EXPR;
1480 ops.type = TREE_TYPE (arg1);
1481 ops.op0 = make_tree (ops.type, op0);
1482 ops.op1 = make_tree (ops.type, op1);
1483 ops.op2 = NULL_TREE;
1484 ops.location = loc;
1485 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1486 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1487 NULL, done_label, profile_probability::very_likely ());
1488 goto do_error_label;
1489 case 3:
1490 if (get_min_precision (arg1, UNSIGNED)
1491 + get_min_precision (arg0, SIGNED) <= GET_MODE_PRECISION (mode))
1493 /* If the first operand is sign extended from narrower type, the
1494 second operand is zero extended from narrower type and
1495 the sum of the two precisions is smaller or equal to the
1496 result precision: if the first argument is at runtime
1497 non-negative, maximum result will be 0x7e81 or 0x7f..fe80..01
1498 and there will be no overflow, if the first argument is
1499 negative and the second argument zero, the result will be
1500 0 and there will be no overflow, if the first argument is
1501 negative and the second argument positive, the result when
1502 treated as signed will be negative (minimum -0x7f80 or
1503 -0x7f..f80..0) there will always be overflow. So, do
1504 res = (U) (s1 * u2)
1505 ovf = (S) res < 0 */
1506 struct separate_ops ops;
1507 ops.code = MULT_EXPR;
1508 ops.type
1509 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1511 ops.op0 = make_tree (ops.type, op0);
1512 ops.op1 = make_tree (ops.type, op1);
1513 ops.op2 = NULL_TREE;
1514 ops.location = loc;
1515 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1516 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1517 mode, NULL_RTX, NULL, done_label,
1518 profile_probability::very_likely ());
1519 goto do_error_label;
1521 rtx_code_label *do_main_label;
1522 do_main_label = gen_label_rtx ();
1523 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1524 NULL, do_main_label, profile_probability::very_likely ());
1525 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1526 NULL, do_main_label, profile_probability::very_likely ());
1527 expand_arith_set_overflow (lhs, target);
1528 emit_label (do_main_label);
1529 goto do_main;
1530 default:
1531 gcc_unreachable ();
1535 /* u1 * u2 -> sr */
1536 if (uns0_p && uns1_p && !unsr_p)
1538 if ((pos_neg0 | pos_neg1) == 1)
1540 /* If both arguments are zero extended from narrower types,
1541 the MSB will be clear on both and so we can pretend it is
1542 a normal s1 * s2 -> sr multiplication. */
1543 uns0_p = false;
1544 uns1_p = false;
1546 else
1547 uns = true;
1548 /* Rest of handling of this case after res is computed. */
1549 goto do_main;
1552 /* s1 * u2 -> sr */
1553 if (!uns0_p && uns1_p && !unsr_p)
1555 switch (pos_neg1)
1557 case 1:
1558 goto do_main;
1559 case 2:
1560 /* If (S) u2 is negative (i.e. u2 is larger than the maximum of S),
1561 avoid the main code, just multiply and signal overflow
1562 unless 0 * u2 or -1 * ((U) Smin). */
1563 struct separate_ops ops;
1564 ops.code = MULT_EXPR;
1565 ops.type = TREE_TYPE (arg1);
1566 ops.op0 = make_tree (ops.type, op0);
1567 ops.op1 = make_tree (ops.type, op1);
1568 ops.op2 = NULL_TREE;
1569 ops.location = loc;
1570 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1571 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1572 NULL, done_label, profile_probability::very_likely ());
1573 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1574 NULL, do_error, profile_probability::very_unlikely ());
1575 int prec;
1576 prec = GET_MODE_PRECISION (mode);
1577 rtx sgn;
1578 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1579 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1580 NULL, done_label, profile_probability::very_likely ());
1581 goto do_error_label;
1582 case 3:
1583 /* Rest of handling of this case after res is computed. */
1584 goto do_main;
1585 default:
1586 gcc_unreachable ();
1590 /* s1 * s2 -> ur */
1591 if (!uns0_p && !uns1_p && unsr_p)
1593 rtx tem;
1594 switch (pos_neg0 | pos_neg1)
1596 case 1: /* Both operands known to be non-negative. */
1597 goto do_main;
1598 case 2: /* Both operands known to be negative. */
1599 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1600 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1601 /* Avoid looking at arg0/arg1 ranges, as we've changed
1602 the arguments. */
1603 arg0 = error_mark_node;
1604 arg1 = error_mark_node;
1605 goto do_main;
1606 case 3:
1607 if ((pos_neg0 ^ pos_neg1) == 3)
1609 /* If one operand is known to be negative and the other
1610 non-negative, this overflows always, unless the non-negative
1611 one is 0. Just do normal multiply and set overflow
1612 unless one of the operands is 0. */
1613 struct separate_ops ops;
1614 ops.code = MULT_EXPR;
1615 ops.type
1616 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1618 ops.op0 = make_tree (ops.type, op0);
1619 ops.op1 = make_tree (ops.type, op1);
1620 ops.op2 = NULL_TREE;
1621 ops.location = loc;
1622 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1623 do_compare_rtx_and_jump (pos_neg0 == 1 ? op0 : op1, const0_rtx, EQ,
1624 true, mode, NULL_RTX, NULL, done_label,
1625 profile_probability::very_likely ());
1626 goto do_error_label;
1628 if (get_min_precision (arg0, SIGNED)
1629 + get_min_precision (arg1, SIGNED) <= GET_MODE_PRECISION (mode))
1631 /* If both operands are sign extended from narrower types and
1632 the sum of the two precisions is smaller or equal to the
1633 result precision: if both arguments are at runtime
1634 non-negative, maximum result will be 0x3f01 or 0x3f..f0..01
1635 and there will be no overflow, if both arguments are negative,
1636 maximum result will be 0x40..00 and there will be no overflow
1637 either, if one argument is positive and the other argument
1638 negative, the result when treated as signed will be negative
1639 and there will be always overflow, and if one argument is
1640 zero and the other negative the result will be zero and no
1641 overflow. So, do
1642 res = (U) (s1 * s2)
1643 ovf = (S) res < 0 */
1644 struct separate_ops ops;
1645 ops.code = MULT_EXPR;
1646 ops.type
1647 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1649 ops.op0 = make_tree (ops.type, op0);
1650 ops.op1 = make_tree (ops.type, op1);
1651 ops.op2 = NULL_TREE;
1652 ops.location = loc;
1653 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1654 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1655 mode, NULL_RTX, NULL, done_label,
1656 profile_probability::very_likely ());
1657 goto do_error_label;
1659 /* The general case, do all the needed comparisons at runtime. */
1660 rtx_code_label *do_main_label, *after_negate_label;
1661 rtx rop0, rop1;
1662 rop0 = gen_reg_rtx (mode);
1663 rop1 = gen_reg_rtx (mode);
1664 emit_move_insn (rop0, op0);
1665 emit_move_insn (rop1, op1);
1666 op0 = rop0;
1667 op1 = rop1;
1668 do_main_label = gen_label_rtx ();
1669 after_negate_label = gen_label_rtx ();
1670 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1671 OPTAB_LIB_WIDEN);
1672 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1673 NULL, after_negate_label, profile_probability::very_likely ());
1674 /* Both arguments negative here, negate them and continue with
1675 normal unsigned overflow checking multiplication. */
1676 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1677 NULL_RTX, false));
1678 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1679 NULL_RTX, false));
1680 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1681 the arguments. */
1682 arg0 = error_mark_node;
1683 arg1 = error_mark_node;
1684 emit_jump (do_main_label);
1685 emit_label (after_negate_label);
1686 tem = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1687 OPTAB_LIB_WIDEN);
1688 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1689 NULL, do_main_label,
1690 profile_probability::very_likely ());
1691 /* One argument is negative here, the other positive. This
1692 overflows always, unless one of the arguments is 0. But
1693 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1694 is, thus we can keep do_main code oring in overflow as is. */
1695 if (pos_neg0 != 2)
1696 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1697 NULL, do_main_label,
1698 profile_probability::very_unlikely ());
1699 if (pos_neg1 != 2)
1700 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1701 NULL, do_main_label,
1702 profile_probability::very_unlikely ());
1703 expand_arith_set_overflow (lhs, target);
1704 emit_label (do_main_label);
1705 goto do_main;
1706 default:
1707 gcc_unreachable ();
1711 do_main:
1712 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1713 sign = uns ? UNSIGNED : SIGNED;
1714 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1715 if (uns
1716 && (integer_pow2p (arg0) || integer_pow2p (arg1))
1717 && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
1719 /* Optimize unsigned multiplication by power of 2 constant
1720 using 2 shifts, one for result, one to extract the shifted
1721 out bits to see if they are all zero.
1722 Don't do this if optimizing for size and we have umulv4_optab,
1723 in that case assume multiplication will be shorter.
1724 This is a heuristic based on the single target that provides
1725 umulv4 right now (i?86/x86_64); if further targets add it, this
1726 might need to be revisited.
1727 Cases where both operands are constant should be folded already
1728 during GIMPLE, and cases where one operand is constant but not
1729 power of 2 are questionable, either the WIDEN_MULT_EXPR case
1730 below can be done without multiplication, just by shifts and adds,
1731 or we'd need to divide the result (and hope it actually doesn't
1732 really divide nor multiply) and compare the result of the division
1733 with the original operand. */
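/* E.g. for an 8-bit unsigned multiplication by 8 (cnt = 3): res = x << 3 and
   upper = x >> 5.  x = 40 shifts out a non-zero upper part (40 >> 5 = 1),
   matching the overflow of 40 * 8 = 320, while x = 25 gives upper = 0 and
   the in-range result 200.  */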
1734 rtx opn0 = op0;
1735 rtx opn1 = op1;
1736 tree argn0 = arg0;
1737 tree argn1 = arg1;
1738 if (integer_pow2p (arg0))
1740 std::swap (opn0, opn1);
1741 std::swap (argn0, argn1);
1743 int cnt = tree_log2 (argn1);
1744 if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
1746 rtx upper = const0_rtx;
1747 res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
1748 if (cnt != 0)
1749 upper = expand_shift (RSHIFT_EXPR, mode, opn0,
1750 GET_MODE_PRECISION (mode) - cnt,
1751 NULL_RTX, uns);
1752 do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
1753 NULL_RTX, NULL, done_label,
1754 profile_probability::very_likely ());
1755 goto do_error_label;
1758 if (icode != CODE_FOR_nothing)
1760 class expand_operand ops[4];
1761 rtx_insn *last = get_last_insn ();
1763 res = gen_reg_rtx (mode);
1764 create_output_operand (&ops[0], res, mode);
1765 create_input_operand (&ops[1], op0, mode);
1766 create_input_operand (&ops[2], op1, mode);
1767 create_fixed_operand (&ops[3], do_error);
1768 if (maybe_expand_insn (icode, 4, ops))
1770 last = get_last_insn ();
1771 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1772 && JUMP_P (last)
1773 && any_condjump_p (last)
1774 && !find_reg_note (last, REG_BR_PROB, 0))
1775 add_reg_br_prob_note (last,
1776 profile_probability::very_unlikely ());
1777 emit_jump (done_label);
1779 else
1781 delete_insns_since (last);
1782 icode = CODE_FOR_nothing;
1786 if (icode == CODE_FOR_nothing)
1788 struct separate_ops ops;
1789 int prec = GET_MODE_PRECISION (mode);
1790 scalar_int_mode hmode, wmode;
1791 ops.op0 = make_tree (type, op0);
1792 ops.op1 = make_tree (type, op1);
1793 ops.op2 = NULL_TREE;
1794 ops.location = loc;
1796 /* Optimize unsigned overflow check where we don't use the
1797 multiplication result, just whether overflow happened.
1798 If we can do MULT_HIGHPART_EXPR, that followed by
1799 comparison of the result against zero is cheapest.
1800 We'll still compute res, but it should be DCEd later. */
1801 use_operand_p use;
1802 gimple *use_stmt;
1803 if (!is_ubsan
1804 && lhs
1805 && uns
1806 && !(uns0_p && uns1_p && !unsr_p)
1807 && can_mult_highpart_p (mode, uns) == 1
1808 && single_imm_use (lhs, &use, &use_stmt)
1809 && is_gimple_assign (use_stmt)
1810 && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
1811 goto highpart;
1813 if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1814 && targetm.scalar_mode_supported_p (wmode)
1815 && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
1817 twoxwider:
1818 ops.code = WIDEN_MULT_EXPR;
1819 ops.type
1820 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1822 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1823 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1824 NULL_RTX, uns);
1825 hipart = convert_modes (mode, wmode, hipart, uns);
1826 res = convert_modes (mode, wmode, res, uns);
1827 if (uns)
1828 /* For the unsigned multiplication, there was overflow if
1829 HIPART is non-zero. */
1830 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1831 NULL_RTX, NULL, done_label,
1832 profile_probability::very_likely ());
1833 else
1835 /* RES is used more than once, place it in a pseudo. */
1836 res = force_reg (mode, res);
1838 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1839 NULL_RTX, 0);
1840 /* RES is low half of the double width result, HIPART
1841 the high half. There was overflow if
1842 HIPART is different from RES < 0 ? -1 : 0. */
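/* Added worked example (illustrative only): with 16-bit operands and a
   32-bit wmode, (-200) * 300 gives the double-width value -60000 ==
   0xffff15a0.  RES (the low half) is 0x15a0, which is non-negative, so
   RES < 0 ? -1 : 0 is 0, while HIPART is -1; they differ, the branch to
   done_label below is not taken and the overflow path is reached.  */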
1843 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1844 NULL_RTX, NULL, done_label,
1845 profile_probability::very_likely ());
1848 else if (can_mult_highpart_p (mode, uns) == 1)
1850 highpart:
1851 ops.code = MULT_HIGHPART_EXPR;
1852 ops.type = type;
1854 rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
1855 EXPAND_NORMAL);
1856 ops.code = MULT_EXPR;
1857 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1858 if (uns)
1859 /* For the unsigned multiplication, there was overflow if
1860 HIPART is non-zero. */
1861 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1862 NULL_RTX, NULL, done_label,
1863 profile_probability::very_likely ());
1864 else
1866 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1867 NULL_RTX, 0);
1868 /* RES is low half of the double width result, HIPART
1869 the high half. There was overflow if
1870 HIPART is different from RES < 0 ? -1 : 0. */
1871 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1872 NULL_RTX, NULL, done_label,
1873 profile_probability::very_likely ());
1877 else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
1878 && 2 * GET_MODE_PRECISION (hmode) == prec)
1880 rtx_code_label *large_op0 = gen_label_rtx ();
1881 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1882 rtx_code_label *one_small_one_large = gen_label_rtx ();
1883 rtx_code_label *both_ops_large = gen_label_rtx ();
1884 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1885 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1886 rtx_code_label *do_overflow = gen_label_rtx ();
1887 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1889 unsigned int hprec = GET_MODE_PRECISION (hmode);
1890 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1891 NULL_RTX, uns);
1892 hipart0 = convert_modes (hmode, mode, hipart0, uns);
1893 rtx lopart0 = convert_modes (hmode, mode, op0, uns);
1894 rtx signbit0 = const0_rtx;
1895 if (!uns)
1896 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1897 NULL_RTX, 0);
1898 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1899 NULL_RTX, uns);
1900 hipart1 = convert_modes (hmode, mode, hipart1, uns);
1901 rtx lopart1 = convert_modes (hmode, mode, op1, uns);
1902 rtx signbit1 = const0_rtx;
1903 if (!uns)
1904 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1905 NULL_RTX, 0);
1907 res = gen_reg_rtx (mode);
1909 /* True if op0 resp. op1 are known to be in the range of
1910 halfstype. */
1911 bool op0_small_p = false;
1912 bool op1_small_p = false;
1913 /* True if op0 resp. op1 are known to have all zeros or all ones
1914 in the upper half of bits, but are not known to be
1915 op{0,1}_small_p. */
1916 bool op0_medium_p = false;
1917 bool op1_medium_p = false;
1918 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1919 nonnegative, 1 if unknown. */
1920 int op0_sign = 1;
1921 int op1_sign = 1;
1923 if (pos_neg0 == 1)
1924 op0_sign = 0;
1925 else if (pos_neg0 == 2)
1926 op0_sign = -1;
1927 if (pos_neg1 == 1)
1928 op1_sign = 0;
1929 else if (pos_neg1 == 2)
1930 op1_sign = -1;
1932 unsigned int mprec0 = prec;
1933 if (arg0 != error_mark_node)
1934 mprec0 = get_min_precision (arg0, sign);
1935 if (mprec0 <= hprec)
1936 op0_small_p = true;
1937 else if (!uns && mprec0 <= hprec + 1)
1938 op0_medium_p = true;
1939 unsigned int mprec1 = prec;
1940 if (arg1 != error_mark_node)
1941 mprec1 = get_min_precision (arg1, sign);
1942 if (mprec1 <= hprec)
1943 op1_small_p = true;
1944 else if (!uns && mprec1 <= hprec + 1)
1945 op1_medium_p = true;
1947 int smaller_sign = 1;
1948 int larger_sign = 1;
1949 if (op0_small_p)
1951 smaller_sign = op0_sign;
1952 larger_sign = op1_sign;
1954 else if (op1_small_p)
1956 smaller_sign = op1_sign;
1957 larger_sign = op0_sign;
1959 else if (op0_sign == op1_sign)
1961 smaller_sign = op0_sign;
1962 larger_sign = op0_sign;
1965 if (!op0_small_p)
1966 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1967 NULL_RTX, NULL, large_op0,
1968 profile_probability::unlikely ());
1970 if (!op1_small_p)
1971 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1972 NULL_RTX, NULL, small_op0_large_op1,
1973 profile_probability::unlikely ());
1975 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1976 hmode to mode, the multiplication will never overflow. We can
1977 do just one hmode x hmode => mode widening multiplication. */
1978 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1979 ops.op0 = make_tree (halfstype, lopart0);
1980 ops.op1 = make_tree (halfstype, lopart1);
1981 ops.code = WIDEN_MULT_EXPR;
1982 ops.type = type;
1983 rtx thisres
1984 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1985 emit_move_insn (res, thisres);
1986 emit_jump (done_label);
1988 emit_label (small_op0_large_op1);
1990 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1991 but op1 is not, just swap the arguments and handle it as op1
1992 sign/zero extended, op0 not. */
1993 rtx larger = gen_reg_rtx (mode);
1994 rtx hipart = gen_reg_rtx (hmode);
1995 rtx lopart = gen_reg_rtx (hmode);
1996 emit_move_insn (larger, op1);
1997 emit_move_insn (hipart, hipart1);
1998 emit_move_insn (lopart, lopart0);
1999 emit_jump (one_small_one_large);
2001 emit_label (large_op0);
2003 if (!op1_small_p)
2004 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
2005 NULL_RTX, NULL, both_ops_large,
2006 profile_probability::unlikely ());
2008 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
2009 but op0 is not, prepare larger, hipart and lopart pseudos and
2010 handle it together with small_op0_large_op1. */
2011 emit_move_insn (larger, op0);
2012 emit_move_insn (hipart, hipart0);
2013 emit_move_insn (lopart, lopart1);
2015 emit_label (one_small_one_large);
2017 /* lopart is the low part of the operand that is sign extended
2018 to mode, larger is the other operand, hipart is the
2019 high part of larger and lopart0 and lopart1 are the low parts
2020 of both operands.
2021 We perform lopart0 * lopart1 and lopart * hipart widening
2022 multiplications. */
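/* Added sketch of the identity used here (stated for unsigned values;
   the if (!uns) block further below applies the corrections needed for
   negative hipart or lopart): the extended operand equals lopart and
   the other operand equals (hipart << hprec) + its own low part, so
   their product is lopart0 * lopart1 + ((lopart * hipart) << hprec),
   which is exactly what the two widening multiplications compute.  */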
2023 tree halfutype = build_nonstandard_integer_type (hprec, 1);
2024 ops.op0 = make_tree (halfutype, lopart0);
2025 ops.op1 = make_tree (halfutype, lopart1);
2026 rtx lo0xlo1
2027 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2029 ops.op0 = make_tree (halfutype, lopart);
2030 ops.op1 = make_tree (halfutype, hipart);
2031 rtx loxhi = gen_reg_rtx (mode);
2032 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2033 emit_move_insn (loxhi, tem);
2035 if (!uns)
2037 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
2038 if (larger_sign == 0)
2039 emit_jump (after_hipart_neg);
2040 else if (larger_sign != -1)
2041 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
2042 NULL_RTX, NULL, after_hipart_neg,
2043 profile_probability::even ());
2045 tem = convert_modes (mode, hmode, lopart, 1);
2046 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
2047 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
2048 1, OPTAB_WIDEN);
2049 emit_move_insn (loxhi, tem);
2051 emit_label (after_hipart_neg);
2053 /* if (lopart < 0) loxhi -= larger; */
2054 if (smaller_sign == 0)
2055 emit_jump (after_lopart_neg);
2056 else if (smaller_sign != -1)
2057 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
2058 NULL_RTX, NULL, after_lopart_neg,
2059 profile_probability::even ());
2061 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
2062 1, OPTAB_WIDEN);
2063 emit_move_insn (loxhi, tem);
2065 emit_label (after_lopart_neg);
2068 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
2069 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
2070 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
2071 1, OPTAB_WIDEN);
2072 emit_move_insn (loxhi, tem);
2074 /* if (loxhi >> (bitsize / 2)
2075 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
2076 if (loxhi >> (bitsize / 2) == 0) (if uns). */
2077 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
2078 NULL_RTX, 0);
2079 hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
2080 rtx signbitloxhi = const0_rtx;
2081 if (!uns)
2082 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
2083 convert_modes (hmode, mode,
2084 loxhi, 0),
2085 hprec - 1, NULL_RTX, 0);
2087 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
2088 NULL_RTX, NULL, do_overflow,
2089 profile_probability::very_unlikely ());
2091 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
2092 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
2093 NULL_RTX, 1);
2094 tem = convert_modes (mode, hmode,
2095 convert_modes (hmode, mode, lo0xlo1, 1), 1);
2097 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
2098 1, OPTAB_WIDEN);
2099 if (tem != res)
2100 emit_move_insn (res, tem);
2101 emit_jump (done_label);
2103 emit_label (both_ops_large);
2105 /* If both operands are large (not sign (!uns) or zero (uns)
2106 extended from hmode), then perform the full multiplication
2107 which will be the result of the operation.
2108 The only cases which don't overflow are, for signed multiplication,
2109 some cases where both hipart0 and hipart1 are 0 or -1.
2110 For unsigned multiplication, when the high parts are both non-zero,
2111 this always overflows. */
2112 ops.code = MULT_EXPR;
2113 ops.op0 = make_tree (type, op0);
2114 ops.op1 = make_tree (type, op1);
2115 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2116 emit_move_insn (res, tem);
2118 if (!uns)
2120 if (!op0_medium_p)
2122 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
2123 NULL_RTX, 1, OPTAB_WIDEN);
2124 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2125 NULL_RTX, NULL, do_error,
2126 profile_probability::very_unlikely ());
2129 if (!op1_medium_p)
2131 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
2132 NULL_RTX, 1, OPTAB_WIDEN);
2133 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2134 NULL_RTX, NULL, do_error,
2135 profile_probability::very_unlikely ());
2138 /* At this point hipart{0,1} are both in [-1, 0]. If they are
2139 the same, overflow happened if res is non-positive, if they
2140 are different, overflow happened if res is positive. */
2141 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
2142 emit_jump (hipart_different);
2143 else if (op0_sign == 1 || op1_sign == 1)
2144 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
2145 NULL_RTX, NULL, hipart_different,
2146 profile_probability::even ());
2148 do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
2149 NULL_RTX, NULL, do_error,
2150 profile_probability::very_unlikely ());
2151 emit_jump (done_label);
2153 emit_label (hipart_different);
2155 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
2156 NULL_RTX, NULL, do_error,
2157 profile_probability::very_unlikely ());
2158 emit_jump (done_label);
2161 emit_label (do_overflow);
2163 /* Overflow, do full multiplication and fallthru into do_error. */
2164 ops.op0 = make_tree (type, op0);
2165 ops.op1 = make_tree (type, op1);
2166 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2167 emit_move_insn (res, tem);
2169 else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
2170 && targetm.scalar_mode_supported_p (wmode))
2171 /* Even emitting a libcall is better than not detecting overflow
2172 at all. */
2173 goto twoxwider;
2174 else
2176 gcc_assert (!is_ubsan);
2177 ops.code = MULT_EXPR;
2178 ops.type = type;
2179 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2180 emit_jump (done_label);
2184 do_error_label:
2185 emit_label (do_error);
2186 if (is_ubsan)
2188 /* Expand the ubsan builtin call. */
2189 push_temp_slots ();
2190 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
2191 arg0, arg1, datap);
2192 expand_normal (fn);
2193 pop_temp_slots ();
2194 do_pending_stack_adjust ();
2196 else if (lhs)
2197 expand_arith_set_overflow (lhs, target);
2199 /* We're done. */
2200 emit_label (done_label);
2202 /* u1 * u2 -> sr */
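/* Added note: the code above already checked that the product fits in
   the unsigned mode; because the result here is stored into a signed
   lhs, it additionally overflows whenever the sign bit of res is set,
   which is what the extra compare below detects.  */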
2203 if (uns0_p && uns1_p && !unsr_p)
2205 rtx_code_label *all_done_label = gen_label_rtx ();
2206 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
2207 NULL, all_done_label, profile_probability::very_likely ());
2208 expand_arith_set_overflow (lhs, target);
2209 emit_label (all_done_label);
2212 /* s1 * u2 -> sr */
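/* Added summary of the fixup below (illustrative): when op1 has its
   sign bit set as an unsigned value, the code first sets the overflow
   flag and then clears it again only if op0 is 0, or if op0 is -1 and
   res compares equal to op1 (which happens exactly when op1 is
   1 << (prec - 1), so the product is the minimum signed value).  */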
2213 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
2215 rtx_code_label *all_done_label = gen_label_rtx ();
2216 rtx_code_label *set_noovf = gen_label_rtx ();
2217 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
2218 NULL, all_done_label, profile_probability::very_likely ());
2219 expand_arith_set_overflow (lhs, target);
2220 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
2221 NULL, set_noovf, profile_probability::very_likely ());
2222 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
2223 NULL, all_done_label, profile_probability::very_unlikely ());
2224 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
2225 all_done_label, profile_probability::very_unlikely ());
2226 emit_label (set_noovf);
2227 write_complex_part (target, const0_rtx, true);
2228 emit_label (all_done_label);
2231 if (lhs)
2233 if (is_ubsan)
2234 expand_ubsan_result_store (target, res);
2235 else
2236 expand_arith_overflow_result_store (lhs, target, mode, res);
2240 /* Expand UBSAN_CHECK_* internal function if it has vector operands. */
2242 static void
2243 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
2244 tree arg0, tree arg1)
2246 poly_uint64 cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
2247 rtx_code_label *loop_lab = NULL;
2248 rtx cntvar = NULL_RTX;
2249 tree cntv = NULL_TREE;
2250 tree eltype = TREE_TYPE (TREE_TYPE (arg0));
2251 tree sz = TYPE_SIZE (eltype);
2252 tree data = NULL_TREE;
2253 tree resv = NULL_TREE;
2254 rtx lhsr = NULL_RTX;
2255 rtx resvr = NULL_RTX;
2256 unsigned HOST_WIDE_INT const_cnt = 0;
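/* Emit a runtime loop over the elements when the element count is not a
   compile-time constant or is larger than 4; otherwise the per-element
   checks are emitted unrolled.  */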
2257 bool use_loop_p = (!cnt.is_constant (&const_cnt) || const_cnt > 4);
2259 if (lhs)
2261 optab op;
2262 lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2263 if (!VECTOR_MODE_P (GET_MODE (lhsr))
2264 || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
2265 optab_default)) == unknown_optab
2266 || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
2267 == CODE_FOR_nothing))
2269 if (MEM_P (lhsr))
2270 resv = make_tree (TREE_TYPE (lhs), lhsr);
2271 else
2273 resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
2274 resv = make_tree (TREE_TYPE (lhs), resvr);
2278 if (use_loop_p)
2280 do_pending_stack_adjust ();
2281 loop_lab = gen_label_rtx ();
2282 cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
2283 cntv = make_tree (sizetype, cntvar);
2284 emit_move_insn (cntvar, const0_rtx);
2285 emit_label (loop_lab);
2287 if (TREE_CODE (arg0) != VECTOR_CST)
2289 rtx arg0r = expand_normal (arg0);
2290 arg0 = make_tree (TREE_TYPE (arg0), arg0r);
2292 if (TREE_CODE (arg1) != VECTOR_CST)
2294 rtx arg1r = expand_normal (arg1);
2295 arg1 = make_tree (TREE_TYPE (arg1), arg1r);
2297 for (unsigned int i = 0; i < (use_loop_p ? 1 : const_cnt); i++)
2299 tree op0, op1, res = NULL_TREE;
2300 if (use_loop_p)
2302 tree atype = build_array_type_nelts (eltype, cnt);
2303 op0 = uniform_vector_p (arg0);
2304 if (op0 == NULL_TREE)
2306 op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
2307 op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
2308 NULL_TREE, NULL_TREE);
2310 op1 = uniform_vector_p (arg1);
2311 if (op1 == NULL_TREE)
2313 op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
2314 op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
2315 NULL_TREE, NULL_TREE);
2317 if (resv)
2319 res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
2320 res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
2321 NULL_TREE, NULL_TREE);
2324 else
2326 tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
2327 op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
2328 op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
2329 if (resv)
2330 res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
2331 bitpos);
2333 switch (code)
2335 case PLUS_EXPR:
2336 expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
2337 false, false, false, true, &data);
2338 break;
2339 case MINUS_EXPR:
2340 if (use_loop_p ? integer_zerop (arg0) : integer_zerop (op0))
2341 expand_neg_overflow (loc, res, op1, true, &data);
2342 else
2343 expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
2344 false, false, false, true, &data);
2345 break;
2346 case MULT_EXPR:
2347 expand_mul_overflow (loc, res, op0, op1, false, false, false,
2348 true, &data);
2349 break;
2350 default:
2351 gcc_unreachable ();
2354 if (use_loop_p)
2356 struct separate_ops ops;
2357 ops.code = PLUS_EXPR;
2358 ops.type = TREE_TYPE (cntv);
2359 ops.op0 = cntv;
2360 ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
2361 ops.op2 = NULL_TREE;
2362 ops.location = loc;
2363 rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
2364 EXPAND_NORMAL);
2365 if (ret != cntvar)
2366 emit_move_insn (cntvar, ret);
2367 rtx cntrtx = gen_int_mode (cnt, TYPE_MODE (sizetype));
2368 do_compare_rtx_and_jump (cntvar, cntrtx, NE, false,
2369 TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
2370 profile_probability::very_likely ());
2372 if (lhs && resv == NULL_TREE)
2374 struct separate_ops ops;
2375 ops.code = code;
2376 ops.type = TREE_TYPE (arg0);
2377 ops.op0 = arg0;
2378 ops.op1 = arg1;
2379 ops.op2 = NULL_TREE;
2380 ops.location = loc;
2381 rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
2382 EXPAND_NORMAL);
2383 if (ret != lhsr)
2384 emit_move_insn (lhsr, ret);
2386 else if (resvr)
2387 emit_move_insn (lhsr, resvr);
2390 /* Expand UBSAN_CHECK_ADD call STMT. */
2392 static void
2393 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2395 location_t loc = gimple_location (stmt);
2396 tree lhs = gimple_call_lhs (stmt);
2397 tree arg0 = gimple_call_arg (stmt, 0);
2398 tree arg1 = gimple_call_arg (stmt, 1);
2399 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2400 expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2401 else
2402 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2403 false, false, false, true, NULL);
2406 /* Expand UBSAN_CHECK_SUB call STMT. */
2408 static void
2409 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2411 location_t loc = gimple_location (stmt);
2412 tree lhs = gimple_call_lhs (stmt);
2413 tree arg0 = gimple_call_arg (stmt, 0);
2414 tree arg1 = gimple_call_arg (stmt, 1);
2415 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2416 expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2417 else if (integer_zerop (arg0))
2418 expand_neg_overflow (loc, lhs, arg1, true, NULL);
2419 else
2420 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2421 false, false, false, true, NULL);
2424 /* Expand UBSAN_CHECK_MUL call STMT. */
2426 static void
2427 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2429 location_t loc = gimple_location (stmt);
2430 tree lhs = gimple_call_lhs (stmt);
2431 tree arg0 = gimple_call_arg (stmt, 0);
2432 tree arg1 = gimple_call_arg (stmt, 1);
2433 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2434 expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2435 else
2436 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2437 NULL);
2440 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
2442 static void
2443 expand_arith_overflow (enum tree_code code, gimple *stmt)
2445 tree lhs = gimple_call_lhs (stmt);
2446 if (lhs == NULL_TREE)
2447 return;
2448 tree arg0 = gimple_call_arg (stmt, 0);
2449 tree arg1 = gimple_call_arg (stmt, 1);
2450 tree type = TREE_TYPE (TREE_TYPE (lhs));
2451 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2452 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2453 int unsr_p = TYPE_UNSIGNED (type);
2454 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2455 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2456 int precres = TYPE_PRECISION (type);
2457 location_t loc = gimple_location (stmt);
2458 if (!uns0_p && get_range_pos_neg (arg0) == 1)
2459 uns0_p = true;
2460 if (!uns1_p && get_range_pos_neg (arg1) == 1)
2461 uns1_p = true;
2462 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2463 prec0 = MIN (prec0, pr);
2464 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2465 prec1 = MIN (prec1, pr);
2467 /* If uns0_p && uns1_p, precop is minimum needed precision
2468 of unsigned type to hold the exact result, otherwise
2469 precop is minimum needed precision of signed type to
2470 hold the exact result. */
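/* Added example (illustrative): for a 32-bit signed arg0 and a 32-bit
   unsigned arg1, an addition or subtraction needs
   precop == MAX (32, 32 + 1) + 1 == 34 bits, while a multiplication
   needs precop == 32 + 32 + 1 == 65 bits to hold the exact result.  */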
2471 int precop;
2472 if (code == MULT_EXPR)
2473 precop = prec0 + prec1 + (uns0_p != uns1_p);
2474 else
2476 if (uns0_p == uns1_p)
2477 precop = MAX (prec0, prec1) + 1;
2478 else if (uns0_p)
2479 precop = MAX (prec0 + 1, prec1) + 1;
2480 else
2481 precop = MAX (prec0, prec1 + 1) + 1;
2483 int orig_precres = precres;
2487 if ((uns0_p && uns1_p)
2488 ? ((precop + !unsr_p) <= precres
2489 /* u1 - u2 -> ur can overflow, no matter what precision
2490 the result has. */
2491 && (code != MINUS_EXPR || !unsr_p))
2492 : (!unsr_p && precop <= precres))
2494 /* The infinite precision result will always fit into the result. */
2495 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2496 write_complex_part (target, const0_rtx, true);
2497 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2498 struct separate_ops ops;
2499 ops.code = code;
2500 ops.type = type;
2501 ops.op0 = fold_convert_loc (loc, type, arg0);
2502 ops.op1 = fold_convert_loc (loc, type, arg1);
2503 ops.op2 = NULL_TREE;
2504 ops.location = loc;
2505 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2506 expand_arith_overflow_result_store (lhs, target, mode, tem);
2507 return;
2510 /* For operations with low precision, if the target doesn't have them, start
2511 with precres widening right away, otherwise do it only if the
2512 simplest cases can't be used. */
2513 const int min_precision = targetm.min_arithmetic_precision ();
2514 if (orig_precres == precres && precres < min_precision)
2516 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2517 && prec1 <= precres)
2518 || ((!uns0_p || !uns1_p) && !unsr_p
2519 && prec0 + uns0_p <= precres
2520 && prec1 + uns1_p <= precres))
2522 arg0 = fold_convert_loc (loc, type, arg0);
2523 arg1 = fold_convert_loc (loc, type, arg1);
2524 switch (code)
2526 case MINUS_EXPR:
2527 if (integer_zerop (arg0) && !unsr_p)
2529 expand_neg_overflow (loc, lhs, arg1, false, NULL);
2530 return;
2532 /* FALLTHRU */
2533 case PLUS_EXPR:
2534 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2535 unsr_p, unsr_p, false, NULL);
2536 return;
2537 case MULT_EXPR:
2538 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2539 unsr_p, unsr_p, false, NULL);
2540 return;
2541 default:
2542 gcc_unreachable ();
2546 /* For sub-word operations, retry with a wider type first. */
2547 if (orig_precres == precres && precop <= BITS_PER_WORD)
2549 int p = MAX (min_precision, precop);
2550 scalar_int_mode m = smallest_int_mode_for_size (p);
2551 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2552 uns0_p && uns1_p
2553 && unsr_p);
2554 p = TYPE_PRECISION (optype);
2555 if (p > precres)
2557 precres = p;
2558 unsr_p = TYPE_UNSIGNED (optype);
2559 type = optype;
2560 continue;
2564 if (prec0 <= precres && prec1 <= precres)
2566 tree types[2];
2567 if (unsr_p)
2569 types[0] = build_nonstandard_integer_type (precres, 0);
2570 types[1] = type;
2572 else
2574 types[0] = type;
2575 types[1] = build_nonstandard_integer_type (precres, 1);
2577 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2578 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2579 if (code != MULT_EXPR)
2580 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2581 uns0_p, uns1_p, false, NULL);
2582 else
2583 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2584 uns0_p, uns1_p, false, NULL);
2585 return;
2588 /* Retry with a wider type. */
2589 if (orig_precres == precres)
2591 int p = MAX (prec0, prec1);
2592 scalar_int_mode m = smallest_int_mode_for_size (p);
2593 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2594 uns0_p && uns1_p
2595 && unsr_p);
2596 p = TYPE_PRECISION (optype);
2597 if (p > precres)
2599 precres = p;
2600 unsr_p = TYPE_UNSIGNED (optype);
2601 type = optype;
2602 continue;
2606 gcc_unreachable ();
2608 while (1);
2611 /* Expand ADD_OVERFLOW STMT. */
2613 static void
2614 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2616 expand_arith_overflow (PLUS_EXPR, stmt);
2619 /* Expand SUB_OVERFLOW STMT. */
2621 static void
2622 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2624 expand_arith_overflow (MINUS_EXPR, stmt);
2627 /* Expand MUL_OVERFLOW STMT. */
2629 static void
2630 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2632 expand_arith_overflow (MULT_EXPR, stmt);
2635 /* This should get folded in tree-vectorizer.c. */
2637 static void
2638 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2640 gcc_unreachable ();
2643 /* This should get folded in tree-vectorizer.c. */
2645 static void
2646 expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
2648 gcc_unreachable ();
2651 /* Return a memory reference of type TYPE for argument INDEX of STMT.
2652 Use argument INDEX + 1 to derive the second (TBAA) operand. */
2654 static tree
2655 expand_call_mem_ref (tree type, gcall *stmt, int index)
2657 tree addr = gimple_call_arg (stmt, index);
2658 tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
2659 unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
2660 if (TYPE_ALIGN (type) != align)
2661 type = build_aligned_type (type, align);
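/* If ADDR is (possibly via a single SSA copy) the address of a
   TARGET_MEM_REF with a compatible type, reuse that TARGET_MEM_REF,
   adjusting its offset's alias pointer type and its type as needed,
   instead of wrapping the address in a plain MEM_REF.  */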
2663 tree tmp = addr;
2664 if (TREE_CODE (tmp) == SSA_NAME)
2666 gimple *def = SSA_NAME_DEF_STMT (tmp);
2667 if (gimple_assign_single_p (def))
2668 tmp = gimple_assign_rhs1 (def);
2671 if (TREE_CODE (tmp) == ADDR_EXPR)
2673 tree mem = TREE_OPERAND (tmp, 0);
2674 if (TREE_CODE (mem) == TARGET_MEM_REF
2675 && types_compatible_p (TREE_TYPE (mem), type))
2677 tree offset = TMR_OFFSET (mem);
2678 if (type != TREE_TYPE (mem)
2679 || alias_ptr_type != TREE_TYPE (offset)
2680 || !integer_zerop (offset))
2682 mem = copy_node (mem);
2683 TMR_OFFSET (mem) = wide_int_to_tree (alias_ptr_type,
2684 wi::to_poly_wide (offset));
2685 TREE_TYPE (mem) = type;
2687 return mem;
2691 return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
2694 /* Expand MASK_LOAD{,_LANES} or LEN_LOAD call STMT using optab OPTAB. */
2696 static void
2697 expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2699 class expand_operand ops[3];
2700 tree type, lhs, rhs, maskt;
2701 rtx mem, target, mask;
2702 insn_code icode;
2704 maskt = gimple_call_arg (stmt, 2);
2705 lhs = gimple_call_lhs (stmt);
2706 if (lhs == NULL_TREE)
2707 return;
2708 type = TREE_TYPE (lhs);
2709 rhs = expand_call_mem_ref (type, stmt, 0);
2711 if (optab == vec_mask_load_lanes_optab)
2712 icode = get_multi_vector_move (type, optab);
2713 else if (optab == len_load_optab)
2714 icode = direct_optab_handler (optab, TYPE_MODE (type));
2715 else
2716 icode = convert_optab_handler (optab, TYPE_MODE (type),
2717 TYPE_MODE (TREE_TYPE (maskt)));
2719 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2720 gcc_assert (MEM_P (mem));
2721 mask = expand_normal (maskt);
2722 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2723 create_output_operand (&ops[0], target, TYPE_MODE (type));
2724 create_fixed_operand (&ops[1], mem);
2725 if (optab == len_load_optab)
2726 create_convert_operand_from (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)),
2727 TYPE_UNSIGNED (TREE_TYPE (maskt)));
2728 else
2729 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2730 expand_insn (icode, 3, ops);
2731 if (!rtx_equal_p (target, ops[0].value))
2732 emit_move_insn (target, ops[0].value);
2735 #define expand_mask_load_optab_fn expand_partial_load_optab_fn
2736 #define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
2737 #define expand_len_load_optab_fn expand_partial_load_optab_fn
2739 /* Expand MASK_STORE{,_LANES} or LEN_STORE call STMT using optab OPTAB. */
2741 static void
2742 expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2744 class expand_operand ops[3];
2745 tree type, lhs, rhs, maskt;
2746 rtx mem, reg, mask;
2747 insn_code icode;
2749 maskt = gimple_call_arg (stmt, 2);
2750 rhs = gimple_call_arg (stmt, 3);
2751 type = TREE_TYPE (rhs);
2752 lhs = expand_call_mem_ref (type, stmt, 0);
2754 if (optab == vec_mask_store_lanes_optab)
2755 icode = get_multi_vector_move (type, optab);
2756 else if (optab == len_store_optab)
2757 icode = direct_optab_handler (optab, TYPE_MODE (type));
2758 else
2759 icode = convert_optab_handler (optab, TYPE_MODE (type),
2760 TYPE_MODE (TREE_TYPE (maskt)));
2762 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2763 gcc_assert (MEM_P (mem));
2764 mask = expand_normal (maskt);
2765 reg = expand_normal (rhs);
2766 create_fixed_operand (&ops[0], mem);
2767 create_input_operand (&ops[1], reg, TYPE_MODE (type));
2768 if (optab == len_store_optab)
2769 create_convert_operand_from (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)),
2770 TYPE_UNSIGNED (TREE_TYPE (maskt)));
2771 else
2772 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2773 expand_insn (icode, 3, ops);
2776 #define expand_mask_store_optab_fn expand_partial_store_optab_fn
2777 #define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
2778 #define expand_len_store_optab_fn expand_partial_store_optab_fn
2780 /* Expand VCOND, VCONDU and VCONDEQ optab internal functions.
2781 The expansion of STMT is driven by the associated OPTAB. */
2783 static void
2784 expand_vec_cond_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2786 class expand_operand ops[6];
2787 insn_code icode;
2788 tree lhs = gimple_call_lhs (stmt);
2789 tree op0a = gimple_call_arg (stmt, 0);
2790 tree op0b = gimple_call_arg (stmt, 1);
2791 tree op1 = gimple_call_arg (stmt, 2);
2792 tree op2 = gimple_call_arg (stmt, 3);
2793 enum tree_code tcode = (tree_code) int_cst_value (gimple_call_arg (stmt, 4));
2795 tree vec_cond_type = TREE_TYPE (lhs);
2796 tree op_mode = TREE_TYPE (op0a);
2797 bool unsignedp = TYPE_UNSIGNED (op_mode);
2799 machine_mode mode = TYPE_MODE (vec_cond_type);
2800 machine_mode cmp_op_mode = TYPE_MODE (op_mode);
2802 icode = convert_optab_handler (optab, mode, cmp_op_mode);
2803 rtx comparison
2804 = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp, icode, 4);
2805 rtx rtx_op1 = expand_normal (op1);
2806 rtx rtx_op2 = expand_normal (op2);
2808 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2809 create_output_operand (&ops[0], target, mode);
2810 create_input_operand (&ops[1], rtx_op1, mode);
2811 create_input_operand (&ops[2], rtx_op2, mode);
2812 create_fixed_operand (&ops[3], comparison);
2813 create_fixed_operand (&ops[4], XEXP (comparison, 0));
2814 create_fixed_operand (&ops[5], XEXP (comparison, 1));
2815 expand_insn (icode, 6, ops);
2816 if (!rtx_equal_p (ops[0].value, target))
2817 emit_move_insn (target, ops[0].value);
2820 /* Expand VCOND_MASK optab internal function.
2821 The expansion of STMT is driven by the associated OPTAB. */
2823 static void
2824 expand_vec_cond_mask_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2826 class expand_operand ops[4];
2828 tree lhs = gimple_call_lhs (stmt);
2829 tree op0 = gimple_call_arg (stmt, 0);
2830 tree op1 = gimple_call_arg (stmt, 1);
2831 tree op2 = gimple_call_arg (stmt, 2);
2832 tree vec_cond_type = TREE_TYPE (lhs);
2834 machine_mode mode = TYPE_MODE (vec_cond_type);
2835 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
2836 enum insn_code icode = convert_optab_handler (optab, mode, mask_mode);
2837 rtx mask, rtx_op1, rtx_op2;
2839 gcc_assert (icode != CODE_FOR_nothing);
2841 mask = expand_normal (op0);
2842 rtx_op1 = expand_normal (op1);
2843 rtx_op2 = expand_normal (op2);
2845 mask = force_reg (mask_mode, mask);
2846 rtx_op1 = force_reg (mode, rtx_op1);
2848 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2849 create_output_operand (&ops[0], target, mode);
2850 create_input_operand (&ops[1], rtx_op1, mode);
2851 create_input_operand (&ops[2], rtx_op2, mode);
2852 create_input_operand (&ops[3], mask, mask_mode);
2853 expand_insn (icode, 4, ops);
2854 if (!rtx_equal_p (ops[0].value, target))
2855 emit_move_insn (target, ops[0].value);
2858 /* Expand VEC_SET internal functions. */
2860 static void
2861 expand_vec_set_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2863 tree lhs = gimple_call_lhs (stmt);
2864 tree op0 = gimple_call_arg (stmt, 0);
2865 tree op1 = gimple_call_arg (stmt, 1);
2866 tree op2 = gimple_call_arg (stmt, 2);
2867 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2868 rtx src = expand_normal (op0);
2870 machine_mode outermode = TYPE_MODE (TREE_TYPE (op0));
2871 scalar_mode innermode = GET_MODE_INNER (outermode);
2873 rtx value = expand_normal (op1);
2874 rtx pos = expand_normal (op2);
2876 class expand_operand ops[3];
2877 enum insn_code icode = optab_handler (optab, outermode);
2879 if (icode != CODE_FOR_nothing)
2881 rtx temp = gen_reg_rtx (outermode);
2882 emit_move_insn (temp, src);
2884 create_fixed_operand (&ops[0], temp);
2885 create_input_operand (&ops[1], value, innermode);
2886 create_convert_operand_from (&ops[2], pos, TYPE_MODE (TREE_TYPE (op2)),
2887 true);
2888 if (maybe_expand_insn (icode, 3, ops))
2890 emit_move_insn (target, temp);
2891 return;
2894 gcc_unreachable ();
2897 static void
2898 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2902 static void
2903 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2905 /* When guessing was done, the hints should already have been stripped away. */
2906 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2908 rtx target;
2909 tree lhs = gimple_call_lhs (stmt);
2910 if (lhs)
2911 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2912 else
2913 target = const0_rtx;
2914 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2915 if (lhs && val != target)
2916 emit_move_insn (target, val);
2919 /* IFN_VA_ARG is supposed to be expanded at pass_stdarg. So this dummy function
2920 should never be called. */
2922 static void
2923 expand_VA_ARG (internal_fn, gcall *)
2925 gcc_unreachable ();
2928 /* IFN_VEC_CONVERT is supposed to be expanded at pass_lower_vector. So this
2929 dummy function should never be called. */
2931 static void
2932 expand_VEC_CONVERT (internal_fn, gcall *)
2934 gcc_unreachable ();
2937 /* Expand the IFN_UNIQUE function according to its first argument. */
2939 static void
2940 expand_UNIQUE (internal_fn, gcall *stmt)
2942 rtx pattern = NULL_RTX;
2943 enum ifn_unique_kind kind
2944 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2946 switch (kind)
2948 default:
2949 gcc_unreachable ();
2951 case IFN_UNIQUE_UNSPEC:
2952 if (targetm.have_unique ())
2953 pattern = targetm.gen_unique ();
2954 break;
2956 case IFN_UNIQUE_OACC_FORK:
2957 case IFN_UNIQUE_OACC_JOIN:
2958 if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2960 tree lhs = gimple_call_lhs (stmt);
2961 rtx target = const0_rtx;
2963 if (lhs)
2964 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2966 rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2967 rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2969 if (kind == IFN_UNIQUE_OACC_FORK)
2970 pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2971 else
2972 pattern = targetm.gen_oacc_join (target, data_dep, axis);
2974 else
2975 gcc_unreachable ();
2976 break;
2979 if (pattern)
2980 emit_insn (pattern);
2983 /* Expand the IFN_DEFERRED_INIT function:
2984 LHS = DEFERRED_INIT (SIZE of the DECL, INIT_TYPE, IS_VLA);
2986 if IS_VLA is false, the LHS is the DECL itself,
2987 if IS_VLA is true, the LHS is a MEM_REF whose address is the pointer
2988 to this DECL.
2990 Initialize the LHS with zero/pattern according to its second argument
2991 INIT_TYPE:
2992 if INIT_TYPE is AUTO_INIT_ZERO, use zeroes to initialize;
2993 if INIT_TYPE is AUTO_INIT_PATTERN, use 0xFE byte-repeatable pattern
2994 to initialize;
2995 The LHS variable is initialized including padding.
2996 The reasons to choose 0xFE for pattern initialization are:
2997 1. It is a non-canonical virtual address on x86_64, and at the
2998 high end of the i386 kernel address space.
2999 2. It is a very large float value (-1.694739530317379e+38).
3000 3. It is also an unusual number for integers. */
3001 #define INIT_PATTERN_VALUE 0xFE
3002 static void
3003 expand_DEFERRED_INIT (internal_fn, gcall *stmt)
3005 tree lhs = gimple_call_lhs (stmt);
3006 tree var_size = gimple_call_arg (stmt, 0);
3007 enum auto_init_type init_type
3008 = (enum auto_init_type) TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
3009 bool reg_lhs = true;
3011 tree var_type = TREE_TYPE (lhs);
3012 gcc_assert (init_type > AUTO_INIT_UNINITIALIZED);
3014 if (TREE_CODE (lhs) == SSA_NAME)
3015 reg_lhs = true;
3016 else
3018 rtx tem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3019 reg_lhs = !MEM_P (tem);
3022 if (!reg_lhs)
3024 /* If this is a VLA or the variable is not in a register,
3025 expand to a memset to initialize it. */
3026 mark_addressable (lhs);
3027 tree var_addr = build_fold_addr_expr (lhs);
3029 tree value = (init_type == AUTO_INIT_PATTERN) ?
3030 build_int_cst (integer_type_node,
3031 INIT_PATTERN_VALUE) :
3032 integer_zero_node;
3033 tree m_call = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMSET),
3034 3, var_addr, value, var_size);
3035 /* Expand this memset call. */
3036 expand_builtin_memset (m_call, NULL_RTX, TYPE_MODE (var_type));
3038 else
3040 /* If this variable is in a register, using expand_assignment might
3041 generate better code. */
3042 tree init = build_zero_cst (var_type);
3043 unsigned HOST_WIDE_INT total_bytes
3044 = tree_to_uhwi (TYPE_SIZE_UNIT (var_type));
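/* For pattern initialization, build an INIT constant of VAR_TYPE whose
   object representation is TOTAL_BYTES copies of 0xFE: interpret the
   byte buffer directly as VAR_TYPE when possible, otherwise read it
   into a wide integer and VIEW_CONVERT that to VAR_TYPE.  */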
3046 if (init_type == AUTO_INIT_PATTERN)
3048 unsigned char *buf = (unsigned char *) xmalloc (total_bytes);
3049 memset (buf, INIT_PATTERN_VALUE, total_bytes);
3050 if (can_native_interpret_type_p (var_type))
3051 init = native_interpret_expr (var_type, buf, total_bytes);
3052 else
3054 tree itype = build_nonstandard_integer_type
3055 (total_bytes * BITS_PER_UNIT, 1);
3056 wide_int w = wi::from_buffer (buf, total_bytes);
3057 init = build1 (VIEW_CONVERT_EXPR, var_type,
3058 wide_int_to_tree (itype, w));
3062 expand_assignment (lhs, init, false);
3066 /* The size of an OpenACC compute dimension. */
3068 static void
3069 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
3071 tree lhs = gimple_call_lhs (stmt);
3073 if (!lhs)
3074 return;
3076 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3077 if (targetm.have_oacc_dim_size ())
3079 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
3080 VOIDmode, EXPAND_NORMAL);
3081 emit_insn (targetm.gen_oacc_dim_size (target, dim));
3083 else
3084 emit_move_insn (target, GEN_INT (1));
3087 /* The position of an OpenACC execution engine along one compute axis. */
3089 static void
3090 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
3092 tree lhs = gimple_call_lhs (stmt);
3094 if (!lhs)
3095 return;
3097 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3098 if (targetm.have_oacc_dim_pos ())
3100 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
3101 VOIDmode, EXPAND_NORMAL);
3102 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
3104 else
3105 emit_move_insn (target, const0_rtx);
3108 /* This is expanded by oacc_device_lower pass. */
3110 static void
3111 expand_GOACC_LOOP (internal_fn, gcall *)
3113 gcc_unreachable ();
3116 /* This is expanded by oacc_device_lower pass. */
3118 static void
3119 expand_GOACC_REDUCTION (internal_fn, gcall *)
3121 gcc_unreachable ();
3124 /* This is expanded by oacc_device_lower pass. */
3126 static void
3127 expand_GOACC_TILE (internal_fn, gcall *)
3129 gcc_unreachable ();
3132 /* Set errno to EDOM. */
3134 static void
3135 expand_SET_EDOM (internal_fn, gcall *)
3137 #ifdef TARGET_EDOM
3138 #ifdef GEN_ERRNO_RTX
3139 rtx errno_rtx = GEN_ERRNO_RTX;
3140 #else
3141 rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
3142 #endif
3143 emit_move_insn (errno_rtx,
3144 gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
3145 #else
3146 gcc_unreachable ();
3147 #endif
3150 /* Expand atomic bit test and set. */
3152 static void
3153 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
3155 expand_ifn_atomic_bit_test_and (call);
3158 /* Expand atomic bit test and complement. */
3160 static void
3161 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
3163 expand_ifn_atomic_bit_test_and (call);
3166 /* Expand atomic bit test and reset. */
3168 static void
3169 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
3171 expand_ifn_atomic_bit_test_and (call);
3174 /* Expand atomic compare and exchange. */
3176 static void
3177 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
3179 expand_ifn_atomic_compare_exchange (call);
3182 /* Expand LAUNDER to assignment, lhs = arg0. */
3184 static void
3185 expand_LAUNDER (internal_fn, gcall *call)
3187 tree lhs = gimple_call_lhs (call);
3189 if (!lhs)
3190 return;
3192 expand_assignment (lhs, gimple_call_arg (call, 0), false);
3195 /* Expand {MASK_,}SCATTER_STORE{S,U} call CALL using optab OPTAB. */
3197 static void
3198 expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3200 internal_fn ifn = gimple_call_internal_fn (stmt);
3201 int rhs_index = internal_fn_stored_value_index (ifn);
3202 int mask_index = internal_fn_mask_index (ifn);
3203 tree base = gimple_call_arg (stmt, 0);
3204 tree offset = gimple_call_arg (stmt, 1);
3205 tree scale = gimple_call_arg (stmt, 2);
3206 tree rhs = gimple_call_arg (stmt, rhs_index);
3208 rtx base_rtx = expand_normal (base);
3209 rtx offset_rtx = expand_normal (offset);
3210 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3211 rtx rhs_rtx = expand_normal (rhs);
3213 class expand_operand ops[6];
3214 int i = 0;
3215 create_address_operand (&ops[i++], base_rtx);
3216 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3217 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3218 create_integer_operand (&ops[i++], scale_int);
3219 create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
3220 if (mask_index >= 0)
3222 tree mask = gimple_call_arg (stmt, mask_index);
3223 rtx mask_rtx = expand_normal (mask);
3224 create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
3227 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)),
3228 TYPE_MODE (TREE_TYPE (offset)));
3229 expand_insn (icode, i, ops);
3232 /* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB. */
3234 static void
3235 expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3237 tree lhs = gimple_call_lhs (stmt);
3238 tree base = gimple_call_arg (stmt, 0);
3239 tree offset = gimple_call_arg (stmt, 1);
3240 tree scale = gimple_call_arg (stmt, 2);
3242 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3243 rtx base_rtx = expand_normal (base);
3244 rtx offset_rtx = expand_normal (offset);
3245 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3247 int i = 0;
3248 class expand_operand ops[6];
3249 create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
3250 create_address_operand (&ops[i++], base_rtx);
3251 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3252 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3253 create_integer_operand (&ops[i++], scale_int);
3254 if (optab == mask_gather_load_optab)
3256 tree mask = gimple_call_arg (stmt, 4);
3257 rtx mask_rtx = expand_normal (mask);
3258 create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
3260 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)),
3261 TYPE_MODE (TREE_TYPE (offset)));
3262 expand_insn (icode, i, ops);
3263 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3264 emit_move_insn (lhs_rtx, ops[0].value);
3267 /* Helper for expand_DIVMOD. Return true if the sequence starting with
3268 INSN contains any call insns or insns with {,U}{DIV,MOD} rtxes. */
3270 static bool
3271 contains_call_div_mod (rtx_insn *insn)
3273 subrtx_iterator::array_type array;
3274 for (; insn; insn = NEXT_INSN (insn))
3275 if (CALL_P (insn))
3276 return true;
3277 else if (INSN_P (insn))
3278 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3279 switch (GET_CODE (*iter))
3281 case CALL:
3282 case DIV:
3283 case UDIV:
3284 case MOD:
3285 case UMOD:
3286 return true;
3287 default:
3288 break;
3290 return false;
3293 /* Expand DIVMOD() using:
3294 a) optab handler for udivmod/sdivmod if it is available.
3295 b) If optab_handler doesn't exist, generate call to
3296 target-specific divmod libfunc. */
3298 static void
3299 expand_DIVMOD (internal_fn, gcall *call_stmt)
3301 tree lhs = gimple_call_lhs (call_stmt);
3302 tree arg0 = gimple_call_arg (call_stmt, 0);
3303 tree arg1 = gimple_call_arg (call_stmt, 1);
3305 gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
3306 tree type = TREE_TYPE (TREE_TYPE (lhs));
3307 machine_mode mode = TYPE_MODE (type);
3308 bool unsignedp = TYPE_UNSIGNED (type);
3309 optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
3311 rtx op0 = expand_normal (arg0);
3312 rtx op1 = expand_normal (arg1);
3313 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3315 rtx quotient = NULL_RTX, remainder = NULL_RTX;
3316 rtx_insn *insns = NULL;
3318 if (TREE_CODE (arg1) == INTEGER_CST)
3320 /* For DIVMOD by integral constants, there could be efficient code
3321 expanded inline e.g. using shifts and plus/minus. Try to expand
3322 the division and modulo, and if it emits any library calls or any
3323 {,U}{DIV,MOD} rtxes, throw it away and use a divmod optab or
3324 divmod libcall. */
3325 scalar_int_mode int_mode;
3326 if (remainder == NULL_RTX
3327 && optimize
3328 && CONST_INT_P (op1)
3329 && !pow2p_hwi (INTVAL (op1))
3330 && is_int_mode (TYPE_MODE (type), &int_mode)
3331 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3332 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
3333 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
3334 && optimize_insn_for_speed_p ())
3336 rtx_insn *last = get_last_insn ();
3337 remainder = NULL_RTX;
3338 quotient = expand_doubleword_divmod (int_mode, op0, op1, &remainder,
3339 TYPE_UNSIGNED (type));
3340 if (quotient != NULL_RTX)
3342 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
3344 rtx_insn *move = emit_move_insn (quotient, quotient);
3345 set_dst_reg_note (move, REG_EQUAL,
3346 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3347 ? UDIV : DIV, int_mode,
3348 copy_rtx (op0), op1),
3349 quotient);
3350 move = emit_move_insn (remainder, remainder);
3351 set_dst_reg_note (move, REG_EQUAL,
3352 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3353 ? UMOD : MOD, int_mode,
3354 copy_rtx (op0), op1),
3355 remainder);
3358 else
3359 delete_insns_since (last);
3362 if (remainder == NULL_RTX)
3364 struct separate_ops ops;
3365 ops.code = TRUNC_DIV_EXPR;
3366 ops.type = type;
3367 ops.op0 = make_tree (ops.type, op0);
3368 ops.op1 = arg1;
3369 ops.op2 = NULL_TREE;
3370 ops.location = gimple_location (call_stmt);
3371 start_sequence ();
3372 quotient = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
3373 if (contains_call_div_mod (get_insns ()))
3374 quotient = NULL_RTX;
3375 else
3377 ops.code = TRUNC_MOD_EXPR;
3378 remainder = expand_expr_real_2 (&ops, NULL_RTX, mode,
3379 EXPAND_NORMAL);
3380 if (contains_call_div_mod (get_insns ()))
3381 remainder = NULL_RTX;
3383 if (remainder)
3384 insns = get_insns ();
3385 end_sequence ();
3389 if (remainder)
3390 emit_insn (insns);
3392 /* Check if optab_handler exists for divmod_optab for given mode. */
3393 else if (optab_handler (tab, mode) != CODE_FOR_nothing)
3395 quotient = gen_reg_rtx (mode);
3396 remainder = gen_reg_rtx (mode);
3397 expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
3400 /* Generate call to divmod libfunc if it exists. */
3401 else if (rtx libfunc = optab_libfunc (tab, mode))
3402 targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
3403 &quotient, &remainder);
3405 else
3406 gcc_unreachable ();
3408 /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR. */
3409 expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
3410 make_tree (TREE_TYPE (arg0), quotient),
3411 make_tree (TREE_TYPE (arg1), remainder)),
3412 target, VOIDmode, EXPAND_NORMAL);
3415 /* Expand a NOP. */
3417 static void
3418 expand_NOP (internal_fn, gcall *)
3420 /* Nothing. But it shouldn't really prevail. */
3423 /* Coroutines, all should have been processed at this stage. */
3425 static void
3426 expand_CO_FRAME (internal_fn, gcall *)
3428 gcc_unreachable ();
3431 static void
3432 expand_CO_YIELD (internal_fn, gcall *)
3434 gcc_unreachable ();
3437 static void
3438 expand_CO_SUSPN (internal_fn, gcall *)
3440 gcc_unreachable ();
3443 static void
3444 expand_CO_ACTOR (internal_fn, gcall *)
3446 gcc_unreachable ();
3449 /* Expand a call to FN using the operands in STMT. FN has a single
3450 output operand and NARGS input operands. */
3452 static void
3453 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
3454 unsigned int nargs)
3456 expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
3458 tree_pair types = direct_internal_fn_types (fn, stmt);
3459 insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
3460 gcc_assert (icode != CODE_FOR_nothing);
3462 tree lhs = gimple_call_lhs (stmt);
3463 rtx lhs_rtx = NULL_RTX;
3464 if (lhs)
3465 lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3467 /* Do not assign directly to a promoted subreg, since there is no
3468 guarantee that the instruction will leave the upper bits of the
3469 register in the state required by SUBREG_PROMOTED_SIGN. */
3470 rtx dest = lhs_rtx;
3471 if (dest && GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
3472 dest = NULL_RTX;
3474 create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);
3476 for (unsigned int i = 0; i < nargs; ++i)
3478 tree rhs = gimple_call_arg (stmt, i);
3479 tree rhs_type = TREE_TYPE (rhs);
3480 rtx rhs_rtx = expand_normal (rhs);
3481 if (INTEGRAL_TYPE_P (rhs_type))
3482 create_convert_operand_from (&ops[i + 1], rhs_rtx,
3483 TYPE_MODE (rhs_type),
3484 TYPE_UNSIGNED (rhs_type));
3485 else
3486 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
3489 expand_insn (icode, nargs + 1, ops);
3490 if (lhs_rtx && !rtx_equal_p (lhs_rtx, ops[0].value))
3492 /* If the return value has an integral type, convert the instruction
3493 result to that type. This is useful for things that return an
3494 int regardless of the size of the input. If the instruction result
3495 is smaller than required, assume that it is signed.
3497 If the return value has a nonintegral type, its mode must match
3498 the instruction result. */
3499 if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
3501 /* If this is a scalar in a register that is stored in a wider
3502 mode than the declared mode, compute the result into its
3503 declared mode and then convert to the wider mode. */
3504 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
3505 rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
3506 convert_move (SUBREG_REG (lhs_rtx), tmp,
3507 SUBREG_PROMOTED_SIGN (lhs_rtx));
3509 else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
3510 emit_move_insn (lhs_rtx, ops[0].value);
3511 else
3513 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
3514 convert_move (lhs_rtx, ops[0].value, 0);
3519 /* Expand WHILE_ULT call STMT using optab OPTAB. */
3521 static void
3522 expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3524 expand_operand ops[3];
3525 tree rhs_type[2];
3527 tree lhs = gimple_call_lhs (stmt);
3528 tree lhs_type = TREE_TYPE (lhs);
3529 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3530 create_output_operand (&ops[0], lhs_rtx, TYPE_MODE (lhs_type));
3532 for (unsigned int i = 0; i < 2; ++i)
3534 tree rhs = gimple_call_arg (stmt, i);
3535 rhs_type[i] = TREE_TYPE (rhs);
3536 rtx rhs_rtx = expand_normal (rhs);
3537 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type[i]));
3540 insn_code icode = convert_optab_handler (optab, TYPE_MODE (rhs_type[0]),
3541 TYPE_MODE (lhs_type));
3543 expand_insn (icode, 3, ops);
3544 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3545 emit_move_insn (lhs_rtx, ops[0].value);
3548 /* Expanders for optabs that can use expand_direct_optab_fn. */
3550 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
3551 expand_direct_optab_fn (FN, STMT, OPTAB, 1)
3553 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
3554 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3556 #define expand_ternary_optab_fn(FN, STMT, OPTAB) \
3557 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3559 #define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
3560 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3562 #define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
3563 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3565 #define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
3566 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3568 #define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
3569 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3571 #define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
3572 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3574 #define expand_mask_fold_left_optab_fn(FN, STMT, OPTAB) \
3575 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3577 #define expand_check_ptrs_optab_fn(FN, STMT, OPTAB) \
3578 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3580 /* RETURN_TYPE and ARGS are a return type and argument list that are
3581 in principle compatible with FN (which satisfies direct_internal_fn_p).
3582 Return the types that should be used to determine whether the
3583 target supports FN. */
3585 tree_pair
3586 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
3588 const direct_internal_fn_info &info = direct_internal_fn (fn);
3589 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
3590 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
3591 return tree_pair (type0, type1);
3594 /* CALL is a call whose return type and arguments are in principle
3595 compatible with FN (which satisfies direct_internal_fn_p). Return the
3596 types that should be used to determine whether the target supports FN. */
3598 tree_pair
3599 direct_internal_fn_types (internal_fn fn, gcall *call)
3601 const direct_internal_fn_info &info = direct_internal_fn (fn);
3602 tree op0 = (info.type0 < 0
3603 ? gimple_call_lhs (call)
3604 : gimple_call_arg (call, info.type0));
3605 tree op1 = (info.type1 < 0
3606 ? gimple_call_lhs (call)
3607 : gimple_call_arg (call, info.type1));
3608 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
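/* As an illustration of the two overloads above: a negative type0/type1
   index selects the call's return type, while a nonnegative index selects
   the type of that argument.  For a masked load, for example, the pair is
   (type of the loaded vector, type of the mask argument), which is the
   pair of modes that the masked-load convert optab is indexed on.
   (Example only; the per-function indices come from each function's
   direct_internal_fn_info.)  */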
3611 /* Return true if OPTAB is supported for TYPES (whose modes should be
3612 the same) when the optimization type is OPT_TYPE. Used for simple
3613 direct optabs. */
3615 static bool
3616 direct_optab_supported_p (direct_optab optab, tree_pair types,
3617 optimization_type opt_type)
3619 machine_mode mode = TYPE_MODE (types.first);
3620 gcc_checking_assert (mode == TYPE_MODE (types.second));
3621 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
3624 /* Return true if OPTAB is supported for TYPES, where the first type
3625 is the destination and the second type is the source. Used for
3626 convert optabs. */
3628 static bool
3629 convert_optab_supported_p (convert_optab optab, tree_pair types,
3630 optimization_type opt_type)
3632 return (convert_optab_handler (optab, TYPE_MODE (types.first),
3633 TYPE_MODE (types.second), opt_type)
3634 != CODE_FOR_nothing);
3637 /* Return true if load/store lanes optab OPTAB is supported for
3638 array type TYPES.first when the optimization type is OPT_TYPE. */
3640 static bool
3641 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
3642 optimization_type opt_type)
3644 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
3645 machine_mode imode = TYPE_MODE (types.first);
3646 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
3647 return (convert_optab_handler (optab, imode, vmode, opt_type)
3648 != CODE_FOR_nothing);
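/* Illustrative example (hypothetical target): for a 3-way interleaved
   load of 32-bit ints with 128-bit vectors, TYPES.first is an array type
   holding three V4SI vectors.  IMODE is then the mode of that whole array,
   VMODE is V4SImode, and the vec_load_lanes / vec_store_lanes optab is
   queried on that (IMODE, VMODE) pair.  */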
3651 #define direct_unary_optab_supported_p direct_optab_supported_p
3652 #define direct_binary_optab_supported_p direct_optab_supported_p
3653 #define direct_ternary_optab_supported_p direct_optab_supported_p
3654 #define direct_cond_unary_optab_supported_p direct_optab_supported_p
3655 #define direct_cond_binary_optab_supported_p direct_optab_supported_p
3656 #define direct_cond_ternary_optab_supported_p direct_optab_supported_p
3657 #define direct_mask_load_optab_supported_p convert_optab_supported_p
3658 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
3659 #define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
3660 #define direct_gather_load_optab_supported_p convert_optab_supported_p
3661 #define direct_len_load_optab_supported_p direct_optab_supported_p
3662 #define direct_mask_store_optab_supported_p convert_optab_supported_p
3663 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
3664 #define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
3665 #define direct_vec_cond_mask_optab_supported_p convert_optab_supported_p
3666 #define direct_vec_cond_optab_supported_p convert_optab_supported_p
3667 #define direct_scatter_store_optab_supported_p convert_optab_supported_p
3668 #define direct_len_store_optab_supported_p direct_optab_supported_p
3669 #define direct_while_optab_supported_p convert_optab_supported_p
3670 #define direct_fold_extract_optab_supported_p direct_optab_supported_p
3671 #define direct_fold_left_optab_supported_p direct_optab_supported_p
3672 #define direct_mask_fold_left_optab_supported_p direct_optab_supported_p
3673 #define direct_check_ptrs_optab_supported_p direct_optab_supported_p
3674 #define direct_vec_set_optab_supported_p direct_optab_supported_p
3676 /* Return the optab used by internal function FN. */
3678 static optab
3679 direct_internal_fn_optab (internal_fn fn, tree_pair types)
3681 switch (fn)
3683 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3684 case IFN_##CODE: break;
3685 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3686 case IFN_##CODE: return OPTAB##_optab;
3687 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3688 UNSIGNED_OPTAB, TYPE) \
3689 case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR) \
3690 ? UNSIGNED_OPTAB ## _optab \
3691 : SIGNED_OPTAB ## _optab);
3692 #include "internal-fn.def"
3694 case IFN_LAST:
3695 break;
3697 gcc_unreachable ();
3700 /* Return the optab used by internal function FN. */
3702 static optab
3703 direct_internal_fn_optab (internal_fn fn)
3705 switch (fn)
3707 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3708 case IFN_##CODE: break;
3709 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3710 case IFN_##CODE: return OPTAB##_optab;
3711 #include "internal-fn.def"
3713 case IFN_LAST:
3714 break;
3716 gcc_unreachable ();
3719 /* Return true if FN is supported for the types in TYPES when the
3720 optimization type is OPT_TYPE. The types are those associated with
3721 the "type0" and "type1" fields of FN's direct_internal_fn_info
3722 structure. */
3724 bool
3725 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
3726 optimization_type opt_type)
3728 switch (fn)
3730 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3731 case IFN_##CODE: break;
3732 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3733 case IFN_##CODE: \
3734 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
3735 opt_type);
3736 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3737 UNSIGNED_OPTAB, TYPE) \
3738 case IFN_##CODE: \
3740 optab which_optab = (TYPE_UNSIGNED (types.SELECTOR) \
3741 ? UNSIGNED_OPTAB ## _optab \
3742 : SIGNED_OPTAB ## _optab); \
3743 return direct_##TYPE##_optab_supported_p (which_optab, types, \
3744 opt_type); \
3746 #include "internal-fn.def"
3748 case IFN_LAST:
3749 break;
3751 gcc_unreachable ();
3754 /* Return true if FN is supported for type TYPE when the optimization
3755 type is OPT_TYPE. The caller knows that the "type0" and "type1"
3756 fields of FN's direct_internal_fn_info structure are the same. */
3758 bool
3759 direct_internal_fn_supported_p (internal_fn fn, tree type,
3760 optimization_type opt_type)
3762 const direct_internal_fn_info &info = direct_internal_fn (fn);
3763 gcc_checking_assert (info.type0 == info.type1);
3764 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
3767 /* Return true if STMT is supported when the optimization type is OPT_TYPE,
3768 given that STMT is a call to a direct internal function. */
3770 bool
3771 direct_internal_fn_supported_p (gcall *stmt, optimization_type opt_type)
3773 internal_fn fn = gimple_call_internal_fn (stmt);
3774 tree_pair types = direct_internal_fn_types (fn, stmt);
3775 return direct_internal_fn_supported_p (fn, types, opt_type);
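/* A minimal usage sketch (hypothetical caller, not code from this file):
   a pass that wants to fuse a multiply and an add into a single fused
   multiply-add might guard the transformation with

     if (direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
       ... emit a call to IFN_FMA ...

   using the single-type overload because FMA uses the same type for its
   result and all of its operands.  */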
3778 /* If FN is commutative in two consecutive arguments, return the
3779 index of the first, otherwise return -1. */
3781 int
3782 first_commutative_argument (internal_fn fn)
3784 switch (fn)
3786 case IFN_FMA:
3787 case IFN_FMS:
3788 case IFN_FNMA:
3789 case IFN_FNMS:
3790 case IFN_AVG_FLOOR:
3791 case IFN_AVG_CEIL:
3792 case IFN_MULH:
3793 case IFN_MULHS:
3794 case IFN_MULHRS:
3795 case IFN_FMIN:
3796 case IFN_FMAX:
3797 return 0;
3799 case IFN_COND_ADD:
3800 case IFN_COND_MUL:
3801 case IFN_COND_MIN:
3802 case IFN_COND_MAX:
3803 case IFN_COND_AND:
3804 case IFN_COND_IOR:
3805 case IFN_COND_XOR:
3806 case IFN_COND_FMA:
3807 case IFN_COND_FMS:
3808 case IFN_COND_FNMA:
3809 case IFN_COND_FNMS:
3810 return 1;
3812 default:
3813 return -1;
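/* Two examples of the convention above: IFN_FMA (A, B, C) computes
   A * B + C, and A and B may be swapped, so the index of the first
   commutative argument is 0.  IFN_COND_ADD (MASK, A, B, ELSE) computes
   MASK ? A + B : ELSE elementwise, and there the swappable pair starts
   after the mask, at index 1.  */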
3817 /* Return true if IFN_SET_EDOM is supported. */
3819 bool
3820 set_edom_supported_p (void)
3822 #ifdef TARGET_EDOM
3823 return true;
3824 #else
3825 return false;
3826 #endif
3829 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3830 static void \
3831 expand_##CODE (internal_fn fn, gcall *stmt) \
3833 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
3835 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3836 UNSIGNED_OPTAB, TYPE) \
3837 static void \
3838 expand_##CODE (internal_fn fn, gcall *stmt) \
3840 tree_pair types = direct_internal_fn_types (fn, stmt); \
3841 optab which_optab = direct_internal_fn_optab (fn, types); \
3842 expand_##TYPE##_optab_fn (fn, stmt, which_optab); \
3844 #include "internal-fn.def"
3846 /* Routines to expand each internal function, indexed by function number.
3847 Each routine has the prototype:
3849 expand_<NAME> (internal_fn fn, gcall *stmt)
3851 where FN is the function itself and STMT is the statement that performs the call. */
3852 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
3853 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
3854 #include "internal-fn.def"
3858 /* Invoke T(CODE, IFN) for each conditional function IFN that maps to a
3859 tree code CODE. */
3860 #define FOR_EACH_CODE_MAPPING(T) \
3861 T (PLUS_EXPR, IFN_COND_ADD) \
3862 T (MINUS_EXPR, IFN_COND_SUB) \
3863 T (MULT_EXPR, IFN_COND_MUL) \
3864 T (TRUNC_DIV_EXPR, IFN_COND_DIV) \
3865 T (TRUNC_MOD_EXPR, IFN_COND_MOD) \
3866 T (RDIV_EXPR, IFN_COND_RDIV) \
3867 T (MIN_EXPR, IFN_COND_MIN) \
3868 T (MAX_EXPR, IFN_COND_MAX) \
3869 T (BIT_AND_EXPR, IFN_COND_AND) \
3870 T (BIT_IOR_EXPR, IFN_COND_IOR) \
3871 T (BIT_XOR_EXPR, IFN_COND_XOR) \
3872 T (LSHIFT_EXPR, IFN_COND_SHL) \
3873 T (RSHIFT_EXPR, IFN_COND_SHR)
3875 /* Return a function that only performs CODE when a certain condition is met
3876 and that uses a given fallback value otherwise. For example, if CODE is
3877 a binary operation associated with conditional function FN:
3879 LHS = FN (COND, A, B, ELSE)
3881 is equivalent to the C expression:
3883 LHS = COND ? A CODE B : ELSE;
3885 operating elementwise if the operands are vectors.
3887 Return IFN_LAST if no such function exists. */
3889 internal_fn
3890 get_conditional_internal_fn (tree_code code)
3892 switch (code)
3894 #define CASE(CODE, IFN) case CODE: return IFN;
3895 FOR_EACH_CODE_MAPPING(CASE)
3896 #undef CASE
3897 default:
3898 return IFN_LAST;
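/* A concrete instance of the mapping (illustrative gimple only):
   get_conditional_internal_fn (PLUS_EXPR) is IFN_COND_ADD, so a call

     lhs_1 = .COND_ADD (mask_2, a_3, b_4, c_5);

   computes, for each element i, mask_2[i] ? a_3[i] + b_4[i] : c_5[i].  */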
3902 /* If IFN implements the conditional form of a tree code, return that
3903 tree code, otherwise return ERROR_MARK. */
3905 tree_code
3906 conditional_internal_fn_code (internal_fn ifn)
3908 switch (ifn)
3910 #define CASE(CODE, IFN) case IFN: return CODE;
3911 FOR_EACH_CODE_MAPPING(CASE)
3912 #undef CASE
3913 default:
3914 return ERROR_MARK;
3918 /* Invoke T(IFN) for each internal function IFN that also has an
3919 IFN_COND_* form. */
3920 #define FOR_EACH_COND_FN_PAIR(T) \
3921 T (FMA) \
3922 T (FMS) \
3923 T (FNMA) \
3924 T (FNMS)
3926 /* Return a function that only performs internal function FN when a
3927 certain condition is met and that uses a given fallback value otherwise.
3928 In other words, the returned function FN' is such that:
3930 LHS = FN' (COND, A1, ... An, ELSE)
3932 is equivalent to the C expression:
3934 LHS = COND ? FN (A1, ..., An) : ELSE;
3936 operating elementwise if the operands are vectors.
3938 Return IFN_LAST if no such function exists. */
3940 internal_fn
3941 get_conditional_internal_fn (internal_fn fn)
3943 switch (fn)
3945 #define CASE(NAME) case IFN_##NAME: return IFN_COND_##NAME;
3946 FOR_EACH_COND_FN_PAIR(CASE)
3947 #undef CASE
3948 default:
3949 return IFN_LAST;
3953 /* If IFN implements the conditional form of an unconditional internal
3954 function, return that unconditional function, otherwise return IFN_LAST. */
3956 internal_fn
3957 get_unconditional_internal_fn (internal_fn ifn)
3959 switch (ifn)
3961 #define CASE(NAME) case IFN_COND_##NAME: return IFN_##NAME;
3962 FOR_EACH_COND_FN_PAIR(CASE)
3963 #undef CASE
3964 default:
3965 return IFN_LAST;
3969 /* Return true if STMT can be interpreted as a conditional tree code
3970 operation of the form:
3972 LHS = COND ? OP (RHS1, ...) : ELSE;
3974 operating elementwise if the operands are vectors. This includes
3975 the case of an all-true COND, so that the operation always happens.
3977 When returning true, set:
3979 - *COND_OUT to the condition COND, or to NULL_TREE if the condition
3980 is known to be all-true
3981 - *CODE_OUT to the tree code
3982 - OPS[I] to operand I of *CODE_OUT
3983 - *ELSE_OUT to the fallback value ELSE, or to NULL_TREE if the
3984 condition is known to be all true. */
3986 bool
3987 can_interpret_as_conditional_op_p (gimple *stmt, tree *cond_out,
3988 tree_code *code_out,
3989 tree (&ops)[3], tree *else_out)
3991 if (gassign *assign = dyn_cast <gassign *> (stmt))
3993 *cond_out = NULL_TREE;
3994 *code_out = gimple_assign_rhs_code (assign);
3995 ops[0] = gimple_assign_rhs1 (assign);
3996 ops[1] = gimple_assign_rhs2 (assign);
3997 ops[2] = gimple_assign_rhs3 (assign);
3998 *else_out = NULL_TREE;
3999 return true;
4001 if (gcall *call = dyn_cast <gcall *> (stmt))
4002 if (gimple_call_internal_p (call))
4004 internal_fn ifn = gimple_call_internal_fn (call);
4005 tree_code code = conditional_internal_fn_code (ifn);
4006 if (code != ERROR_MARK)
4008 *cond_out = gimple_call_arg (call, 0);
4009 *code_out = code;
4010 unsigned int nops = gimple_call_num_args (call) - 2;
4011 for (unsigned int i = 0; i < 3; ++i)
4012 ops[i] = i < nops ? gimple_call_arg (call, i + 1) : NULL_TREE;
4013 *else_out = gimple_call_arg (call, nops + 1);
4014 if (integer_truep (*cond_out))
4016 *cond_out = NULL_TREE;
4017 *else_out = NULL_TREE;
4019 return true;
4022 return false;
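/* Two illustrative statements that the function above accepts (sketch
   only; the SSA names are made up):

     x_1 = a_2 * b_3;
       -> *COND_OUT = NULL_TREE, *CODE_OUT = MULT_EXPR,
	  OPS = { a_2, b_3, NULL_TREE }, *ELSE_OUT = NULL_TREE

     x_1 = .COND_MUL (m_4, a_2, b_3, c_5);
       -> *COND_OUT = m_4, *CODE_OUT = MULT_EXPR,
	  OPS = { a_2, b_3, NULL_TREE }, *ELSE_OUT = c_5

   If m_4 were known to be all-true, the condition and the fallback value
   would both be returned as NULL_TREE.  */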
4025 /* Return true if IFN is some form of load from memory. */
4027 bool
4028 internal_load_fn_p (internal_fn fn)
4030 switch (fn)
4032 case IFN_MASK_LOAD:
4033 case IFN_LOAD_LANES:
4034 case IFN_MASK_LOAD_LANES:
4035 case IFN_GATHER_LOAD:
4036 case IFN_MASK_GATHER_LOAD:
4037 case IFN_LEN_LOAD:
4038 return true;
4040 default:
4041 return false;
4045 /* Return true if IFN is some form of store to memory. */
4047 bool
4048 internal_store_fn_p (internal_fn fn)
4050 switch (fn)
4052 case IFN_MASK_STORE:
4053 case IFN_STORE_LANES:
4054 case IFN_MASK_STORE_LANES:
4055 case IFN_SCATTER_STORE:
4056 case IFN_MASK_SCATTER_STORE:
4057 case IFN_LEN_STORE:
4058 return true;
4060 default:
4061 return false;
4065 /* Return true if IFN is some form of gather load or scatter store. */
4067 bool
4068 internal_gather_scatter_fn_p (internal_fn fn)
4070 switch (fn)
4072 case IFN_GATHER_LOAD:
4073 case IFN_MASK_GATHER_LOAD:
4074 case IFN_SCATTER_STORE:
4075 case IFN_MASK_SCATTER_STORE:
4076 return true;
4078 default:
4079 return false;
4083 /* If FN takes a vector mask argument, return the index of that argument,
4084 otherwise return -1. */
4086 int
4087 internal_fn_mask_index (internal_fn fn)
4089 switch (fn)
4091 case IFN_MASK_LOAD:
4092 case IFN_MASK_LOAD_LANES:
4093 case IFN_MASK_STORE:
4094 case IFN_MASK_STORE_LANES:
4095 return 2;
4097 case IFN_MASK_GATHER_LOAD:
4098 case IFN_MASK_SCATTER_STORE:
4099 return 4;
4101 default:
4102 return (conditional_internal_fn_code (fn) != ERROR_MARK
4103 || get_unconditional_internal_fn (fn) != IFN_LAST ? 0 : -1);
4107 /* If FN takes a value that should be stored to memory, return the index
4108 of that argument, otherwise return -1. */
4110 int
4111 internal_fn_stored_value_index (internal_fn fn)
4113 switch (fn)
4115 case IFN_MASK_STORE:
4116 case IFN_MASK_STORE_LANES:
4117 case IFN_SCATTER_STORE:
4118 case IFN_MASK_SCATTER_STORE:
4119 case IFN_LEN_STORE:
4120 return 3;
4122 default:
4123 return -1;
4127 /* Return true if the target supports gather load or scatter store function
4128 IFN. For loads, VECTOR_TYPE is the vector type of the load result,
4129 while for stores it is the vector type of the stored data argument.
4130 MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
4131 or stored. OFFSET_VECTOR_TYPE is the vector type that holds the
4132 offset from the shared base address of each loaded or stored element.
4133 SCALE is the amount by which these offsets should be multiplied
4134 *after* they have been extended to address width. */
4136 bool
4137 internal_gather_scatter_fn_supported_p (internal_fn ifn, tree vector_type,
4138 tree memory_element_type,
4139 tree offset_vector_type, int scale)
4141 if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type)),
4142 TYPE_SIZE (memory_element_type)))
4143 return false;
4144 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vector_type),
4145 TYPE_VECTOR_SUBPARTS (offset_vector_type)))
4146 return false;
4147 optab optab = direct_internal_fn_optab (ifn);
4148 insn_code icode = convert_optab_handler (optab, TYPE_MODE (vector_type),
4149 TYPE_MODE (offset_vector_type));
4150 int output_ops = internal_load_fn_p (ifn) ? 1 : 0;
4151 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (offset_vector_type));
4152 return (icode != CODE_FOR_nothing
4153 && insn_operand_matches (icode, 2 + output_ops, GEN_INT (unsigned_p))
4154 && insn_operand_matches (icode, 3 + output_ops, GEN_INT (scale)));
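/* Notes on the checks above (descriptive only): the gather and scatter
   patterns take an "extend the offsets as unsigned?" flag and a scale
   immediate, and OUTPUT_OPS shifts those operand numbers by one for
   gathers, whose first operand is the loaded result.  A typical caller is
   the vectorizer asking, for instance, whether a masked gather of float
   elements with 32-bit offsets scaled by the element size is available
   before committing to a gather strategy (hypothetical example).  */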
4157 /* Return true if the target supports IFN_CHECK_{RAW,WAR}_PTRS function IFN
4158 for pointers of type TYPE when the accesses have LENGTH bytes and their
4159 common byte alignment is ALIGN. */
4161 bool
4162 internal_check_ptrs_fn_supported_p (internal_fn ifn, tree type,
4163 poly_uint64 length, unsigned int align)
4165 machine_mode mode = TYPE_MODE (type);
4166 optab optab = direct_internal_fn_optab (ifn);
4167 insn_code icode = direct_optab_handler (optab, mode);
4168 if (icode == CODE_FOR_nothing)
4169 return false;
4170 rtx length_rtx = immed_wide_int_const (length, mode);
4171 return (insn_operand_matches (icode, 3, length_rtx)
4172 && insn_operand_matches (icode, 4, GEN_INT (align)));
4175 /* Expand STMT as though it were a call to internal function FN. */
4177 void
4178 expand_internal_call (internal_fn fn, gcall *stmt)
4180 internal_fn_expanders[fn] (fn, stmt);
4183 /* Expand STMT, which is a call to internal function FN. */
4185 void
4186 expand_internal_call (gcall *stmt)
4188 expand_internal_call (gimple_call_internal_fn (stmt), stmt);
4191 /* If TYPE is a vector type, return true if IFN is a direct internal
4192 function that is supported for that type. If TYPE is a scalar type,
4193 return true if IFN is a direct internal function that is supported for
4194 a vector version of TYPE that the target could use for autovectorization. */
4196 bool
4197 vectorized_internal_fn_supported_p (internal_fn ifn, tree type)
4199 if (VECTOR_MODE_P (TYPE_MODE (type)))
4200 return direct_internal_fn_supported_p (ifn, type, OPTIMIZE_FOR_SPEED);
4202 scalar_mode smode;
4203 if (!is_a <scalar_mode> (TYPE_MODE (type), &smode))
4204 return false;
4206 machine_mode vmode = targetm.vectorize.preferred_simd_mode (smode);
4207 if (VECTOR_MODE_P (vmode))
4209 tree vectype = build_vector_type_for_mode (type, vmode);
4210 if (direct_internal_fn_supported_p (ifn, vectype, OPTIMIZE_FOR_SPEED))
4211 return true;
4214 auto_vector_modes vector_modes;
4215 targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
4216 for (machine_mode base_mode : vector_modes)
4217 if (related_vector_mode (base_mode, smode).exists (&vmode))
4219 tree vectype = build_vector_type_for_mode (type, vmode);
4220 if (direct_internal_fn_supported_p (ifn, vectype, OPTIMIZE_FOR_SPEED))
4221 return true;
4224 return false;
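/* Usage sketch (hypothetical caller): asking whether a vectorized square
   root could be emitted for a scalar double computation might look like

     if (vectorized_internal_fn_supported_p (IFN_SQRT, double_type_node))
       ...

   which first tries the target's preferred SIMD mode for DFmode and then
   every mode returned by autovectorize_vector_modes.  */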
4227 void
4228 expand_SHUFFLEVECTOR (internal_fn, gcall *)
4230 gcc_unreachable ();
4233 void
4234 expand_PHI (internal_fn, gcall *)
4236 gcc_unreachable ();