1 /* Default target hook functions.
2 Copyright (C) 2003-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* The migration of target macros to target hooks works as follows:
22 1. Create a target hook that uses the existing target macros to
23 implement the same functionality.
25 2. Convert all the MI files to use the hook instead of the macro.
27 3. Repeat for a majority of the remaining target macros. This will
28 take some time.
30 4. Tell target maintainers to start migrating.
32 5. Eventually convert the backends to override the hook instead of
33 defining the macros. This will take some time too.
35 6. At a time TBD, poison the macros. Unmigrated targets will break at
36 this point.
38 Note that we expect steps 1-3 to be done by the people that
39 understand what the MI does with each macro, and step 5 to be done
40 by the target maintainers for their respective targets.
42 Note that steps 1 and 2 don't have to be done together, but no
43 target can override the new hook until step 2 is complete for it.
45 Once the macros are poisoned, we will revert to the old migration
46 rules - migrate the macro, callers, and targets all at once. This
47 comment can thus be removed at that point. */
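/* As an illustrative sketch of step 1 (FROBNICATE_P and
   default_frobnicate_p are hypothetical names, not real GCC
   interfaces), a default hook wrapping a legacy target macro would
   look like this:  */
#if 0
bool
default_frobnicate_p (machine_mode mode)
{
#ifdef FROBNICATE_P
  /* Step 1: defer to the legacy macro on targets that still define it.  */
  return FROBNICATE_P (mode);
#else
  /* Conservative fallback once the macro is gone.  */
  return false;
#endif
}
#endif
/* After step 2, MI code calls targetm.frobnicate_p (mode) rather than
   testing the macro directly; compare default_legitimate_address_p
   below for a real instance of this pattern.  */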
49 #include "config.h"
50 #include "system.h"
51 #include "coretypes.h"
52 #include "target.h"
53 #include "function.h"
54 #include "rtl.h"
55 #include "tree.h"
56 #include "tree-ssa-alias.h"
57 #include "gimple-expr.h"
58 #include "memmodel.h"
59 #include "backend.h"
60 #include "emit-rtl.h"
61 #include "df.h"
62 #include "tm_p.h"
63 #include "stringpool.h"
64 #include "tree-vrp.h"
65 #include "tree-ssanames.h"
66 #include "profile-count.h"
67 #include "optabs.h"
68 #include "regs.h"
69 #include "recog.h"
70 #include "diagnostic-core.h"
71 #include "fold-const.h"
72 #include "stor-layout.h"
73 #include "varasm.h"
74 #include "flags.h"
75 #include "explow.h"
76 #include "expmed.h"
77 #include "calls.h"
78 #include "expr.h"
79 #include "output.h"
80 #include "common/common-target.h"
81 #include "reload.h"
82 #include "intl.h"
83 #include "opts.h"
84 #include "gimplify.h"
85 #include "predict.h"
86 #include "real.h"
87 #include "langhooks.h"
88 #include "sbitmap.h"
89 #include "function-abi.h"
90 #include "attribs.h"
91 #include "asan.h"
92 #include "emit-rtl.h"
93 #include "gimple.h"
94 #include "cfgloop.h"
95 #include "tree-vectorizer.h"
96 #include "options.h"
97 #include "case-cfn-macros.h"
99 bool
100 default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
101 rtx addr ATTRIBUTE_UNUSED,
102 bool strict ATTRIBUTE_UNUSED)
104 #ifdef GO_IF_LEGITIMATE_ADDRESS
105 /* Defer to the old implementation using a goto. */
106 if (strict)
107 return strict_memory_address_p (mode, addr);
108 else
109 return memory_address_p (mode, addr);
110 #else
111 gcc_unreachable ();
112 #endif
115 void
116 default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
118 #ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
119 ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
120 #endif
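/* The default implementation of TARGET_UNSPEC_MAY_TRAP_P: a floating-point
   UNSPEC may trap when -ftrapping-math; otherwise it may trap if any of
   its operands may trap.  */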
124 default_unspec_may_trap_p (const_rtx x, unsigned flags)
126 int i;
128 /* Any floating arithmetic may trap. */
129 if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math))
130 return 1;
132 for (i = 0; i < XVECLEN (x, 0); ++i)
134 if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
135 return 1;
138 return 0;
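/* The default implementation of TARGET_PROMOTE_FUNCTION_MODE.  */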
141 machine_mode
142 default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
143 machine_mode mode,
144 int *punsignedp ATTRIBUTE_UNUSED,
145 const_tree funtype ATTRIBUTE_UNUSED,
146 int for_return ATTRIBUTE_UNUSED)
148 if (type != NULL_TREE && for_return == 2)
149 return promote_mode (type, mode, punsignedp);
150 return mode;
153 machine_mode
154 default_promote_function_mode_always_promote (const_tree type,
155 machine_mode mode,
156 int *punsignedp,
157 const_tree funtype ATTRIBUTE_UNUSED,
158 int for_return ATTRIBUTE_UNUSED)
160 return promote_mode (type, mode, punsignedp);
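/* The default implementation of TARGET_CC_MODES_COMPATIBLE: two
   condition-code modes are compatible only if they are identical.  */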
163 machine_mode
164 default_cc_modes_compatible (machine_mode m1, machine_mode m2)
166 if (m1 == m2)
167 return m1;
168 return VOIDmode;
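/* The default implementation of TARGET_RETURN_IN_MEMORY: values whose
   mode is BLKmode are returned in memory.  */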
171 bool
172 default_return_in_memory (const_tree type,
173 const_tree fntype ATTRIBUTE_UNUSED)
175 return (TYPE_MODE (type) == BLKmode);
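/* The default implementation of TARGET_LEGITIMIZE_ADDRESS: return X
   unchanged.  */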
179 default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
180 machine_mode mode ATTRIBUTE_UNUSED)
182 return x;
185 bool
186 default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
187 machine_mode)
189 return false;
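/* The default implementation of TARGET_CONST_NOT_OK_FOR_DEBUG_P:
   UNSPECs cannot be meaningfully represented in debug information.  */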
192 bool
193 default_const_not_ok_for_debug_p (rtx x)
195 if (GET_CODE (x) == UNSPEC)
196 return true;
197 return false;
201 default_expand_builtin_saveregs (void)
203 error ("%<__builtin_saveregs%> not supported by this target");
204 return const0_rtx;
207 void
208 default_setup_incoming_varargs (cumulative_args_t,
209 const function_arg_info &, int *, int)
213 /* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE. */
216 default_builtin_setjmp_frame_value (void)
218 return virtual_stack_vars_rtx;
221 /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false. */
223 bool
224 hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
226 return false;
229 bool
230 default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
232 return (targetm.calls.setup_incoming_varargs
233 != default_setup_incoming_varargs);
236 scalar_int_mode
237 default_eh_return_filter_mode (void)
239 return targetm.unwind_word_mode ();
242 scalar_int_mode
243 default_libgcc_cmp_return_mode (void)
245 return word_mode;
248 scalar_int_mode
249 default_libgcc_shift_count_mode (void)
251 return word_mode;
254 scalar_int_mode
255 default_unwind_word_mode (void)
257 return word_mode;
260 /* The default implementation of TARGET_SHIFT_TRUNCATION_MASK. */
262 unsigned HOST_WIDE_INT
263 default_shift_truncation_mask (machine_mode mode)
265 return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
268 /* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL. */
270 unsigned int
271 default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
273 return have_insn_for (DIV, mode) ? 3 : 2;
276 /* The default implementation of TARGET_MODE_REP_EXTENDED. */
279 default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
281 return UNKNOWN;
284 /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true. */
286 bool
287 hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
289 return true;
292 /* Return machine mode for non-standard suffix
293 or VOIDmode if non-standard suffixes are unsupported. */
294 machine_mode
295 default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
297 return VOIDmode;
300 /* The generic C++ ABI specifies this is a 64-bit value. */
301 tree
302 default_cxx_guard_type (void)
304 return long_long_integer_type_node;
307 /* Returns the size of the cookie to use when allocating an array
308 whose elements have the indicated TYPE. Assumes that it is already
309 known that a cookie is needed. */
311 tree
312 default_cxx_get_cookie_size (tree type)
314 tree cookie_size;
316 /* We need to allocate an additional max (sizeof (size_t), alignof
317 (true_type)) bytes. */
318 tree sizetype_size;
319 tree type_align;
321 sizetype_size = size_in_bytes (sizetype);
322 type_align = size_int (TYPE_ALIGN_UNIT (type));
323 if (tree_int_cst_lt (type_align, sizetype_size))
324 cookie_size = sizetype_size;
325 else
326 cookie_size = type_align;
328 return cookie_size;
331 /* Return true if a parameter must be passed by reference. This version
332 of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK. */
334 bool
335 hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
336 const function_arg_info &arg)
338 return targetm.calls.must_pass_in_stack (arg);
341 /* Return true if a parameter follows callee copies conventions. This
342 version of the hook is true for all named arguments. */
344 bool
345 hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
347 return arg.named;
350 /* Emit to STREAM the assembler syntax for insn operand X. */
352 void
353 default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
354 int code ATTRIBUTE_UNUSED)
356 #ifdef PRINT_OPERAND
357 PRINT_OPERAND (stream, x, code);
358 #else
359 gcc_unreachable ();
360 #endif
363 /* Emit to STREAM the assembler syntax for an insn operand whose memory
364 address is X. */
366 void
367 default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
368 machine_mode /*mode*/,
369 rtx x ATTRIBUTE_UNUSED)
371 #ifdef PRINT_OPERAND_ADDRESS
372 PRINT_OPERAND_ADDRESS (stream, x);
373 #else
374 gcc_unreachable ();
375 #endif
378 /* Return true if CODE is a valid punctuation character for the
379 `print_operand' hook. */
381 bool
382 default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
384 #ifdef PRINT_OPERAND_PUNCT_VALID_P
385 return PRINT_OPERAND_PUNCT_VALID_P (code);
386 #else
387 return false;
388 #endif
391 /* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME. */
392 tree
393 default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
395 const char *skipped = name + (*name == '*' ? 1 : 0);
396 const char *stripped = targetm.strip_name_encoding (skipped);
397 if (*name != '*' && user_label_prefix[0])
398 stripped = ACONCAT ((user_label_prefix, stripped, NULL));
399 return get_identifier (stripped);
402 /* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE. */
404 machine_mode
405 default_translate_mode_attribute (machine_mode mode)
407 return mode;
410 /* True if MODE is valid for the target. By "valid", we mean able to
411 be manipulated in non-trivial ways. In particular, this means all
412 the arithmetic is supported.
414 By default we guess this means that any C type is supported. If
415 we can't map the mode back to a type that would be available in C,
416 then reject it. The special case here is the double-word arithmetic
417 supported by optabs.cc. */
419 bool
420 default_scalar_mode_supported_p (scalar_mode mode)
422 int precision = GET_MODE_PRECISION (mode);
424 switch (GET_MODE_CLASS (mode))
426 case MODE_PARTIAL_INT:
427 case MODE_INT:
428 if (precision == CHAR_TYPE_SIZE)
429 return true;
430 if (precision == SHORT_TYPE_SIZE)
431 return true;
432 if (precision == INT_TYPE_SIZE)
433 return true;
434 if (precision == LONG_TYPE_SIZE)
435 return true;
436 if (precision == LONG_LONG_TYPE_SIZE)
437 return true;
438 if (precision == 2 * BITS_PER_WORD)
439 return true;
440 return false;
442 case MODE_FLOAT:
443 if (precision == FLOAT_TYPE_SIZE)
444 return true;
445 if (precision == DOUBLE_TYPE_SIZE)
446 return true;
447 if (precision == LONG_DOUBLE_TYPE_SIZE)
448 return true;
449 return false;
451 case MODE_DECIMAL_FLOAT:
452 case MODE_FRACT:
453 case MODE_UFRACT:
454 case MODE_ACCUM:
455 case MODE_UACCUM:
456 return false;
458 default:
459 gcc_unreachable ();
463 /* Return true if libgcc supports floating-point mode MODE (known to
464 be supported as a scalar mode). */
466 bool
467 default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
469 switch (mode)
471 #ifdef HAVE_SFmode
472 case E_SFmode:
473 #endif
474 #ifdef HAVE_DFmode
475 case E_DFmode:
476 #endif
477 #ifdef HAVE_XFmode
478 case E_XFmode:
479 #endif
480 #ifdef HAVE_TFmode
481 case E_TFmode:
482 #endif
483 return true;
485 default:
486 return false;
490 /* Return the machine mode to use for the type _FloatN, if EXTENDED is
491 false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
492 supported. */
493 opt_scalar_float_mode
494 default_floatn_mode (int n, bool extended)
496 if (extended)
498 opt_scalar_float_mode cand1, cand2;
499 scalar_float_mode mode;
500 switch (n)
502 case 32:
503 #ifdef HAVE_DFmode
504 cand1 = DFmode;
505 #endif
506 break;
508 case 64:
509 #ifdef HAVE_XFmode
510 cand1 = XFmode;
511 #endif
512 #ifdef HAVE_TFmode
513 cand2 = TFmode;
514 #endif
515 break;
517 case 128:
518 break;
520 default:
521 /* Those are the only valid _FloatNx types. */
522 gcc_unreachable ();
524 if (cand1.exists (&mode)
525 && REAL_MODE_FORMAT (mode)->ieee_bits > n
526 && targetm.scalar_mode_supported_p (mode)
527 && targetm.libgcc_floating_mode_supported_p (mode))
528 return cand1;
529 if (cand2.exists (&mode)
530 && REAL_MODE_FORMAT (mode)->ieee_bits > n
531 && targetm.scalar_mode_supported_p (mode)
532 && targetm.libgcc_floating_mode_supported_p (mode))
533 return cand2;
535 else
537 opt_scalar_float_mode cand;
538 scalar_float_mode mode;
539 switch (n)
541 case 16:
542 /* Always enable _Float16 if we have basic support for the mode.
543 Targets can control the range and precision of operations on
544 the _Float16 type using TARGET_C_EXCESS_PRECISION. */
545 #ifdef HAVE_HFmode
546 cand = HFmode;
547 #endif
548 break;
550 case 32:
551 #ifdef HAVE_SFmode
552 cand = SFmode;
553 #endif
554 break;
556 case 64:
557 #ifdef HAVE_DFmode
558 cand = DFmode;
559 #endif
560 break;
562 case 128:
563 #ifdef HAVE_TFmode
564 cand = TFmode;
565 #endif
566 break;
568 default:
569 break;
571 if (cand.exists (&mode)
572 && REAL_MODE_FORMAT (mode)->ieee_bits == n
573 && targetm.scalar_mode_supported_p (mode)
574 && targetm.libgcc_floating_mode_supported_p (mode))
575 return cand;
577 return opt_scalar_float_mode ();
580 /* Define this to return true if the _Floatn and _Floatnx built-in functions
581 should implicitly enable the built-in function without the __builtin_ prefix
582 in addition to the normal built-in function with the __builtin_ prefix. The
583 default is to only enable built-in functions without the __builtin_ prefix
584 for the GNU C language. The argument FUNC is the enum built_in_function
585 id of the function to be enabled. */
587 bool
588 default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
590 static bool first_time_p = true;
591 static bool c_or_objective_c;
593 if (first_time_p)
595 first_time_p = false;
596 c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
599 return c_or_objective_c;
602 /* Make some target macros usable by target-independent code. */
603 bool
604 targhook_words_big_endian (void)
606 return !!WORDS_BIG_ENDIAN;
609 bool
610 targhook_float_words_big_endian (void)
612 return !!FLOAT_WORDS_BIG_ENDIAN;
615 /* True if the target supports floating-point exceptions and rounding
616 modes. */
618 bool
619 default_float_exceptions_rounding_supported_p (void)
621 #ifdef HAVE_adddf3
622 return HAVE_adddf3;
623 #else
624 return false;
625 #endif
628 /* True if the target supports decimal floating point. */
630 bool
631 default_decimal_float_supported_p (void)
633 return ENABLE_DECIMAL_FLOAT;
636 /* True if the target supports fixed-point arithmetic. */
638 bool
639 default_fixed_point_supported_p (void)
641 return ENABLE_FIXED_POINT;
644 /* True if the target supports GNU indirect functions. */
646 bool
647 default_has_ifunc_p (void)
649 return HAVE_GNU_INDIRECT_FUNCTION;
652 /* Return true if we predict the loop LOOP will be transformed to a
653 low-overhead loop, otherwise return false.
655 By default, false is returned, as this hook's applicability should be
656 verified for each target. Target maintainers should re-define the hook
657 if the target can take advantage of it. */
659 bool
660 default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
662 return false;
665 /* By default, just use the input MODE itself. */
667 machine_mode
668 default_preferred_doloop_mode (machine_mode mode)
670 return mode;
673 /* Return NULL if insn INSN is valid within a low-overhead loop; otherwise
674 return an error message.
676 This function checks whether a given INSN is valid within a low-overhead
677 loop. If INSN is invalid it returns the reason for that, otherwise it
678 returns NULL. A called function may clobber any special registers required
679 for low-overhead looping. Additionally, some targets (e.g., PPC) use the count
680 register for branch on table instructions. We reject the doloop pattern in
681 these cases. */
683 const char *
684 default_invalid_within_doloop (const rtx_insn *insn)
686 if (CALL_P (insn))
687 return "Function call in loop.";
689 if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
690 return "Computed branch in the loop.";
692 return NULL;
695 /* Mapping of builtin functions to vectorized variants. */
697 tree
698 default_builtin_vectorized_function (unsigned int, tree, tree)
700 return NULL_TREE;
703 /* Mapping of target builtin functions to vectorized variants. */
705 tree
706 default_builtin_md_vectorized_function (tree, tree, tree)
708 return NULL_TREE;
711 /* Default vectorizer cost model values. */
714 default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
715 tree vectype,
716 int misalign ATTRIBUTE_UNUSED)
718 switch (type_of_cost)
720 case scalar_stmt:
721 case scalar_load:
722 case scalar_store:
723 case vector_stmt:
724 case vector_load:
725 case vector_store:
726 case vec_to_scalar:
727 case scalar_to_vec:
728 case cond_branch_not_taken:
729 case vec_perm:
730 case vec_promote_demote:
731 return 1;
733 case unaligned_load:
734 case unaligned_store:
735 return 2;
737 case cond_branch_taken:
738 return 3;
740 case vec_construct:
741 return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;
743 default:
744 gcc_unreachable ();
748 /* The default implementation of TARGET_BUILTIN_RECIPROCAL: provide no
builtin reciprocal. */
750 tree
751 default_builtin_reciprocal (tree)
753 return NULL_TREE;
756 void
757 default_emit_support_tinfos (emit_support_tinfos_callback)
761 bool
762 hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
763 const function_arg_info &)
765 return false;
768 bool
769 hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
770 const function_arg_info &)
772 return true;
776 hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
777 const function_arg_info &)
779 return 0;
782 void
783 hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
784 tree ATTRIBUTE_UNUSED)
788 /* Default implementation of TARGET_PUSH_ARGUMENT. */
790 bool
791 default_push_argument (unsigned int)
793 #ifdef PUSH_ROUNDING
794 return !ACCUMULATE_OUTGOING_ARGS;
795 #else
796 return false;
797 #endif
800 void
801 default_function_arg_advance (cumulative_args_t, const function_arg_info &)
803 gcc_unreachable ();
806 /* Default implementation of TARGET_FUNCTION_ARG_OFFSET. */
808 HOST_WIDE_INT
809 default_function_arg_offset (machine_mode, const_tree)
811 return 0;
814 /* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
815 upward, but pad short args downward on big-endian machines. */
817 pad_direction
818 default_function_arg_padding (machine_mode mode, const_tree type)
820 if (!BYTES_BIG_ENDIAN)
821 return PAD_UPWARD;
823 unsigned HOST_WIDE_INT size;
824 if (mode == BLKmode)
826 if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
827 return PAD_UPWARD;
828 size = int_size_in_bytes (type);
830 else
831 /* Targets with variable-sized modes must override this hook
832 and handle variable-sized modes explicitly. */
833 size = GET_MODE_SIZE (mode).to_constant ();
835 if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
836 return PAD_DOWNWARD;
838 return PAD_UPWARD;
842 default_function_arg (cumulative_args_t, const function_arg_info &)
844 gcc_unreachable ();
848 default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
850 gcc_unreachable ();
853 unsigned int
854 default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
855 const_tree type ATTRIBUTE_UNUSED)
857 return PARM_BOUNDARY;
860 unsigned int
861 default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
862 const_tree type ATTRIBUTE_UNUSED)
864 return PARM_BOUNDARY;
867 void
868 hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
872 const char *
873 hook_invalid_arg_for_unprototyped_fn (
874 const_tree typelist ATTRIBUTE_UNUSED,
875 const_tree funcdecl ATTRIBUTE_UNUSED,
876 const_tree val ATTRIBUTE_UNUSED)
878 return NULL;
881 /* Initialize the stack protection decls. */
883 /* Stack protection related decls living in libgcc. */
884 static GTY(()) tree stack_chk_guard_decl;
886 tree
887 default_stack_protect_guard (void)
889 tree t = stack_chk_guard_decl;
891 if (t == NULL)
893 rtx x;
895 t = build_decl (UNKNOWN_LOCATION,
896 VAR_DECL, get_identifier ("__stack_chk_guard"),
897 ptr_type_node);
898 TREE_STATIC (t) = 1;
899 TREE_PUBLIC (t) = 1;
900 DECL_EXTERNAL (t) = 1;
901 TREE_USED (t) = 1;
902 TREE_THIS_VOLATILE (t) = 1;
903 DECL_ARTIFICIAL (t) = 1;
904 DECL_IGNORED_P (t) = 1;
906 /* Do not share RTL as the declaration is visible outside of
907 current function. */
908 x = DECL_RTL (t);
909 RTX_FLAG (x, used) = 1;
911 stack_chk_guard_decl = t;
914 return t;
917 static GTY(()) tree stack_chk_fail_decl;
919 tree
920 default_external_stack_protect_fail (void)
922 tree t = stack_chk_fail_decl;
924 if (t == NULL_TREE)
926 t = build_function_type_list (void_type_node, NULL_TREE);
927 t = build_decl (UNKNOWN_LOCATION,
928 FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
929 TREE_STATIC (t) = 1;
930 TREE_PUBLIC (t) = 1;
931 DECL_EXTERNAL (t) = 1;
932 TREE_USED (t) = 1;
933 TREE_THIS_VOLATILE (t) = 1;
934 TREE_NOTHROW (t) = 1;
935 DECL_ARTIFICIAL (t) = 1;
936 DECL_IGNORED_P (t) = 1;
937 DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
938 DECL_VISIBILITY_SPECIFIED (t) = 1;
940 stack_chk_fail_decl = t;
943 return build_call_expr (t, 0);
946 tree
947 default_hidden_stack_protect_fail (void)
949 #ifndef HAVE_GAS_HIDDEN
950 return default_external_stack_protect_fail ();
951 #else
952 tree t = stack_chk_fail_decl;
954 if (!flag_pic)
955 return default_external_stack_protect_fail ();
957 if (t == NULL_TREE)
959 t = build_function_type_list (void_type_node, NULL_TREE);
960 t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
961 get_identifier ("__stack_chk_fail_local"), t);
962 TREE_STATIC (t) = 1;
963 TREE_PUBLIC (t) = 1;
964 DECL_EXTERNAL (t) = 1;
965 TREE_USED (t) = 1;
966 TREE_THIS_VOLATILE (t) = 1;
967 TREE_NOTHROW (t) = 1;
968 DECL_ARTIFICIAL (t) = 1;
969 DECL_IGNORED_P (t) = 1;
970 DECL_VISIBILITY_SPECIFIED (t) = 1;
971 DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;
973 stack_chk_fail_decl = t;
976 return build_call_expr (t, 0);
977 #endif
980 bool
981 hook_bool_const_rtx_commutative_p (const_rtx x,
982 int outer_code ATTRIBUTE_UNUSED)
984 return COMMUTATIVE_P (x);
988 default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
989 const_tree fn_decl_or_type,
990 bool outgoing ATTRIBUTE_UNUSED)
992 /* The old interface doesn't handle receiving the function type. */
993 if (fn_decl_or_type
994 && !DECL_P (fn_decl_or_type))
995 fn_decl_or_type = NULL;
997 #ifdef FUNCTION_VALUE
998 return FUNCTION_VALUE (ret_type, fn_decl_or_type);
999 #else
1000 gcc_unreachable ();
1001 #endif
1005 default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
1006 const_rtx fun ATTRIBUTE_UNUSED)
1008 #ifdef LIBCALL_VALUE
1009 return LIBCALL_VALUE (MACRO_MODE (mode));
1010 #else
1011 gcc_unreachable ();
1012 #endif
1015 /* The default hook for TARGET_FUNCTION_VALUE_REGNO_P. */
1017 bool
1018 default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
1020 #ifdef FUNCTION_VALUE_REGNO_P
1021 return FUNCTION_VALUE_REGNO_P (regno);
1022 #else
1023 gcc_unreachable ();
1024 #endif
1027 /* Choose the mode and rtx to use to zero REGNO, storing them in PMODE and
1028 PREGNO_RTX and returning TRUE if successful, otherwise returning FALSE. If
1029 the natural mode for REGNO doesn't work, attempt to group it with subsequent
1030 adjacent registers set in TOZERO. */
1032 static inline bool
1033 zcur_select_mode_rtx (unsigned int regno, machine_mode *pmode,
1034 rtx *pregno_rtx, HARD_REG_SET tozero)
1036 rtx regno_rtx = regno_reg_rtx[regno];
1037 machine_mode mode = GET_MODE (regno_rtx);
1039 /* If the natural mode doesn't work, try some wider mode. */
1040 if (!targetm.hard_regno_mode_ok (regno, mode))
1042 bool found = false;
1043 for (int nregs = 2;
1044 !found && nregs <= hard_regno_max_nregs
1045 && regno + nregs <= FIRST_PSEUDO_REGISTER
1046 && TEST_HARD_REG_BIT (tozero,
1047 regno + nregs - 1);
1048 nregs++)
1050 mode = choose_hard_reg_mode (regno, nregs, 0);
1051 if (mode == E_VOIDmode)
1052 continue;
1053 gcc_checking_assert (targetm.hard_regno_mode_ok (regno, mode));
1054 regno_rtx = gen_rtx_REG (mode, regno);
1055 found = true;
1057 if (!found)
1058 return false;
1061 *pmode = mode;
1062 *pregno_rtx = regno_rtx;
1063 return true;
1066 /* The default hook for TARGET_ZERO_CALL_USED_REGS. */
1068 HARD_REG_SET
1069 default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
1071 gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));
1073 HARD_REG_SET failed;
1074 CLEAR_HARD_REG_SET (failed);
1075 bool progress = false;
1077 /* First, try to zero each register in need_zeroed_hardregs by
1078 loading a zero into it, taking note of any failures in
1079 FAILED. */
1080 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1081 if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
1083 rtx_insn *last_insn = get_last_insn ();
1084 rtx regno_rtx;
1085 machine_mode mode;
1087 if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
1088 need_zeroed_hardregs))
1090 SET_HARD_REG_BIT (failed, regno);
1091 continue;
1094 rtx zero = CONST0_RTX (mode);
1095 rtx_insn *insn = emit_move_insn (regno_rtx, zero);
1096 if (!valid_insn_p (insn))
1098 SET_HARD_REG_BIT (failed, regno);
1099 delete_insns_since (last_insn);
1101 else
1103 progress = true;
1104 regno += hard_regno_nregs (regno, mode) - 1;
1108 /* Now retry with copies from zeroed registers, as long as we've
1109 made some PROGRESS, and registers remain to be zeroed in
1110 FAILED. */
1111 while (progress && !hard_reg_set_empty_p (failed))
1113 HARD_REG_SET retrying = failed;
1115 CLEAR_HARD_REG_SET (failed);
1116 progress = false;
1118 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1119 if (TEST_HARD_REG_BIT (retrying, regno))
1121 rtx regno_rtx;
1122 machine_mode mode;
1124 /* This might select registers we've already zeroed. If grouping
1125 with them is what it takes to get regno zeroed, so be it. */
1126 if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
1127 need_zeroed_hardregs))
1129 SET_HARD_REG_BIT (failed, regno);
1130 continue;
1133 bool success = false;
1134 /* Look for a source. */
1135 for (unsigned int src = 0; src < FIRST_PSEUDO_REGISTER; src++)
1137 /* If SRC hasn't been zeroed (yet?), skip it. */
1138 if (! TEST_HARD_REG_BIT (need_zeroed_hardregs, src))
1139 continue;
1140 if (TEST_HARD_REG_BIT (retrying, src))
1141 continue;
1143 /* Check that SRC can hold MODE, and that any other
1144 registers needed to hold MODE in SRC have also been
1145 zeroed. */
1146 if (!targetm.hard_regno_mode_ok (src, mode))
1147 continue;
1148 unsigned n = targetm.hard_regno_nregs (src, mode);
1149 bool ok = true;
1150 for (unsigned i = 1; ok && i < n; i++)
1151 ok = (TEST_HARD_REG_BIT (need_zeroed_hardregs, src + i)
1152 && !TEST_HARD_REG_BIT (retrying, src + i));
1153 if (!ok)
1154 continue;
1156 /* SRC is usable, try to copy from it. */
1157 rtx_insn *last_insn = get_last_insn ();
1158 rtx src_rtx = gen_rtx_REG (mode, src);
1159 rtx_insn *insn = emit_move_insn (regno_rtx, src_rtx);
1160 if (!valid_insn_p (insn))
1161 /* It didn't work, remove any inserts. We'll look
1162 for another SRC. */
1163 delete_insns_since (last_insn);
1164 else
1166 /* We're done for REGNO. */
1167 success = true;
1168 break;
1172 /* If nothing worked for REGNO this round, mark it to be
1173 retried if we get another round. */
1174 if (!success)
1175 SET_HARD_REG_BIT (failed, regno);
1176 else
1178 /* Take note so as to enable another round if needed. */
1179 progress = true;
1180 regno += hard_regno_nregs (regno, mode) - 1;
1185 /* If any register remained, report it. */
1186 if (!progress)
1188 static bool issued_error;
1189 if (!issued_error)
1191 const char *name = NULL;
1192 for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL;
1193 ++i)
1194 if (flag_zero_call_used_regs == zero_call_used_regs_opts[i].flag)
1196 name = zero_call_used_regs_opts[i].name;
1197 break;
1200 if (!name)
1201 name = "";
1203 issued_error = true;
1204 sorry ("argument %qs is not supported for %qs on this target",
1205 name, "-fzero-call-used-regs");
1209 return need_zeroed_hardregs;
1213 default_internal_arg_pointer (void)
1215 /* If the reg that the virtual arg pointer will be translated into is
1216 not a fixed reg or is the stack pointer, make a copy of the virtual
1217 arg pointer, and address parms via the copy. The frame pointer is
1218 considered fixed even though it is not marked as such. */
1219 if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
1220 || ! (fixed_regs[ARG_POINTER_REGNUM]
1221 || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
1222 return copy_to_reg (virtual_incoming_args_rtx);
1223 else
1224 return virtual_incoming_args_rtx;
1228 default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
1230 if (incoming_p)
1232 #ifdef STATIC_CHAIN_INCOMING_REGNUM
1233 return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
1234 #endif
1237 #ifdef STATIC_CHAIN_REGNUM
1238 return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
1239 #endif
1242 static bool issued_error;
1243 if (!issued_error)
1245 issued_error = true;
1246 sorry ("nested functions not supported on this target");
1249 /* It really doesn't matter what we return here, so long as it
1250 doesn't cause the rest of the compiler to crash. */
1251 return gen_rtx_MEM (Pmode, stack_pointer_rtx);
1255 void
1256 default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
1257 rtx ARG_UNUSED (r_chain))
1259 sorry ("nested function trampolines not supported on this target");
1262 poly_int64
1263 default_return_pops_args (tree, tree, poly_int64)
1265 return 0;
1268 reg_class_t
1269 default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
1270 reg_class_t cl,
1271 reg_class_t best_cl ATTRIBUTE_UNUSED)
1273 return cl;
1276 extern bool
1277 default_lra_p (void)
1279 return true;
1283 default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
1285 return 0;
1288 extern bool
1289 default_register_usage_leveling_p (void)
1291 return false;
1294 extern bool
1295 default_different_addr_displacement_p (void)
1297 return false;
1300 reg_class_t
1301 default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
1302 reg_class_t reload_class_i ATTRIBUTE_UNUSED,
1303 machine_mode reload_mode ATTRIBUTE_UNUSED,
1304 secondary_reload_info *sri)
1306 enum reg_class rclass = NO_REGS;
1307 enum reg_class reload_class = (enum reg_class) reload_class_i;
1309 if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
1311 sri->icode = sri->prev_sri->t_icode;
1312 return NO_REGS;
1314 #ifdef SECONDARY_INPUT_RELOAD_CLASS
1315 if (in_p)
1316 rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
1317 MACRO_MODE (reload_mode), x);
1318 #endif
1319 #ifdef SECONDARY_OUTPUT_RELOAD_CLASS
1320 if (! in_p)
1321 rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
1322 MACRO_MODE (reload_mode), x);
1323 #endif
1324 if (rclass != NO_REGS)
1326 enum insn_code icode
1327 = direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
1328 reload_mode);
1330 if (icode != CODE_FOR_nothing
1331 && !insn_operand_matches (icode, in_p, x))
1332 icode = CODE_FOR_nothing;
1333 else if (icode != CODE_FOR_nothing)
1335 const char *insn_constraint, *scratch_constraint;
1336 enum reg_class insn_class, scratch_class;
1338 gcc_assert (insn_data[(int) icode].n_operands == 3);
1339 insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
1340 if (!*insn_constraint)
1341 insn_class = ALL_REGS;
1342 else
1344 if (in_p)
1346 gcc_assert (*insn_constraint == '=');
1347 insn_constraint++;
1349 insn_class = (reg_class_for_constraint
1350 (lookup_constraint (insn_constraint)));
1351 gcc_assert (insn_class != NO_REGS);
1354 scratch_constraint = insn_data[(int) icode].operand[2].constraint;
1355 /* The scratch register's constraint must start with "=&",
1356 except for an input reload, where only "=" is necessary,
1357 and where it might be beneficial to re-use registers from
1358 the input. */
1359 gcc_assert (scratch_constraint[0] == '='
1360 && (in_p || scratch_constraint[1] == '&'));
1361 scratch_constraint++;
1362 if (*scratch_constraint == '&')
1363 scratch_constraint++;
1364 scratch_class = (reg_class_for_constraint
1365 (lookup_constraint (scratch_constraint)));
1367 if (reg_class_subset_p (reload_class, insn_class))
1369 gcc_assert (scratch_class == rclass);
1370 rclass = NO_REGS;
1372 else
1373 rclass = insn_class;
1376 if (rclass == NO_REGS)
1377 sri->icode = icode;
1378 else
1379 sri->t_icode = icode;
1381 return rclass;
1384 /* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE. */
1386 machine_mode
1387 default_secondary_memory_needed_mode (machine_mode mode)
1389 if (!targetm.lra_p ()
1390 && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
1391 && INTEGRAL_MODE_P (mode))
1392 return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
1393 return mode;
1396 /* By default, if flag_pic is true, then neither local nor global relocs
1397 should be placed in readonly memory. */
1400 default_reloc_rw_mask (void)
1402 return flag_pic ? 3 : 0;
1405 /* By default, address diff vectors are generated
1406 for jump tables when flag_pic is true. */
1408 bool
1409 default_generate_pic_addr_diff_vec (void)
1411 return flag_pic;
1414 /* Record an element in the table of global constructors. SYMBOL is
1415 a SYMBOL_REF of the function to be called; PRIORITY is a number
1416 between 0 and MAX_INIT_PRIORITY. */
1418 void
1419 default_asm_out_constructor (rtx symbol ATTRIBUTE_UNUSED,
1420 int priority ATTRIBUTE_UNUSED)
1422 sorry ("global constructors not supported on this target");
1425 /* Likewise for global destructors. */
1427 void
1428 default_asm_out_destructor (rtx symbol ATTRIBUTE_UNUSED,
1429 int priority ATTRIBUTE_UNUSED)
1431 sorry ("global destructors not supported on this target");
1434 /* By default, do no modification. */
1435 tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
1436 tree id)
1438 return id;
1441 /* The default implementation of TARGET_STATIC_RTX_ALIGNMENT. */
1443 HOST_WIDE_INT
1444 default_static_rtx_alignment (machine_mode mode)
1446 return GET_MODE_ALIGNMENT (mode);
1449 /* The default implementation of TARGET_CONSTANT_ALIGNMENT. */
1451 HOST_WIDE_INT
1452 default_constant_alignment (const_tree, HOST_WIDE_INT align)
1454 return align;
1457 /* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
1458 to at least BITS_PER_WORD but otherwise makes no changes. */
1460 HOST_WIDE_INT
1461 constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
1463 if (TREE_CODE (exp) == STRING_CST)
1464 return MAX (align, BITS_PER_WORD);
1465 return align;
1468 /* Default to natural alignment for vector types, bounded by
1469 MAX_OFILE_ALIGNMENT. */
1471 HOST_WIDE_INT
1472 default_vector_alignment (const_tree type)
1474 unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
1475 tree size = TYPE_SIZE (type);
1476 if (tree_fits_uhwi_p (size))
1477 align = tree_to_uhwi (size);
1478 if (align >= MAX_OFILE_ALIGNMENT)
1479 return MAX_OFILE_ALIGNMENT;
1480 return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type)));
1483 /* The default implementation of
1484 TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */
1486 poly_uint64
1487 default_preferred_vector_alignment (const_tree type)
1489 return TYPE_ALIGN (type);
1492 /* The default implementation of
1493 TARGET_VECTORIZE_PREFERRED_DIV_AS_SHIFTS_OVER_MULT. */
1495 bool
1496 default_preferred_div_as_shifts_over_mult (const_tree type)
1498 return !can_mult_highpart_p (TYPE_MODE (type), TYPE_UNSIGNED (type));
1501 /* By default assume vectors of element TYPE require a multiple of the natural
1502 alignment of TYPE. TYPE is naturally aligned if IS_PACKED is false. */
1503 bool
1504 default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
1506 return ! is_packed;
1509 /* By default, assume that a target supports misaligned memory accesses
1510 of any factor if it supports the movmisalign pattern.
1511 IS_PACKED is true if the memory access is defined in a packed struct. */
1512 bool
1513 default_builtin_support_vector_misalignment (machine_mode mode,
1514 const_tree type
1515 ATTRIBUTE_UNUSED,
1516 int misalignment
1517 ATTRIBUTE_UNUSED,
1518 bool is_packed
1519 ATTRIBUTE_UNUSED)
1521 if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
1522 return true;
1523 return false;
1526 /* By default, only attempt to parallelize bitwise operations, and
1527 possibly adds/subtracts using bit-twiddling. */
1529 machine_mode
1530 default_preferred_simd_mode (scalar_mode)
1532 return word_mode;
1535 /* By default do not split reductions further. */
1537 machine_mode
1538 default_split_reduction (machine_mode mode)
1540 return mode;
1543 /* By default only the preferred vector mode is tried. */
1545 unsigned int
1546 default_autovectorize_vector_modes (vector_modes *, bool)
1548 return 0;
1551 /* The default implementation of TARGET_VECTORIZE_RELATED_MODE. */
1553 opt_machine_mode
1554 default_vectorize_related_mode (machine_mode vector_mode,
1555 scalar_mode element_mode,
1556 poly_uint64 nunits)
1558 machine_mode result_mode;
1559 if ((maybe_ne (nunits, 0U)
1560 || multiple_p (GET_MODE_SIZE (vector_mode),
1561 GET_MODE_SIZE (element_mode), &nunits))
1562 && mode_for_vector (element_mode, nunits).exists (&result_mode)
1563 && VECTOR_MODE_P (result_mode)
1564 && targetm.vector_mode_supported_p (result_mode))
1565 return result_mode;
1567 return opt_machine_mode ();
1570 /* By default a vector of integers is used as a mask. */
1572 opt_machine_mode
1573 default_get_mask_mode (machine_mode mode)
1575 return related_int_vector_mode (mode);
1578 /* By default consider masked stores to be expensive. */
1580 bool
1581 default_empty_mask_is_expensive (unsigned ifn)
1583 return ifn == IFN_MASK_STORE;
1586 /* By default, the cost model accumulates three separate costs (prologue,
1587 loop body, and epilogue) for a vectorized loop or block. The default
1588 implementation returns a fresh vector_costs object that does exactly that. */
1590 vector_costs *
1591 default_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar)
1593 return new vector_costs (vinfo, costing_for_scalar);
1596 /* Determine whether or not a pointer mode is valid. Assume defaults
1597 of ptr_mode or Pmode - can be overridden. */
1598 bool
1599 default_valid_pointer_mode (scalar_int_mode mode)
1601 return (mode == ptr_mode || mode == Pmode);
1604 /* Determine whether the memory reference specified by REF may alias
1605 the C library's errno location. */
1606 bool
1607 default_ref_may_alias_errno (ao_ref *ref)
1609 tree base = ao_ref_base (ref);
1610 /* The default implementation assumes the errno location is
1611 a declaration of type int or is always accessed via a
1612 pointer to int. We assume that accesses to errno are
1613 not deliberately obfuscated (even in conforming ways). */
1614 if (TYPE_UNSIGNED (TREE_TYPE (base))
1615 || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
1616 return false;
1617 /* The default implementation assumes an errno location declaration
1618 is never defined in the current compilation unit and may not be
1619 aliased by a local variable. */
1620 if (DECL_P (base)
1621 && DECL_EXTERNAL (base)
1622 && !TREE_STATIC (base))
1623 return true;
1624 else if (TREE_CODE (base) == MEM_REF
1625 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1627 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
1628 return !pi || pi->pt.anything || pi->pt.nonlocal;
1630 return false;
1633 /* Return the mode for a pointer to a given ADDRSPACE,
1634 defaulting to ptr_mode for all address spaces. */
1636 scalar_int_mode
1637 default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
1639 return ptr_mode;
1642 /* Return the mode for an address in a given ADDRSPACE,
1643 defaulting to Pmode for all address spaces. */
1645 scalar_int_mode
1646 default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
1648 return Pmode;
1651 /* Named address space version of valid_pointer_mode.
1652 To match the above, the same modes apply to all address spaces. */
1654 bool
1655 default_addr_space_valid_pointer_mode (scalar_int_mode mode,
1656 addr_space_t as ATTRIBUTE_UNUSED)
1658 return targetm.valid_pointer_mode (mode);
1661 /* Some places still assume that all pointer or address modes are the
1662 standard Pmode and ptr_mode. These optimizations become invalid if
1663 the target actually supports multiple different modes. For now,
1664 we disable such optimizations on such targets, using this function. */
1666 bool
1667 target_default_pointer_address_modes_p (void)
1669 if (targetm.addr_space.address_mode != default_addr_space_address_mode)
1670 return false;
1671 if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
1672 return false;
1674 return true;
1677 /* Named address space version of legitimate_address_p.
1678 By default, all address spaces have the same form. */
1680 bool
1681 default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
1682 bool strict,
1683 addr_space_t as ATTRIBUTE_UNUSED)
1685 return targetm.legitimate_address_p (mode, mem, strict);
1688 /* Named address space version of LEGITIMIZE_ADDRESS.
1689 By default, all address spaces have the same form. */
1692 default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
1693 addr_space_t as ATTRIBUTE_UNUSED)
1695 return targetm.legitimize_address (x, oldx, mode);
1698 /* The default hook for determining if one named address space is a subset of
1699 another and to return which address space to use as the common address
1700 space. */
1702 bool
1703 default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
1705 return (subset == superset);
1708 /* The default hook for determining if 0 within a named address
1709 space is a valid address. */
1711 bool
1712 default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
1714 return false;
1717 /* The default hook for debugging the address space is to return the
1718 address space number to indicate DW_AT_address_class. */
1720 default_addr_space_debug (addr_space_t as)
1722 return as;
1725 /* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
1726 Don't complain about any address space. */
1728 void
1729 default_addr_space_diagnose_usage (addr_space_t, location_t)
1734 /* The default hook for TARGET_ADDR_SPACE_CONVERT. This hook should never be
1735 called for targets with only a generic address space. */
1738 default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
1739 tree from_type ATTRIBUTE_UNUSED,
1740 tree to_type ATTRIBUTE_UNUSED)
1742 gcc_unreachable ();
1745 /* The default implementation of TARGET_HARD_REGNO_NREGS. */
1747 unsigned int
1748 default_hard_regno_nregs (unsigned int, machine_mode mode)
1750 /* Targets with variable-sized modes must provide their own definition
1751 of this hook. */
1752 return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
1755 bool
1756 default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
1758 return true;
1761 /* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P. */
1763 bool
1764 default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
1765 addr_space_t addrspace ATTRIBUTE_UNUSED)
1767 return false;
1770 extern bool default_new_address_profitable_p (rtx, rtx_insn *, rtx);
1773 /* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P. */
1775 bool
1776 default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
1777 rtx_insn *insn ATTRIBUTE_UNUSED,
1778 rtx new_addr ATTRIBUTE_UNUSED)
1780 return true;
1783 bool
1784 default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
1785 tree ARG_UNUSED (name),
1786 tree ARG_UNUSED (args),
1787 int ARG_UNUSED (flags))
1789 warning (OPT_Wattributes,
1790 "target attribute is not supported on this machine");
1792 return false;
1795 bool
1796 default_target_option_pragma_parse (tree ARG_UNUSED (args),
1797 tree ARG_UNUSED (pop_target))
1799 /* If args is NULL the caller is handle_pragma_pop_options (). In that case,
1800 emit no warning because "#pragma GCC pop_options" is valid on targets that
1801 do not have the "target" pragma. */
1802 if (args)
1803 warning (OPT_Wpragmas,
1804 "%<#pragma GCC target%> is not supported for this machine");
1806 return false;
1809 bool
1810 default_target_can_inline_p (tree caller, tree callee)
1812 tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
1813 tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
1814 if (! callee_opts)
1815 callee_opts = target_option_default_node;
1816 if (! caller_opts)
1817 caller_opts = target_option_default_node;
1819 /* If both caller and callee have attributes, assume that if the
1820 pointer is different, the two functions have different target
1821 options since build_target_option_node uses a hash table for the
1822 options. */
1823 return callee_opts == caller_opts;
1826 /* By default, return false so that no target information needs to be
1827 collected for inlining. Target maintainers should re-define the hook
1828 if the target wants to take advantage of it. */
1830 bool
1831 default_need_ipa_fn_target_info (const_tree, unsigned int &)
1833 return false;
1836 bool
1837 default_update_ipa_fn_target_info (unsigned int &, const gimple *)
1839 return false;
1842 /* If the machine does not have a case insn that compares the bounds,
1843 this means extra overhead for dispatch tables, which raises the
1844 threshold for using them. */
1846 unsigned int
1847 default_case_values_threshold (void)
1849 return (targetm.have_casesi () ? 4 : 5);
1852 bool
1853 default_have_conditional_execution (void)
1855 return HAVE_conditional_execution;
1858 /* By default we assume that c99 functions are present at run time,
1859 but sincos is not. */
1860 bool
1861 default_libc_has_function (enum function_class fn_class,
1862 tree type ATTRIBUTE_UNUSED)
1864 if (fn_class == function_c94
1865 || fn_class == function_c99_misc
1866 || fn_class == function_c99_math_complex)
1867 return true;
1869 return false;
1872 /* By default assume that libc does not have a fast implementation. */
1874 bool
1875 default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
1877 return false;
1880 bool
1881 gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
1882 tree type ATTRIBUTE_UNUSED)
1884 return true;
1887 bool
1888 no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
1889 tree type ATTRIBUTE_UNUSED)
1891 return false;
1894 /* Assume some c99 functions are present at run time, including sincos. */
1895 bool
1896 bsd_libc_has_function (enum function_class fn_class,
1897 tree type ATTRIBUTE_UNUSED)
1899 if (fn_class == function_c94
1900 || fn_class == function_c99_misc
1901 || fn_class == function_sincos)
1902 return true;
1904 return false;
1907 unsigned
1908 default_libm_function_max_error (unsigned, machine_mode, bool)
1910 return ~0U;
1913 unsigned
1914 glibc_linux_libm_function_max_error (unsigned cfn, machine_mode mode,
1915 bool boundary_p)
1917 /* Let's use
1918 https://www.gnu.org/software/libc/manual/2.22/html_node/Errors-in-Math-Functions.html
1919 https://www.gnu.org/software/libc/manual/html_node/Errors-in-Math-Functions.html
1920 with usual values recorded here and significant outliers handled in
1921 target CPU specific overriders. The tables only record default
1922 rounding to nearest, for -frounding-math let's add some extra ulps.
1923 For boundary_p values (say finite results outside of [-1.,1.] for
1924 sin/cos, or [-0.,+Inf] for sqrt, etc.) let's use custom random testers. */
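/* As a worked example of the values below: double-precision sin falls in
   the df class, so with -frounding-math this hook returns 1 + 4 = 5 ulps.  */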
1925 int rnd = flag_rounding_math ? 4 : 0;
1926 bool sf = (REAL_MODE_FORMAT (mode) == &ieee_single_format
1927 || REAL_MODE_FORMAT (mode) == &mips_single_format
1928 || REAL_MODE_FORMAT (mode) == &motorola_single_format);
1929 bool df = (REAL_MODE_FORMAT (mode) == &ieee_double_format
1930 || REAL_MODE_FORMAT (mode) == &mips_double_format
1931 || REAL_MODE_FORMAT (mode) == &motorola_double_format);
1932 bool xf = (REAL_MODE_FORMAT (mode) == &ieee_extended_intel_96_format
1933 || REAL_MODE_FORMAT (mode) == &ieee_extended_intel_128_format
1934 || REAL_MODE_FORMAT (mode) == &ieee_extended_motorola_format);
1935 bool tf = (REAL_MODE_FORMAT (mode) == &ieee_quad_format
1936 || REAL_MODE_FORMAT (mode) == &mips_quad_format);
1938 switch (cfn)
1940 CASE_CFN_SQRT:
1941 CASE_CFN_SQRT_FN:
1942 if (boundary_p)
1943 /* https://gcc.gnu.org/pipermail/gcc-patches/2023-April/616595.html */
1944 return 0;
1945 if (sf || df || xf || tf)
1946 return 0 + rnd;
1947 break;
1948 CASE_CFN_COS:
1949 CASE_CFN_COS_FN:
1950 /* cos generally has errors like sin, but far more arches have 2 ulps
1951 for double. */
1952 if (!boundary_p && df)
1953 return 2 + rnd;
1954 gcc_fallthrough ();
1955 CASE_CFN_SIN:
1956 CASE_CFN_SIN_FN:
1957 if (boundary_p)
1958 /* According to
1959 https://sourceware.org/pipermail/gcc-patches/2023-April/616315.html
1960 seems default rounding sin/cos stay strictly in [-1.,1.] range,
1961 with rounding to infinity it can be 1ulp larger/smaller. */
1962 return flag_rounding_math ? 1 : 0;
1963 if (sf || df)
1964 return 1 + rnd;
1965 if (xf || tf)
1966 return 2 + rnd;
1967 break;
1968 default:
1969 break;
1972 return default_libm_function_max_error (cfn, mode, boundary_p);
1975 tree
1976 default_builtin_tm_load_store (tree ARG_UNUSED (type))
1978 return NULL_TREE;
1981 /* Compute cost of moving registers to/from memory. */
1984 default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1985 reg_class_t rclass ATTRIBUTE_UNUSED,
1986 bool in ATTRIBUTE_UNUSED)
1988 #ifndef MEMORY_MOVE_COST
1989 return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
1990 #else
1991 return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
1992 #endif
1995 /* Compute cost of moving data from a register of class FROM to one of
1996 TO, using MODE. */
1999 default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2000 reg_class_t from ATTRIBUTE_UNUSED,
2001 reg_class_t to ATTRIBUTE_UNUSED)
2003 #ifndef REGISTER_MOVE_COST
2004 return 2;
2005 #else
2006 return REGISTER_MOVE_COST (MACRO_MODE (mode),
2007 (enum reg_class) from, (enum reg_class) to);
2008 #endif
2011 /* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS. */
2013 bool
2014 default_slow_unaligned_access (machine_mode, unsigned int)
2016 return STRICT_ALIGNMENT;
2019 /* The default implementation of TARGET_ESTIMATED_POLY_VALUE. */
2021 HOST_WIDE_INT
2022 default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
2024 return x.coeffs[0];
2027 /* For hooks which use the MOVE_RATIO macro, this gives the legacy default
2028 behavior. SPEED_P is true if we are compiling for speed. */
2030 unsigned int
2031 get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
2033 unsigned int move_ratio;
2034 #ifdef MOVE_RATIO
2035 move_ratio = (unsigned int) MOVE_RATIO (speed_p);
2036 #else
2037 #if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
2038 move_ratio = 2;
2039 #else /* No cpymem patterns, pick a default. */
2040 move_ratio = ((speed_p) ? 15 : 3);
2041 #endif
2042 #endif
2043 return move_ratio;
2046 /* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
2047 used; return FALSE if the cpymem/setmem optab should be expanded, or
2048 a call to memcpy emitted. */
2050 bool
2051 default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
2052 unsigned int alignment,
2053 enum by_pieces_operation op,
2054 bool speed_p)
2056 unsigned int max_size = 0;
2057 unsigned int ratio = 0;
2059 switch (op)
2061 case CLEAR_BY_PIECES:
2062 max_size = STORE_MAX_PIECES;
2063 ratio = CLEAR_RATIO (speed_p);
2064 break;
2065 case MOVE_BY_PIECES:
2066 max_size = MOVE_MAX_PIECES;
2067 ratio = get_move_ratio (speed_p);
2068 break;
2069 case SET_BY_PIECES:
2070 max_size = STORE_MAX_PIECES;
2071 ratio = SET_RATIO (speed_p);
2072 break;
2073 case STORE_BY_PIECES:
2074 max_size = STORE_MAX_PIECES;
2075 ratio = get_move_ratio (speed_p);
2076 break;
2077 case COMPARE_BY_PIECES:
2078 max_size = COMPARE_MAX_PIECES;
2079 /* Pick a likely default, just as in get_move_ratio. */
2080 ratio = speed_p ? 15 : 3;
2081 break;
2084 return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
2087 /* This hook controls code generation for expanding a memcmp operation by
2088 pieces. Return 1 for the normal pattern of compare/jump after each pair
2089 of loads, or a higher number to reduce the number of branches. */
2092 default_compare_by_pieces_branch_ratio (machine_mode)
2094 return 1;
2097 /* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
2098 entry. If RECORD_P is true and the target supports named sections,
2099 the location of the NOPs will be recorded in a special object section
2100 called "__patchable_function_entries". This routine may be called
2101 twice per function to put NOPs before and after the function
2102 entry. */
2104 void
2105 default_print_patchable_function_entry (FILE *file,
2106 unsigned HOST_WIDE_INT patch_area_size,
2107 bool record_p)
2109 const char *nop_templ = 0;
2110 int code_num;
2111 rtx_insn *my_nop = make_insn_raw (gen_nop ());
2113 /* We use the template alone, relying on the (currently sane) assumption
2114 that the NOP template does not have variable operands. */
2115 code_num = recog_memoized (my_nop);
2116 nop_templ = get_insn_template (code_num, my_nop);
2118 if (record_p && targetm_common.have_named_sections)
2120 char buf[256];
2121 section *previous_section = in_section;
2122 const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);
2124 gcc_assert (asm_op != NULL);
2125 /* If SECTION_LINK_ORDER is supported, this internal label will
2126 serve as the symbol for the linked_to section. */
2127 ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", current_function_funcdef_no);
2129 unsigned int flags = SECTION_WRITE | SECTION_RELRO;
2130 if (HAVE_GAS_SECTION_LINK_ORDER)
2131 flags |= SECTION_LINK_ORDER;
2133 section *sect = get_section ("__patchable_function_entries",
2134 flags, current_function_decl);
2135 if (HAVE_COMDAT_GROUP && DECL_COMDAT_GROUP (current_function_decl))
2136 switch_to_comdat_section (sect, current_function_decl);
2137 else
2138 switch_to_section (sect);
2139 assemble_align (POINTER_SIZE);
2140 fputs (asm_op, file);
2141 assemble_name_raw (file, buf);
2142 fputc ('\n', file);
2144 switch_to_section (previous_section);
2145 ASM_OUTPUT_LABEL (file, buf);
2148 unsigned i;
2149 for (i = 0; i < patch_area_size; ++i)
2150 output_asm_insn (nop_templ, NULL);
2153 bool
2154 default_profile_before_prologue (void)
2156 #ifdef PROFILE_BEFORE_PROLOGUE
2157 return true;
2158 #else
2159 return false;
2160 #endif
2163 /* The default implementation of TARGET_PREFERRED_RELOAD_CLASS. */
2165 reg_class_t
2166 default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
2167 reg_class_t rclass)
2169 #ifdef PREFERRED_RELOAD_CLASS
2170 return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
2171 #else
2172 return rclass;
2173 #endif
2176 /* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS. */
2178 reg_class_t
2179 default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
2180 reg_class_t rclass)
2182 return rclass;
2185 /* The default implementation of TARGET_PREFERRED_RENAME_CLASS. */
2186 reg_class_t
2187 default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
2189 return NO_REGS;
2192 /* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P. */
2194 bool
2195 default_class_likely_spilled_p (reg_class_t rclass)
2197 return (reg_class_size[(int) rclass] == 1);

/* The default implementation of TARGET_CLASS_MAX_NREGS.  */

unsigned char
default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
  return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
					  MACRO_MODE (mode));
#else
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  unsigned int size = GET_MODE_SIZE (mode).to_constant ();
  return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}
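
/* A worked illustration of the fallback above, with an assumed
   UNITS_PER_WORD of 8: a 16-byte mode needs (16 + 8 - 1) / 8 == 2
   registers, and a 10-byte mode also needs (10 + 8 - 1) / 8 == 2,
   because the expression rounds the byte size up to the next whole
   word before dividing.  */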

/* Determine the debugging unwind mechanism for the target.  */

enum unwind_info_type
default_debug_unwind_info (void)
{
  /* If the target wants to force the use of dwarf2 unwind info, let it.  */
  /* ??? Change all users to the hook, then poison this.  */
#ifdef DWARF2_FRAME_INFO
  if (DWARF2_FRAME_INFO)
    return UI_DWARF2;
#endif

  /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
#ifdef DWARF2_DEBUGGING_INFO
  if (dwarf_debuginfo_p ())
    return UI_DWARF2;
#endif

  return UI_NONE;
}

/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   must define this hook.  */

unsigned int
default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
{
  gcc_unreachable ();
}

/* Determine the correct mode for a Dwarf frame register that represents
   register REGNO.  */

machine_mode
default_dwarf_frame_reg_mode (int regno)
{
  machine_mode save_mode = reg_raw_mode[regno];

  if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
					      regno, save_mode))
    save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
  return save_mode;
}

/* To be used by targets where reg_raw_mode doesn't return the right
   mode for registers used in apply_builtin_return and apply_builtin_arg.  */

fixed_size_mode
default_get_reg_raw_mode (int regno)
{
  /* Targets must override this hook if the underlying register is
     variable-sized.  */
  return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}

/* Return true if a leaf function should stay leaf even with profiling
   enabled.  */

bool
default_keep_leaf_when_profiled ()
{
  return false;
}

/* Return true if the state of option OPTION should be stored in PCH files
   and checked by default_pch_valid_p.  Store the option's current state
   in STATE if so.  */

static inline bool
option_affects_pch_p (int option, struct cl_option_state *state)
{
  if ((cl_options[option].flags & CL_TARGET) == 0)
    return false;
  if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
    return false;
  if (option_flag_var (option, &global_options) == &target_flags)
    if (targetm.check_pch_target_flags)
      return false;
  return get_option_state (&global_options, option, state);
}

/* Default version of get_pch_validity.
   By default, every flag difference is fatal; that will be mostly right for
   most targets, but completely right for very few.  */

void *
default_get_pch_validity (size_t *sz)
{
  struct cl_option_state state;
  size_t i;
  char *result, *r;

  *sz = 2;
  if (targetm.check_pch_target_flags)
    *sz += sizeof (target_flags);
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      *sz += state.size;

  result = r = XNEWVEC (char, *sz);
  r[0] = flag_pic;
  r[1] = flag_pie;
  r += 2;
  if (targetm.check_pch_target_flags)
    {
      memcpy (r, &target_flags, sizeof (target_flags));
      r += sizeof (target_flags);
    }
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	memcpy (r, state.data, state.size);
	r += state.size;
      }

  return result;
}
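
/* A sketch of the validity blob built above, for a hypothetical target
   with a 4-byte target_flags and two PCH-affecting options whose saved
   states are 1 and 2 bytes (all sizes illustrative):

	offset 0:  flag_pic          (1 byte)
	offset 1:  flag_pie          (1 byte)
	offset 2:  target_flags      (4 bytes, present only when
				      targetm.check_pch_target_flags)
	offset 6:  option state #1   (1 byte)
	offset 7:  option state #2   (2 bytes)

   default_pch_valid_p below consumes the same fields in the same order,
   so the two routines must be kept in sync.  */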

/* Return a message which says that a PCH file was created with a different
   setting of OPTION.  */

static const char *
pch_option_mismatch (const char *option)
{
  return xasprintf (_("created and used with differing settings of '%s'"),
		    option);
}

/* Default version of pch_valid_p.  */

const char *
default_pch_valid_p (const void *data_p, size_t len ATTRIBUTE_UNUSED)
{
  struct cl_option_state state;
  const char *data = (const char *) data_p;
  size_t i;

  /* -fpic and -fpie also usually make a PCH invalid.  */
  if (data[0] != flag_pic)
    return _("created and used with different settings of %<-fpic%>");
  if (data[1] != flag_pie)
    return _("created and used with different settings of %<-fpie%>");
  data += 2;

  /* Check target_flags.  */
  if (targetm.check_pch_target_flags)
    {
      int tf;
      const char *r;

      memcpy (&tf, data, sizeof (target_flags));
      data += sizeof (target_flags);
      r = targetm.check_pch_target_flags (tf);
      if (r != NULL)
	return r;
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	if (memcmp (data, state.data, state.size) != 0)
	  return pch_option_mismatch (cl_options[i].opt_text);
	data += state.size;
      }

  return NULL;
}

/* Default version of cstore_mode.  */

scalar_int_mode
default_cstore_mode (enum insn_code icode)
{
  return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
}

/* Default version of member_type_forces_blk.  */

bool
default_member_type_forces_blk (const_tree, machine_mode)
{
  return false;
}

/* Default version of canonicalize_comparison.  */

void
default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}

/* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

void
default_atomic_assign_expand_fenv (tree *, tree *, tree *)
{
}

#ifndef PAD_VARARGS_DOWN
#define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
#endif

/* Build an indirect-ref expression over the given TREE, which represents a
   piece of a va_arg() expansion.  */

tree
build_va_arg_indirect_ref (tree addr)
{
  addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
  return addr;
}

/* The "standard" implementation of va_arg: read the value from the
   current (padded) address and increment by the (padded) size.  */

tree
std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			  gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* All of the alignment and movement below is for args-grow-up machines.
     As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
     implement their own specialized gimplify_va_arg_expr routines.  */
  if (ARGS_GROW_DOWNWARD)
    gcc_unreachable ();

  indirect = pass_va_arg_by_reference (type);
  if (indirect)
    type = build_pointer_type (type);

  if (targetm.calls.split_complex_arg
      && TREE_CODE (type) == COMPLEX_TYPE
      && targetm.calls.split_complex_arg (type))
    {
      tree real_part, imag_part;

      real_part = std_gimplify_va_arg_expr (valist,
					    TREE_TYPE (type), pre_p, NULL);
      real_part = get_initialized_tmp_var (real_part, pre_p);

      imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
					    TREE_TYPE (type), pre_p, NULL);
      imag_part = get_initialized_tmp_var (imag_part, pre_p);

      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    }

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align
      && !TYPE_EMPTY_P (type)
      && !integer_zerop (TYPE_SIZE (type)))
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
			       valist_tmp,
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);
    }

  /* Compute the rounded size of the type.  */
  type_size = arg_size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
			   rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}
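
/* A worked example of the pointer arithmetic above, assuming
   PARM_BOUNDARY == 64 (so align == 8) and a type whose boundary is 16
   bytes.  For an incoming ap of 0x1008 the dynamic-alignment pair of
   statements computes

	ap = ap + (16 - 1)	-> 0x1017
	ap = ap & -16		-> 0x1010

   the classic round-up-to-boundary sequence.  If instead the type is a
   2-byte scalar on a PAD_VARARGS_DOWN target, rounded_size is
   round_up (2, 8) == 8, the value is read from ap + (8 - 2), i.e. the
   high-addressed end of its slot, and ap then advances by 8.  All
   numbers here are illustrative only.  */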

/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   not support nested low-overhead loops.  */

bool
can_use_doloop_if_innermost (const widest_int &, const widest_int &,
			     unsigned int loop_depth, bool)
{
  return loop_depth == 1;
}

/* Default implementation of TARGET_OPTAB_SUPPORTED_P.  */

bool
default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
  return true;
}

/* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST.  */

unsigned int
default_max_noce_ifcvt_seq_cost (edge e)
{
  bool predictable_p = predictable_edge_p (e);

  if (predictable_p)
    {
      if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost))
	return param_max_rtl_if_conversion_predictable_cost;
    }
  else
    {
      if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost))
	return param_max_rtl_if_conversion_unpredictable_cost;
    }

  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}
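
/* For instance, with neither param set explicitly and an assumed
   BRANCH_COST (true, false) of 2 on an unpredictable edge, the cap is
   2 * COSTS_N_INSNS (3) == COSTS_N_INSNS (6): converted sequences that
   cost more than roughly six simple insns keep their branches.  The
   branch-cost value is illustrative; every target defines its own.  */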

/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */

unsigned int
default_min_arithmetic_precision (void)
{
  return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}

/* Default implementation of TARGET_C_EXCESS_PRECISION.  */

enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
{
  return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
}

/* Default implementation for
   TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.  */

HOST_WIDE_INT
default_stack_clash_protection_alloca_probe_range (void)
{
  return 0;
}

/* The default implementation of TARGET_EARLY_REMAT_MODES.  */

void
default_select_early_remat_modes (sbitmap)
{
}

/* The default implementation of TARGET_PREFERRED_ELSE_VALUE.  */

tree
default_preferred_else_value (unsigned, tree type, unsigned, tree *)
{
  return build_zero_cst (type);
}

/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.  */

bool
default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
{
#ifdef HAVE_speculation_barrier
  return active ? HAVE_speculation_barrier : true;
#else
  return false;
#endif
}

/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   that can be used on targets that never have speculative execution.  */

bool
speculation_safe_value_not_needed (bool active)
{
  return !active;
}

/* Default implementation of the speculation-safe-load builtin.  This
   implementation simply copies val to result and generates a
   speculation_barrier insn, if such a pattern is defined.  */

rtx
default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
				rtx result, rtx val,
				rtx failval ATTRIBUTE_UNUSED)
{
  emit_move_insn (result, val);

#ifdef HAVE_speculation_barrier
  /* Assume the target knows what it is doing: if it defines a
     speculation barrier, but it is not enabled, then assume that one
     isn't needed.  */
  if (HAVE_speculation_barrier)
    emit_insn (gen_speculation_barrier ());
#endif

  return result;
}

/* How many bits to shift in order to access the tag bits.
   The default is to store the tag in the top 8 bits of a 64 bit pointer,
   hence shifting 56 bits will leave just the tag.  */
#define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8)
#define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT)
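
/* On an LP64 target HWASAN_SHIFT is 64 - 8 == 56, giving the layout

	bits 63..56	tag (one byte)
	bits 55..0	address bits

   so that, for example, a tagged pointer 0xab00123456789abc yields tag
   0xab after a logical shift right by 56.  This mirrors top-byte-ignore
   style addressing (e.g. AArch64 TBI); the example values are
   illustrative only.  */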

bool
default_memtag_can_tag_addresses ()
{
  return false;
}

uint8_t
default_memtag_tag_size ()
{
  return 8;
}

uint8_t
default_memtag_granule_size ()
{
  return 16;
}

/* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG.  */

rtx
default_memtag_insert_random_tag (rtx untagged, rtx target)
{
  gcc_assert (param_hwasan_instrument_stack);
  if (param_hwasan_random_frame_tag)
    {
      rtx fn = init_one_libfunc ("__hwasan_generate_tag");
      rtx new_tag = emit_library_call_value (fn, NULL_RTX, LCT_NORMAL, QImode);
      return targetm.memtag.set_tag (untagged, new_tag, target);
    }
  else
    {
      /* NOTE: The kernel API does not have __hwasan_generate_tag exposed.
	 In the future we may add the option to emit random tags with inline
	 instrumentation instead of function calls.  This would be the same
	 between the kernel and userland.  */
      return untagged;
    }
}

/* The default implementation of TARGET_MEMTAG_ADD_TAG.  */

rtx
default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
{
  /* Need to look into what the most efficient code sequence is.
     This is a code sequence that would be emitted *many* times, so we
     want it as small as possible.

     There are two places where tag overflow is a question:
       - Tagging the shadow stack.
	 (both tagging and untagging).
       - Tagging addressable pointers.

     We need to ensure both behaviors are the same (i.e. that the tag that
     ends up in a pointer after "overflowing" the tag bits with a tag addition
     is the same that ends up in the shadow space).

     The aim is that the behavior of tag addition should follow modulo
     wrapping in both instances.

     The libhwasan code doesn't have any path that increments a pointer's tag,
     which means it has no opinion on what happens when a tag increment
     overflows (and hence we can choose our own behavior).  */

  offset += ((uint64_t) tag_offset << HWASAN_SHIFT);
  return plus_constant (Pmode, base, offset);
}
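
/* Illustration of the intended modulo behavior, assuming plain 64-bit
   wraparound in the address arithmetic: with HWASAN_SHIFT == 56, adding
   tag_offset == 2 to a pointer whose current tag is 0xff overflows the
   top byte

	0xff + 0x02 -> 0x01	(the carry out of bit 63 is discarded)

   so the resulting tag is 0x01, i.e. the modulo-256 wrapping described
   above.  */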

/* The default implementation of TARGET_MEMTAG_SET_TAG.  */

rtx
default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
{
  gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
  tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
			     /* unsignedp = */ 1, OPTAB_WIDEN);
  rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
				 /* unsignedp = */ 1, OPTAB_DIRECT);
  gcc_assert (ret);
  return ret;
}
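
/* For example, with HWASAN_SHIFT == 56, a tag of 0x2a and an untagged
   pointer 0x00007fffffffe000 combine as

	0x2a << 56			-> 0x2a00000000000000
	| 0x00007fffffffe000		-> 0x2a007fffffffe000

   Note the IOR only produces the requested tag when the tag byte of
   UNTAGGED is already zero, which is what the UNTAGGED operand is
   expected to satisfy.  Values here are illustrative.  */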

/* The default implementation of TARGET_MEMTAG_EXTRACT_TAG.  */

rtx
default_memtag_extract_tag (rtx tagged_pointer, rtx target)
{
  rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
				 HWASAN_SHIFT_RTX, target,
				 /* unsignedp = */ 0,
				 OPTAB_DIRECT);
  rtx ret = gen_lowpart (QImode, tag);
  gcc_assert (ret);
  return ret;
}

/* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER.  */

rtx
default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
{
  rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
  rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
					   tag_mask, target, true,
					   OPTAB_DIRECT);
  gcc_assert (untagged_base);
  return untagged_base;
}
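
/* Continuing the example above: with HWASAN_SHIFT == 56 the mask is
   (1 << 56) - 1 == 0x00ffffffffffffff, so ANDing the tagged pointer
   0x2a007fffffffe000 with it clears the tag byte and restores
   0x00007fffffffe000.  */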

/* The default implementation of TARGET_GCOV_TYPE_SIZE.  */

HOST_WIDE_INT
default_gcov_type_size (void)
{
  return TYPE_PRECISION (long_long_integer_type_node) > 32 ? 64 : 32;
}

#include "gt-targhooks.h"