/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "sched-int.h"
#include "tree-pass.h"
#include "pass_manager.h"
#include "target-globals.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "tree-vectorizer.h"
#include "shrink-wrap.h"
#include "tree-iterator.h"
#include "case-cfn-macros.h"
#include "fold-const-call.h"
#include "tree-ssanames.h"
#include "selftest-rtl.h"
#include "print-rtl.h"
#include "symbol-summary.h"
#include "ipa-fnsummary.h"
#include "wide-int-bitmask.h"
#include "tree-vector-builder.h"
#include "dwarf2out.h"
#include "i386-options.h"
#include "i386-builtins.h"
#include "i386-expand.h"
#include "i386-features.h"
#include "function-abi.h"
#include "rtl-error.h"

/* This file should be included last.  */
#include "target-def.h"
static rtx legitimize_dllimport_symbol (rtx, bool);
static rtx legitimize_pe_coff_extern_decl (rtx, bool);
static void ix86_print_operand_address_as (FILE *, rtx, addr_space_t, bool);
static void ix86_emit_restore_reg_using_pop (rtx);
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)			\
  ((mode) == QImode ? 0				\
   : (mode) == HImode ? 1			\
   : (mode) == SImode ? 2			\
   : (mode) == DImode ? 3			\
   : 4)
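
/* For example, MODE_INDEX (SImode) is 2, so it picks the SImode entry of
   the per-processor multiply and divide cost tables; QImode and HImode map
   to slots 0 and 1, and anything wider than DImode shares the last slot.  */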
const struct processor_costs *ix86_tune_cost = NULL;

/* Set by -mtune or -Os.  */
const struct processor_costs *ix86_cost = NULL;
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20
/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  AREG, DREG, CREG, BREG,
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer, flags, fpsr, frame */
  NON_Q_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  /* SSE REX registers */
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  /* AVX-512 SSE registers */
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  /* Mask registers.  */
  ALL_MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS,
  MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS
};
/* The "default" register map used in 32bit mode.  */

int const debugger_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,
  12, 13, 14, 15, 16, 17, 18, 19,
  /* arg, flags, fpsr, frame */
  IGNORED_DWARF_REGNUM, IGNORED_DWARF_REGNUM,
  IGNORED_DWARF_REGNUM, IGNORED_DWARF_REGNUM,
  21, 22, 23, 24, 25, 26, 27, 28,
  29, 30, 31, 32, 33, 34, 35, 36,
  /* extended integer registers */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* extended sse registers */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* AVX-512 registers 16-23 */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* AVX-512 registers 24-31 */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  93, 94, 95, 96, 97, 98, 99, 100
};
/* The "default" register map used in 64bit mode.  */

int const debugger64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,
  33, 34, 35, 36, 37, 38, 39, 40,
  /* arg, flags, fpsr, frame */
  IGNORED_DWARF_REGNUM, IGNORED_DWARF_REGNUM,
  IGNORED_DWARF_REGNUM, IGNORED_DWARF_REGNUM,
  17, 18, 19, 20, 21, 22, 23, 24,
  41, 42, 43, 44, 45, 46, 47, 48,
  /* extended integer registers */
  8, 9, 10, 11, 12, 13, 14, 15,
  /* extended SSE registers */
  25, 26, 27, 28, 29, 30, 31, 32,
  /* AVX-512 registers 16-23 */
  67, 68, 69, 70, 71, 72, 73, 74,
  /* AVX-512 registers 24-31 */
  75, 76, 77, 78, 79, 80, 81, 82,
  118, 119, 120, 121, 122, 123, 124, 125
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believed these numbers to have these meanings:
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 was so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I had does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seemed to
   understand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still printed garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB printed for various FP stack regs
   when doing an `x' command were all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)  */
int const svr4_debugger_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,
  11, 12, 13, 14, 15, 16, 17, 18,
  /* arg, flags, fpsr, frame */
  IGNORED_DWARF_REGNUM, 9,
  IGNORED_DWARF_REGNUM, IGNORED_DWARF_REGNUM,
  21, 22, 23, 24, 25, 26, 27, 28,
  29, 30, 31, 32, 33, 34, 35, 36,
  /* extended integer registers */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* extended sse registers */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* AVX-512 registers 16-23 */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  /* AVX-512 registers 24-31 */
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM, INVALID_REGNUM,
  93, 94, 95, 96, 97, 98, 99, 100
};
/* Define parameter passing and return registers.  */

static int const x86_64_int_parameter_registers[6] =
{
  DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
};

static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  CX_REG, DX_REG, R8_REG, R9_REG
};

static int const x86_64_int_return_registers[4] =
{
  AX_REG, DX_REG, DI_REG, SI_REG
};
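
/* As a concrete illustration of the tables above: under the 64-bit SysV
   ABI the first two integer arguments of a call f (a, b) arrive in
   DI_REG (%rdi) and SI_REG (%rsi), while the MS ABI table places them in
   CX_REG (%rcx) and DX_REG (%rdx) instead.  */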
/* Define the structure for the machine field in struct function.  */

struct GTY(()) stack_local_entry
{
  struct stack_local_entry *next;
};
/* Which cpu are we scheduling for.  */
enum attr_cpu ix86_schedule;

/* Which cpu are we optimizing for.  */
enum processor_type ix86_tune;

/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if processor has SSE prefetch instruction.  */
unsigned char ix86_prefetch_sse;

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Alignment for incoming stack boundary in bits specified at
   command line.  */
unsigned int ix86_user_incoming_stack_boundary;

/* Default alignment for incoming stack boundary in bits.  */
unsigned int ix86_default_incoming_stack_boundary;

/* Alignment for incoming stack boundary in bits.  */
unsigned int ix86_incoming_stack_boundary;

/* True if there is no direct access to extern symbols.  */
bool ix86_has_no_direct_extern_access;

/* Calling abi specific va_list type nodes.  */
tree sysv_va_list_type_node;
tree ms_va_list_type_node;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;

/* Fence to use after loop using movnt.  */
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
   use SF or DFmode move instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSEHF_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };

#define MAX_CLASSES 8
/* Table of constants used by fldpi, fldln2, etc....  */
static REAL_VALUE_TYPE ext_80387_constants_table[5];
static bool ext_80387_constants_init;
static rtx ix86_function_value (const_tree, const_tree, bool);
static bool ix86_function_value_regno_p (const unsigned int);
static unsigned int ix86_function_arg_boundary (machine_mode,
						const_tree);
static rtx ix86_static_chain (const_tree, bool);
static int ix86_function_regparm (const_tree, const_tree);
static void ix86_compute_frame_layout (void);
static tree ix86_canonical_va_list_type (tree);
static unsigned int split_stack_prologue_scratch_regno (void);
static bool i386_asm_output_addr_const_extra (FILE *, rtx);
static bool ix86_can_inline_p (tree, tree);
static unsigned int ix86_minimum_incoming_stack_boundary (bool);

/* Whether -mtune= or -march= were specified */
int ix86_tune_defaulted;
int ix86_arch_specified;
/* Return true if a red-zone is in use.  We can't use a red-zone when
   there are local indirect jumps, like "indirect_jump" or "tablejump",
   which jump to another place in the function, since "call" in the
   indirect thunk pushes the return address onto the stack, destroying
   the red-zone.

   TODO: If we can reserve the first 2 WORDs, for PUSH and, another
   for CALL, in red-zone, we can allow local indirect jumps with
   indirect thunk.  */

bool
ix86_using_red_zone (void)
{
  return (TARGET_RED_ZONE
	  && !TARGET_64BIT_MS_ABI
	  && (!cfun->machine->has_local_indirect_jump
	      || cfun->machine->indirect_branch_type == indirect_branch_keep));
}
/* Return true, if profiling code should be emitted before
   the prologue.  Otherwise it returns false.
   Note: For x86 with "hotfix" it is sorried.  */
static bool
ix86_profile_before_prologue (void)
{
  return flag_fentry != 0;
}
/* Update register usage after having seen the compiler flags.  */

static void
ix86_conditional_register_usage (void)
{
  int i, c_mask;

  /* If there are no caller-saved registers, preserve all registers,
     except fixed_regs and registers used for function return value
     since aggregate_value_p checks call_used_regs[regno] on return.  */
  if (cfun && cfun->machine->no_caller_saved_registers)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (!fixed_regs[i] && !ix86_function_value_regno_p (i))
	call_used_regs[i] = 0;

  /* For 32-bit targets, disable the REX registers.  */
  if (!TARGET_64BIT)
    {
      for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
	CLEAR_HARD_REG_BIT (accessible_reg_set, i);
      for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
	CLEAR_HARD_REG_BIT (accessible_reg_set, i);
      for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
	CLEAR_HARD_REG_BIT (accessible_reg_set, i);
    }

  /* See the definition of CALL_USED_REGISTERS in i386.h.  */
  c_mask = CALL_USED_REGISTERS_MASK (TARGET_64BIT_MS_ABI);

  CLEAR_HARD_REG_SET (reg_class_contents[(int) CLOBBERED_REGS]);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Set/reset conditionally defined registers from
	 CALL_USED_REGISTERS initializer.  */
      if (call_used_regs[i] > 1)
	call_used_regs[i] = !!(call_used_regs[i] & c_mask);

      /* Calculate registers of CLOBBERED_REGS register set
	 as call used registers from GENERAL_REGS register set.  */
      if (TEST_HARD_REG_BIT (reg_class_contents[(int) GENERAL_REGS], i)
	  && call_used_regs[i])
	SET_HARD_REG_BIT (reg_class_contents[(int) CLOBBERED_REGS], i);
    }

  /* If MMX is disabled, disable the registers.  */
  if (!TARGET_MMX)
    accessible_reg_set &= ~reg_class_contents[MMX_REGS];

  /* If SSE is disabled, disable the registers.  */
  if (!TARGET_SSE)
    accessible_reg_set &= ~reg_class_contents[ALL_SSE_REGS];

  /* If the FPU is disabled, disable the registers.  */
  if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
    accessible_reg_set &= ~reg_class_contents[FLOAT_REGS];

  /* If AVX512F is disabled, disable the registers.  */
  if (! TARGET_AVX512F)
    {
      for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
	CLEAR_HARD_REG_BIT (accessible_reg_set, i);

      accessible_reg_set &= ~reg_class_contents[ALL_MASK_REGS];
    }
}
/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
ix86_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			      bool op0_preserve_value)
{
  /* The order of operands in x87 ficom compare is forced by combine in
     simplify_comparison () function.  Float operator is treated as RTX_OBJ
     with a precedence over other operators and is always put in the first
     place.  Swap condition and operands to match ficom instruction.  */
  if (!op0_preserve_value
      && GET_CODE (*op0) == FLOAT && MEM_P (XEXP (*op0, 0)) && REG_P (*op1))
    {
      enum rtx_code scode = swap_condition ((enum rtx_code) *code);

      /* We are called only for compares that are split to SAHF instruction.
	 Ensure that we have setcc/jcc insn for the swapped condition.  */
      if (ix86_fp_compare_code_to_integer (scode) != UNKNOWN)
	{
	  *code = (int) scode;
	  std::swap (*op0, *op1);
	}
    }
}
/* Hook to determine if one function can safely inline another.  */

static bool
ix86_can_inline_p (tree caller, tree callee)
{
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* Changes of those flags can be tolerated for always inlines.  Let's hope
     the user knows what he is doing.  */
  unsigned HOST_WIDE_INT always_inline_safe_mask
    = (MASK_USE_8BIT_IDIV | MASK_ACCUMULATE_OUTGOING_ARGS
       | MASK_NO_ALIGN_STRINGOPS | MASK_AVX256_SPLIT_UNALIGNED_LOAD
       | MASK_AVX256_SPLIT_UNALIGNED_STORE | MASK_CLD
       | MASK_NO_FANCY_MATH_387 | MASK_IEEE_FP | MASK_INLINE_ALL_STRINGOPS
       | MASK_INLINE_STRINGOPS_DYNAMICALLY | MASK_RECIP | MASK_STACK_PROBE
       | MASK_STV | MASK_TLS_DIRECT_SEG_REFS | MASK_VZEROUPPER
       | MASK_NO_PUSH_ARGS | MASK_OMIT_LEAF_FRAME_POINTER);

  if (!callee_tree)
    callee_tree = target_option_default_node;
  if (!caller_tree)
    caller_tree = target_option_default_node;
  if (callee_tree == caller_tree)
    return true;

  struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
  struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
  bool ret = false;
  bool always_inline
    = (DECL_DISREGARD_INLINE_LIMITS (callee)
       && lookup_attribute ("always_inline",
			    DECL_ATTRIBUTES (callee)));

  /* If callee only uses GPRs, ignore MASK_80387.  */
  if (TARGET_GENERAL_REGS_ONLY_P (callee_opts->x_ix86_target_flags))
    always_inline_safe_mask |= MASK_80387;

  cgraph_node *callee_node = cgraph_node::get (callee);
  /* Callee's isa options should be a subset of the caller's, i.e. an SSE4
     function can inline an SSE2 function but an SSE2 function can't inline
     an SSE4 function.  */
  if (((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
       != callee_opts->x_ix86_isa_flags)
      || ((caller_opts->x_ix86_isa_flags2 & callee_opts->x_ix86_isa_flags2)
	  != callee_opts->x_ix86_isa_flags2))
    ret = false;

  /* See if we have the same non-isa options.  */
  else if ((!always_inline
	    && caller_opts->x_target_flags != callee_opts->x_target_flags)
	   || (caller_opts->x_target_flags & ~always_inline_safe_mask)
	      != (callee_opts->x_target_flags & ~always_inline_safe_mask))
    ret = false;

  /* See if arch, tune, etc. are the same.  */
  else if (caller_opts->arch != callee_opts->arch)
    ret = false;

  else if (!always_inline && caller_opts->tune != callee_opts->tune)
    ret = false;

  else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath
	   /* If the callee doesn't use FP expressions differences in
	      ix86_fpmath can be ignored.  We are called from FEs
	      for multi-versioning call optimization, so beware of
	      ipa_fn_summaries not available.  */
	   && (! ipa_fn_summaries
	       || ipa_fn_summaries->get (callee_node) == NULL
	       || ipa_fn_summaries->get (callee_node)->fp_expressions))
    ret = false;

  else if (!always_inline
	   && caller_opts->branch_cost != callee_opts->branch_cost)
    ret = false;

  else
    ret = true;

  return ret;
}
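
/* A rough illustration of the ISA-subset rule checked above (hypothetical
   user code, not part of this file):

     __attribute__((target ("sse2"))) static int f (int x) { return x + 1; }
     __attribute__((target ("avx2"))) int g (int x) { return f (x); }

   Here g's ISA flags are a superset of f's, so f may be inlined into g,
   whereas an "sse2" caller is not allowed to inline an "avx2" callee.  */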
/* Return true if this goes in large data/bss.  */

static bool
ix86_in_large_data_p (tree exp)
{
  if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
    return false;

  if (exp == NULL_TREE)
    return false;

  /* Functions are never large data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* Automatic variables are never large data.  */
  if (VAR_P (exp) && !is_global_var (exp))
    return false;

  if (VAR_P (exp) && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".ldata") == 0
	  || strcmp (section, ".lbss") == 0)
	return true;
      return false;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in data because it might be too big when completed.  Also,
	 int_size_in_bytes returns -1 if size can vary or is larger than
	 an integer, in which case also it is safer to assume that it goes
	 in large data.  */
      if (size <= 0 || size > ix86_section_threshold)
	return true;
    }

  return false;
}
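
/* For instance, with -mcmodel=medium a global object whose size exceeds
   ix86_section_threshold (controlled by -mlarge-data-threshold=) is
   treated as large data and is placed in .ldata/.lbss rather than the
   default .data/.bss sections.  */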
/* i386-specific section flag to mark large sections.  */
#define SECTION_LARGE SECTION_MACH_DEP

/* Switch to the appropriate section for output of DECL.
   DECL is either a `VAR_DECL' node or a constant of some sort.
   RELOC indicates whether forming the initial value of DECL requires
   link-time relocations.  */

ATTRIBUTE_UNUSED static section *
x86_64_elf_select_section (tree decl, int reloc,
			   unsigned HOST_WIDE_INT align)
{
  if (ix86_in_large_data_p (decl))
    {
      const char *sname = NULL;
      unsigned int flags = SECTION_WRITE | SECTION_LARGE;
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA_REL:
	  sname = ".ldata.rel";
	  break;
	case SECCAT_DATA_REL_LOCAL:
	  sname = ".ldata.rel.local";
	  break;
	case SECCAT_DATA_REL_RO:
	  sname = ".ldata.rel.ro";
	  break;
	case SECCAT_DATA_REL_RO_LOCAL:
	  sname = ".ldata.rel.ro.local";
	  break;
	case SECCAT_BSS:
	  sname = ".lbss";
	  flags |= SECTION_BSS;
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	  sname = ".lrodata";
	  flags &= ~SECTION_WRITE;
	  break;
	default:
	  /* We don't split these for medium model.  Place them into
	     default sections and hope for best.  */
	  break;
	}
      if (sname)
	{
	  /* We might get called with string constants, but get_named_section
	     doesn't like them as they are not DECLs.  Also, we need to set
	     flags in that case.  */
	  if (!DECL_P (decl))
	    return get_section (sname, flags, NULL);
	  return get_named_section (decl, sname, reloc);
	}
    }
  return default_elf_select_section (decl, reloc, align);
}
/* Select a set of attributes for section NAME based on the properties
   of DECL and whether or not RELOC indicates that DECL's initializer
   might contain runtime relocations.  */

static unsigned int ATTRIBUTE_UNUSED
x86_64_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (ix86_in_large_data_p (decl))
    flags |= SECTION_LARGE;

  if (decl == NULL_TREE
      && (strcmp (name, ".ldata.rel.ro") == 0
	  || strcmp (name, ".ldata.rel.ro.local") == 0))
    flags |= SECTION_RELRO;

  if (strcmp (name, ".lbss") == 0
      || startswith (name, ".lbss.")
      || startswith (name, ".gnu.linkonce.lb."))
    flags |= SECTION_BSS;

  return flags;
}
/* Build up a unique section name, expressed as a
   STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
   RELOC indicates whether the initial value of EXP requires
   link-time relocations.  */

static void ATTRIBUTE_UNUSED
x86_64_elf_unique_section (tree decl, int reloc)
{
  if (ix86_in_large_data_p (decl))
    {
      const char *prefix = NULL;
      /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
      bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;

      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
	  prefix = one_only ? ".ld" : ".ldata";
	  break;
	case SECCAT_BSS:
	  prefix = one_only ? ".lb" : ".lbss";
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	  prefix = one_only ? ".lr" : ".lrodata";
	  break;
	default:
	  /* We don't split these for medium model.  Place them into
	     default sections and hope for best.  */
	  break;
	}
      if (prefix)
	{
	  const char *name, *linkonce;
	  char *string;

	  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
	  name = targetm.strip_name_encoding (name);

	  /* If we're using one_only, then there needs to be a .gnu.linkonce
	     prefix to the section name.  */
	  linkonce = one_only ? ".gnu.linkonce" : "";

	  string = ACONCAT ((linkonce, prefix, ".", name, NULL));

	  set_decl_section_name (decl, string);
	  return;
	}
    }
  default_unique_section (decl, reloc);
}
#ifndef LARGECOMM_SECTION_ASM_OP
#define LARGECOMM_SECTION_ASM_OP "\t.largecomm\t"
#endif

/* This says how to output assembler code to declare an
   uninitialized external linkage data object.

   For medium model x86-64 we need to use LARGECOMM_SECTION_ASM_OP opcode for
   large objects.  */

void
x86_elf_aligned_decl_common (FILE *file, tree decl,
			     const char *name, unsigned HOST_WIDE_INT size,
			     unsigned align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int) ix86_section_threshold)
    {
      switch_to_section (get_named_section (decl, ".lbss", 0));
      fputs (LARGECOMM_SECTION_ASM_OP, file);
    }
  else
    fputs (COMMON_ASM_OP, file);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
	   size, align / BITS_PER_UNIT);
}
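
/* Illustrative output: for a sufficiently large object under the medium
   code model the code above emits a directive of the form

       .largecomm  big_array,400000,32

   (name and numbers are made up), while small objects keep the usual
   ".comm name,size,align" form.  */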
/* Utility function for targets to use in implementing
   ASM_OUTPUT_ALIGNED_BSS.  */

void
x86_output_aligned_bss (FILE *file, tree decl, const char *name,
			unsigned HOST_WIDE_INT size, unsigned align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int) ix86_section_threshold)
    switch_to_section (get_named_section (decl, ".lbss", 0));
  else
    switch_to_section (bss_section);
  ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
#ifdef ASM_DECLARE_OBJECT_NAME
  last_assemble_variable_decl = decl;
  ASM_DECLARE_OBJECT_NAME (file, name, decl);
#else
  /* Standard thing is just output label for the object.  */
  ASM_OUTPUT_LABEL (file, name);
#endif /* ASM_DECLARE_OBJECT_NAME */
  ASM_OUTPUT_SKIP (file, size ? size : 1);
}
/* Decide whether we must probe the stack before any space allocation
   on this target.  It's essentially TARGET_STACK_PROBE except when
   -fstack-check causes the stack to be already probed differently.  */

bool
ix86_target_stack_probe (void)
{
  /* Do not probe the stack twice if static stack checking is enabled.  */
  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    return false;

  return TARGET_STACK_PROBE;
}
/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
ix86_function_ok_for_sibcall (tree decl, tree exp)
{
  tree type, decl_or_type;
  rtx a, b;
  bool bind_global = decl && !targetm.binds_local_p (decl);

  if (ix86_function_naked (current_function_decl))
    return false;

  /* Sibling call isn't OK if there are no caller-saved registers
     since all registers must be preserved before return.  */
  if (cfun->machine->no_caller_saved_registers)
    return false;

  /* If we are generating position-independent code, we cannot sibcall
     optimize direct calls to global functions, as the PLT requires
     %ebx be live.  (Darwin does not have a PLT.)  */
  if (!TARGET_MACHO
      && !TARGET_64BIT
      && flag_pic
      && flag_plt
      && bind_global)
    return false;

  /* If we need to align the outgoing stack, then sibcalling would
     unalign the stack, which may break the called function.  */
  if (ix86_minimum_incoming_stack_boundary (true)
      < PREFERRED_STACK_BOUNDARY)
    return false;

  if (decl)
    {
      decl_or_type = decl;
      type = TREE_TYPE (decl);
    }
  else
    {
      /* We're looking at the CALL_EXPR, we need the type of the function.  */
      type = CALL_EXPR_FN (exp);		/* pointer expression */
      type = TREE_TYPE (type);			/* pointer type */
      type = TREE_TYPE (type);			/* function type */
      decl_or_type = type;
    }

  /* If outgoing reg parm stack space changes, we cannot do sibcall.  */
  if ((OUTGOING_REG_PARM_STACK_SPACE (type)
       != OUTGOING_REG_PARM_STACK_SPACE (TREE_TYPE (current_function_decl)))
      || (REG_PARM_STACK_SPACE (decl_or_type)
	  != REG_PARM_STACK_SPACE (current_function_decl)))
    {
      maybe_complain_about_tail_call (exp,
				      "inconsistent size of stack space"
				      " allocated for arguments which are"
				      " passed in registers");
      return false;
    }

  /* Check that the return value locations are the same.  Like
     if we are returning floats on the 80387 register stack, we cannot
     make a sibcall from a function that doesn't return a float to a
     function that does or, conversely, from a function that does return
     a float to a function that doesn't; the necessary stack adjustment
     would not be executed.  This is also the place we notice
     differences in the return value ABI.  Note that it is ok for one
     of the functions to have void return type as long as the return
     value of the other is passed in a register.  */
  a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
  b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
			   cfun->decl, false);
  if (STACK_REG_P (a) || STACK_REG_P (b))
    {
      if (!rtx_equal_p (a, b))
	return false;
    }
  else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    ;
  else if (!rtx_equal_p (a, b))
    return false;

  if (TARGET_64BIT)
    {
      /* The SYSV ABI has more call-clobbered registers;
	 disallow sibcalls from MS to SYSV.  */
      if (cfun->machine->call_abi == MS_ABI
	  && ix86_function_type_abi (type) == SYSV_ABI)
	return false;
    }
  else
    {
      /* If this call is indirect, we'll need to be able to use a
	 call-clobbered register for the address of the target function.
	 Make sure that all such registers are not used for passing
	 parameters.  Note that DLLIMPORT functions and calls to global
	 functions via GOT slot are indirect.  */
      if (!decl
	  || (bind_global && flag_pic && !flag_plt)
	  || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl))
	  || flag_force_indirect_call)
	{
	  /* Check if regparm >= 3 since arg_reg_available is set to
	     false if regparm == 0.  If regparm is 1 or 2, there is
	     always a call-clobbered register available.

	     ??? The symbol indirect call doesn't need a call-clobbered
	     register.  But we don't know if this is a symbol indirect
	     call or not here.  */
	  if (ix86_function_regparm (type, decl) >= 3
	      && !cfun->machine->arg_reg_available)
	    return false;
	}
    }

  if (decl && ix86_use_pseudo_pic_reg ())
    {
      /* When the PIC register is used, it must be restored after ifunc
	 function returns.  */
      cgraph_node *node = cgraph_node::get (decl);
      if (node && node->ifunc_resolver)
	return false;
    }

  /* Disable sibcall if callee has indirect_return attribute and
     caller doesn't since callee will return to the caller's caller
     via an indirect jump.  */
  if (((flag_cf_protection & (CF_RETURN | CF_BRANCH))
       == (CF_RETURN | CF_BRANCH))
      && lookup_attribute ("indirect_return", TYPE_ATTRIBUTES (type))
      && !lookup_attribute ("indirect_return",
			    TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl))))
    return false;

  /* Otherwise okay.  That also includes certain types of indirect calls.  */
  return true;
}
/* This function determines from TYPE the calling-convention.  */

unsigned int
ix86_get_callcvt (const_tree type)
{
  unsigned int ret = 0;
  bool is_stdarg;
  tree attrs;

  if (TARGET_64BIT)
    return IX86_CALLCVT_CDECL;

  attrs = TYPE_ATTRIBUTES (type);
  if (attrs != NULL_TREE)
    {
      if (lookup_attribute ("cdecl", attrs))
	ret |= IX86_CALLCVT_CDECL;
      else if (lookup_attribute ("stdcall", attrs))
	ret |= IX86_CALLCVT_STDCALL;
      else if (lookup_attribute ("fastcall", attrs))
	ret |= IX86_CALLCVT_FASTCALL;
      else if (lookup_attribute ("thiscall", attrs))
	ret |= IX86_CALLCVT_THISCALL;

      /* Regparam isn't allowed for thiscall and fastcall.  */
      if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
	{
	  if (lookup_attribute ("regparm", attrs))
	    ret |= IX86_CALLCVT_REGPARM;
	  if (lookup_attribute ("sseregparm", attrs))
	    ret |= IX86_CALLCVT_SSEREGPARM;
	}

      if (IX86_BASE_CALLCVT(ret) != 0)
	return ret;
    }

  is_stdarg = stdarg_p (type);
  if (TARGET_RTD && !is_stdarg)
    return IX86_CALLCVT_STDCALL | ret;

  if (ret != 0
      || is_stdarg
      || TREE_CODE (type) != METHOD_TYPE
      || ix86_function_type_abi (type) != MS_ABI)
    return IX86_CALLCVT_CDECL | ret;

  return IX86_CALLCVT_THISCALL;
}
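
/* As an example, on 32-bit targets a declaration such as

     int __attribute__((fastcall)) f (int a, int b);

   makes ix86_get_callcvt return IX86_CALLCVT_FASTCALL, while a plain
   prototype without attributes (and without -mrtd) yields
   IX86_CALLCVT_CDECL.  */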
/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

static int
ix86_comp_type_attributes (const_tree type1, const_tree type2)
{
  unsigned int ccvt1, ccvt2;

  if (TREE_CODE (type1) != FUNCTION_TYPE
      && TREE_CODE (type1) != METHOD_TYPE)
    return 1;

  ccvt1 = ix86_get_callcvt (type1);
  ccvt2 = ix86_get_callcvt (type2);
  if (ccvt1 != ccvt2)
    return 0;
  if (ix86_function_regparm (type1, NULL)
      != ix86_function_regparm (type2, NULL))
    return 0;

  return 1;
}
/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling function indirectly
   or considering a libcall.  */

static int
ix86_function_regparm (const_tree type, const_tree decl)
{
  tree attr;
  int regparm;
  unsigned int ccvt;

  if (TARGET_64BIT)
    return (ix86_function_type_abi (type) == SYSV_ABI
	    ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
  ccvt = ix86_get_callcvt (type);
  regparm = ix86_regparm;

  if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
    {
      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
      if (attr)
	{
	  regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
	  return regparm;
	}
    }
  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
    return 2;
  else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
    return 1;

  /* Use register calling convention for local functions when possible.  */
  if (decl
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      cgraph_node *target = cgraph_node::get (decl);
      if (target)
	target = target->function_symbol ();

      /* Caller and callee must agree on the calling convention, so
	 checking here just optimize means that with
	 __attribute__((optimize (...))) caller could use regparm convention
	 and callee not, or vice versa.  Instead look at whether the callee
	 is optimized or not.  */
      if (target && opt_for_fn (target->decl, optimize)
	  && !(profile_flag && !flag_fentry))
	{
	  if (target->local && target->can_change_signature)
	    {
	      int local_regparm, globals = 0, regno;

	      /* Make sure no regparm register is taken by a
		 fixed register variable.  */
	      for (local_regparm = 0; local_regparm < REGPARM_MAX;
		   local_regparm++)
		if (fixed_regs[local_regparm])
		  break;

	      /* We don't want to use regparm(3) for nested functions as
		 these use a static chain pointer in the third argument.  */
	      if (local_regparm == 3 && DECL_STATIC_CHAIN (target->decl))
		local_regparm = 2;

	      /* Save a register for the split stack.  */
	      if (flag_split_stack)
		{
		  if (local_regparm == 3)
		    local_regparm = 2;
		  else if (local_regparm == 2
			   && DECL_STATIC_CHAIN (target->decl))
		    local_regparm = 1;
		}

	      /* Each fixed register usage increases register pressure,
		 so fewer registers should be used for argument passing.
		 This functionality can be overridden by an explicit
		 regparm value.  */
	      for (regno = AX_REG; regno <= DI_REG; regno++)
		if (fixed_regs[regno])
		  globals++;

	      local_regparm
		= globals < local_regparm ? local_regparm - globals : 0;

	      if (local_regparm > regparm)
		regparm = local_regparm;
	    }
	}
    }

  return regparm;
}
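
/* For example, on 32-bit targets

     __attribute__((regparm (3))) int f (int a, int b, int c);

   passes a, b and c in %eax, %edx and %ecx instead of on the stack; the
   code above may apply the same treatment automatically to suitable local
   functions.  */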
/* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
   DFmode (2) arguments in SSE registers for a function with the
   indicated TYPE and DECL.  DECL may be NULL when calling function
   indirectly or considering a libcall.  Return -1 if any FP parameter
   should be rejected by error.  This is used in situations where we imply
   the SSE calling convention but the function is called from another
   function with SSE disabled.  Otherwise return 0.  */

static int
ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
{
  gcc_assert (!TARGET_64BIT);

  /* Use SSE registers to pass SFmode and DFmode arguments if requested
     by the sseregparm attribute.  */
  if (TARGET_SSEREGPARM
      || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
    {
      if (!TARGET_SSE)
	{
	  if (warn)
	    {
	      if (decl)
		error ("calling %qD with attribute sseregparm without "
		       "SSE/SSE2 enabled", decl);
	      else
		error ("calling %qT with attribute sseregparm without "
		       "SSE/SSE2 enabled", type);
	    }
	  return 0;
	}

      return 2;
    }

  if (!decl)
    return 0;

  cgraph_node *target = cgraph_node::get (decl);
  if (target)
    target = target->function_symbol ();

  /* For local functions, pass up to SSE_REGPARM_MAX SFmode
     (and DFmode for SSE2) arguments in SSE registers.  */
  if (target
      /* TARGET_SSE_MATH */
      && (target_opts_for_fn (target->decl)->x_ix86_fpmath & FPMATH_SSE)
      && opt_for_fn (target->decl, optimize)
      && !(profile_flag && !flag_fentry))
    {
      if (target->local && target->can_change_signature)
	{
	  /* Refuse to produce wrong code when a local function with SSE
	     enabled is called from an SSE disabled function.
	     FIXME: We need a way to detect these cases cross-ltrans partition
	     and avoid using SSE calling conventions on local functions called
	     from function with SSE disabled.  For now at least delay the
	     warning until we know we are going to produce wrong code.  */
	  if (!TARGET_SSE && warn)
	    return -1;
	  return TARGET_SSE2_P (target_opts_for_fn (target->decl)
				->x_ix86_isa_flags) ? 2 : 1;
	}
    }

  return 0;
}
/* Return true if EAX is live at the start of the function.  Used by
   ix86_expand_prologue to determine if we need special help before
   calling allocate_stack_worker.  */

static bool
ix86_eax_live_at_start_p (void)
{
  /* Cheat.  Don't bother working forward from ix86_function_regparm
     to the function type to whether an actual argument is located in
     eax.  Instead just look at cfg info, which is still close enough
     to correct at this point.  This gives false positives for broken
     functions that might use uninitialized data that happens to be
     allocated in eax, but who cares?  */
  return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 0);
}
static bool
ix86_keep_aggregate_return_pointer (tree fntype)
{
  tree attr;

  if (!TARGET_64BIT)
    {
      attr = lookup_attribute ("callee_pop_aggregate_return",
			       TYPE_ATTRIBUTES (fntype));
      if (attr)
	return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);

      /* For 32-bit MS-ABI the default is to keep the aggregate
	 return pointer.  */
      if (ix86_function_type_abi (fntype) == MS_ABI)
	return true;
    }
  return KEEP_AGGREGATE_RETURN_POINTER != 0;
}
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
   of args is fixed, but if the number is variable then the caller
   must pop them all.  RTD can't be used for library calls now
   because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

static poly_int64
ix86_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
{
  unsigned int ccvt;

  /* None of the 64-bit ABIs pop arguments.  */
  if (TARGET_64BIT)
    return 0;

  ccvt = ix86_get_callcvt (funtype);

  if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
	       | IX86_CALLCVT_THISCALL)) != 0
      && ! stdarg_p (funtype))
    return size;

  /* Lose any fake structure return argument if it is passed on the stack.  */
  if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
      && !ix86_keep_aggregate_return_pointer (funtype))
    {
      int nregs = ix86_function_regparm (funtype, fundecl);
      if (nregs == 0)
	return GET_MODE_SIZE (Pmode);
    }

  return 0;
}
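
/* Concretely, a 32-bit stdcall function such as

     int __attribute__((stdcall)) f (int a, int b);

   pops its own 8 bytes of arguments (e.g. via "ret $8"), so this hook
   returns SIZE for it, whereas a cdecl function returns 0 and leaves the
   pop to the caller.  */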
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
ix86_legitimate_combined_insn (rtx_insn *insn)
{
  int i;

  /* Check operand constraints in case hard registers were propagated
     into insn pattern.  This check prevents combine pass from
     generating insn patterns with invalid hard register operands.
     These invalid insns can eventually confuse reload to error out
     with a spill failure.  See also PRs 46829 and 46843.  */

  gcc_assert (INSN_CODE (insn) >= 0);

  extract_insn (insn);
  preprocess_constraints (insn);

  int n_operands = recog_data.n_operands;
  int n_alternatives = recog_data.n_alternatives;
  for (i = 0; i < n_operands; i++)
    {
      rtx op = recog_data.operand[i];
      machine_mode mode = GET_MODE (op);
      const operand_alternative *op_alt;
      int offset = 0;
      bool win;
      int j;

      /* A unary operator may be accepted by the predicate, but it
	 is irrelevant for matching constraints.  */
      if (UNARY_P (op))
	op = XEXP (op, 0);

      if (SUBREG_P (op))
	{
	  if (REG_P (SUBREG_REG (op))
	      && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
	    offset = subreg_regno_offset (REGNO (SUBREG_REG (op)),
					  GET_MODE (SUBREG_REG (op)),
					  SUBREG_BYTE (op),
					  GET_MODE (op));
	  op = SUBREG_REG (op);
	}

      if (!(REG_P (op) && HARD_REGISTER_P (op)))
	continue;

      op_alt = recog_op_alt;

      /* Operand has no constraints, anything is OK.  */
      win = !n_alternatives;

      alternative_mask preferred = get_preferred_alternatives (insn);
      for (j = 0; j < n_alternatives; j++, op_alt += n_operands)
	{
	  if (!TEST_BIT (preferred, j))
	    continue;
	  if (op_alt[i].anything_ok
	      || (op_alt[i].matches != -1
		  && operands_match_p
		       (recog_data.operand[i],
			recog_data.operand[op_alt[i].matches]))
	      || reg_fits_class_p (op, op_alt[i].cl, offset, mode))
	    {
	      win = true;
	      break;
	    }
	}

      if (!win)
	return false;
    }

  return true;
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
ix86_asan_shadow_offset (void)
{
  return SUBTARGET_SHADOW_OFFSET;
}
/* Argument support functions.  */

/* Return true when register may be used to pass function parameters.  */

bool
ix86_function_arg_regno_p (int regno)
{
  int i;
  enum calling_abi call_abi;
  const int *parm_regs;

  if (TARGET_SSE && SSE_REGNO_P (regno)
      && regno < FIRST_SSE_REG + SSE_REGPARM_MAX)
    return true;

  if (!TARGET_64BIT)
    return (regno < REGPARM_MAX
	    || (TARGET_MMX && MMX_REGNO_P (regno)
		&& regno < FIRST_MMX_REG + MMX_REGPARM_MAX));

  /* TODO: The function should depend on current function ABI but
     builtins.cc would need updating then.  Therefore we use the
     default ABI.  */
  call_abi = ix86_cfun_abi ();

  /* RAX is used as hidden argument to va_arg functions.  */
  if (call_abi == SYSV_ABI && regno == AX_REG)
    return true;

  if (call_abi == MS_ABI)
    parm_regs = x86_64_ms_abi_int_parameter_registers;
  else
    parm_regs = x86_64_int_parameter_registers;

  for (i = 0; i < (call_abi == MS_ABI
		   ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
    if (regno == parm_regs[i])
      return true;

  return false;
}
/* Return true if we do not know how to pass ARG solely in registers.  */

static bool
ix86_must_pass_in_stack (const function_arg_info &arg)
{
  if (must_pass_in_stack_var_size_or_pad (arg))
    return true;

  /* For 32-bit, we want TImode aggregates to go on the stack.  But watch out!
     The layout_type routine is crafty and tries to trick us into passing
     currently unsupported vector types on the stack by using TImode.  */
  return (!TARGET_64BIT && arg.mode == TImode
	  && arg.type && TREE_CODE (arg.type) != VECTOR_TYPE);
}
/* It returns the size, in bytes, of the area reserved for arguments passed
   in registers for the function represented by FNDECL, depending on the
   ABI used.  */
int
ix86_reg_parm_stack_space (const_tree fndecl)
{
  enum calling_abi call_abi = SYSV_ABI;
  if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
    call_abi = ix86_function_abi (fndecl);
  else
    call_abi = ix86_function_type_abi (fndecl);
  if (TARGET_64BIT && call_abi == MS_ABI)
    return 32;
  return 0;
}
/* We add this as a workaround in order to use libc_has_function
   hook in i386.md.  */
bool
ix86_libc_has_function (enum function_class fn_class)
{
  return targetm.libc_has_function (fn_class, NULL_TREE);
}
/* Returns value SYSV_ABI or MS_ABI, depending on FNTYPE,
   specifying the call abi used.  */
enum calling_abi
ix86_function_type_abi (const_tree fntype)
{
  enum calling_abi abi = ix86_abi;

  if (fntype == NULL_TREE || TYPE_ATTRIBUTES (fntype) == NULL_TREE)
    return abi;

  if (abi == SYSV_ABI
      && lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
    {
      static int warned;
      if (TARGET_X32 && !warned)
	{
	  error ("X32 does not support %<ms_abi%> attribute");
	  warned = 1;
	}

      abi = MS_ABI;
    }
  else if (abi == MS_ABI
	   && lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
    abi = SYSV_ABI;

  return abi;
}

enum calling_abi
ix86_function_abi (const_tree fndecl)
{
  return fndecl ? ix86_function_type_abi (TREE_TYPE (fndecl)) : ix86_abi;
}
1559 specifying the call abi used. */
1561 ix86_cfun_abi (void)
1563 return cfun
? cfun
->machine
->call_abi
: ix86_abi
;
bool
ix86_function_ms_hook_prologue (const_tree fn)
{
  if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
    {
      if (decl_function_context (fn) != NULL_TREE)
	error_at (DECL_SOURCE_LOCATION (fn),
		  "%<ms_hook_prologue%> attribute is not compatible "
		  "with nested function");
      else
	return true;
    }
  return false;
}

bool
ix86_function_naked (const_tree fn)
{
  if (fn && lookup_attribute ("naked", DECL_ATTRIBUTES (fn)))
    return true;

  return false;
}
/* Write the extra assembler code needed to declare a function properly.  */

void
ix86_asm_output_function_label (FILE *out_file, const char *fname,
				tree decl)
{
  bool is_ms_hook = ix86_function_ms_hook_prologue (decl);

  if (cfun)
    cfun->machine->function_label_emitted = true;

  if (is_ms_hook)
    {
      int i, filler_count = (TARGET_64BIT ? 32 : 16);
      unsigned int filler_cc = 0xcccccccc;

      for (i = 0; i < filler_count; i += 4)
	fprintf (out_file, ASM_LONG " %#x\n", filler_cc);
    }

#ifdef SUBTARGET_ASM_UNWIND_INIT
  SUBTARGET_ASM_UNWIND_INIT (out_file);
#endif

  ASM_OUTPUT_LABEL (out_file, fname);

  /* Output magic byte marker, if hot-patch attribute is set.  */
  if (is_ms_hook)
    {
      if (TARGET_64BIT)
	{
	  /* leaq [%rsp + 0], %rsp  */
	  fputs (ASM_BYTE "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n",
		 out_file);
	}
      else
	{
	  /* movl.s %edi, %edi
	     push   %ebp
	     movl.s %esp, %ebp  */
	  fputs (ASM_BYTE "0x8b, 0xff, 0x55, 0x8b, 0xec\n", out_file);
	}
    }
}

/* Implementation of call abi switching target hook.  Specific to FNDECL
   the specific call register sets are set.  See also
   ix86_conditional_register_usage for more details.  */

void
ix86_call_abi_override (const_tree fndecl)
{
  cfun->machine->call_abi = ix86_function_abi (fndecl);
}

/* Return 1 if a pseudo register should be created and used to hold
   the GOT address for PIC code.  */
bool
ix86_use_pseudo_pic_reg (void)
{
  if ((TARGET_64BIT
       && (ix86_cmodel == CM_SMALL_PIC
	   || TARGET_PECOFF))
      || !flag_pic)
    return false;
  return true;
}

/* Initialize large model PIC register.  */

static void
ix86_init_large_pic_reg (unsigned int tmp_regno)
{
  rtx_code_label *label;
  rtx tmp_reg;

  gcc_assert (Pmode == DImode);
  label = gen_label_rtx ();
  emit_label (label);
  LABEL_PRESERVE_P (label) = 1;
  tmp_reg = gen_rtx_REG (Pmode, tmp_regno);
  gcc_assert (REGNO (pic_offset_table_rtx) != tmp_regno);
  emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx,
				label));
  emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
  emit_insn (gen_add2_insn (pic_offset_table_rtx, tmp_reg));
  const char *name = LABEL_NAME (label);
  PUT_CODE (label, NOTE);
  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
  NOTE_DELETED_LABEL_NAME (label) = name;
}
;
/* Create and initialize PIC register if required.  */
static void
ix86_init_pic_reg (void)
{
  edge entry_edge;
  rtx_insn *seq;

  if (!ix86_use_pseudo_pic_reg ())
    return;

  start_sequence ();

  if (TARGET_64BIT)
    {
      if (ix86_cmodel == CM_LARGE_PIC)
	ix86_init_large_pic_reg (R11_REG);
      else
	emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
    }
  else
    {
      /* If there is future mcount call in the function it is more profitable
	 to emit SET_GOT into ABI defined REAL_PIC_OFFSET_TABLE_REGNUM.  */
      rtx reg = crtl->profile
		? gen_rtx_REG (Pmode, REAL_PIC_OFFSET_TABLE_REGNUM)
		: pic_offset_table_rtx;
      rtx_insn *insn = emit_insn (gen_set_got (reg));
      RTX_FRAME_RELATED_P (insn) = 1;
      if (crtl->profile)
	emit_move_insn (pic_offset_table_rtx, reg);
      add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
    }

  seq = get_insns ();
  end_sequence ();

  entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  insert_insn_on_edge (seq, entry_edge);
  commit_one_edge_insertion (entry_edge);
}

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum,  /* Argument info to initialize */
		      tree fntype,	/* tree ptr for function decl */
		      rtx libname,	/* SYMBOL_REF of library name or 0 */
		      tree fndecl,
		      int caller)
{
  struct cgraph_node *local_info_node = NULL;
  struct cgraph_node *target = NULL;

  /* Set silent_p to false to raise an error for invalid calls when
     expanding function body.  */
  cfun->machine->silent_p = false;

  memset (cum, 0, sizeof (*cum));

  if (fndecl)
    {
      target = cgraph_node::get (fndecl);
      if (target)
	{
	  target = target->function_symbol ();
	  local_info_node = cgraph_node::local_info_node (target->decl);
	  cum->call_abi = ix86_function_abi (target->decl);
	}
      else
	cum->call_abi = ix86_function_abi (fndecl);
    }
  else
    cum->call_abi = ix86_function_type_abi (fntype);

  cum->caller = caller;

  /* Set up the number of registers to use for passing arguments.  */
  cum->nregs = ix86_regparm;
  if (TARGET_64BIT)
    {
      cum->nregs = (cum->call_abi == SYSV_ABI
		    ? X86_64_REGPARM_MAX
		    : X86_64_MS_REGPARM_MAX);
    }
  if (TARGET_SSE)
    {
      cum->sse_nregs = SSE_REGPARM_MAX;
      if (TARGET_64BIT)
	{
	  cum->sse_nregs = (cum->call_abi == SYSV_ABI
			    ? X86_64_SSE_REGPARM_MAX
			    : X86_64_MS_SSE_REGPARM_MAX);
	}
    }
  if (TARGET_MMX)
    cum->mmx_nregs = MMX_REGPARM_MAX;
  cum->warn_avx512f = true;
  cum->warn_avx = true;
  cum->warn_sse = true;
  cum->warn_mmx = true;

  /* Because type might mismatch in between caller and callee, we need to
     use actual type of function for local calls.
     FIXME: cgraph_analyze can be told to actually record if function uses
     va_start so for local functions maybe_vaarg can be made aggressive
     instead of conservative.
     FIXME: once typesystem is fixed, we won't need this code anymore.  */
  if (local_info_node && local_info_node->local
      && local_info_node->can_change_signature)
    fntype = TREE_TYPE (target->decl);
  cum->stdarg = stdarg_p (fntype);
  cum->maybe_vaarg = (fntype
		      ? (!prototype_p (fntype) || stdarg_p (fntype))
		      : !libname);

  cum->warn_empty = !warn_abi || cum->stdarg;
  if (!cum->warn_empty && fntype)
    {
      function_args_iterator iter;
      tree argtype;
      bool seen_empty_type = false;
      FOREACH_FUNCTION_ARGS (fntype, argtype, iter)
	{
	  if (argtype == error_mark_node || VOID_TYPE_P (argtype))
	    break;
	  if (TYPE_EMPTY_P (argtype))
	    seen_empty_type = true;
	  else if (seen_empty_type)
	    {
	      cum->warn_empty = true;
	      break;
	    }
	}
    }

  if (!TARGET_64BIT)
    {
      /* If there are variable arguments, then we won't pass anything
	 in registers in 32-bit mode.  */
      if (stdarg_p (fntype))
	{
	  cum->nregs = 0;
	  /* Since in 32-bit, variable arguments are always passed on
	     stack, there is scratch register available for indirect
	     sibcalls.  */
	  cfun->machine->arg_reg_available = true;
	  cum->sse_nregs = 0;
	  cum->mmx_nregs = 0;
	  cum->warn_avx512f = false;
	  cum->warn_avx = false;
	  cum->warn_sse = false;
	  cum->warn_mmx = false;
	  return;
	}

      /* Use ecx and edx registers if function has fastcall attribute,
	 else look for regparm information.  */
      if (fntype)
	{
	  unsigned int ccvt = ix86_get_callcvt (fntype);
	  if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	    {
	      cum->nregs = 1;
	      cum->fastcall = 1; /* Same first register as in fastcall.  */
	    }
	  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	    {
	      cum->nregs = 2;
	      cum->fastcall = 1;
	    }
	  else
	    cum->nregs = ix86_function_regparm (fntype, fndecl);
	}

      /* Set up the number of SSE registers used for passing SFmode
	 and DFmode arguments.  Warn for mismatching ABI.  */
      cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
    }

  cfun->machine->arg_reg_available = (cum->nregs > 0);
}
/* Return the "natural" mode for TYPE.  In most cases, this is just TYPE_MODE.
   But in the case of vector types, it is some vector mode.

   When we have only some of our vector isa extensions enabled, then there
   are some modes for which vector_mode_supported_p is false.  For these
   modes, the generic vector support in gcc will choose some non-vector mode
   in order to implement the type.  By computing the natural mode, we'll
   select the proper ABI location for the operand and not depend on whatever
   the middle-end decides to do with these vector types.

   The middle-end can't deal with the vector types > 16 bytes.  In this
   case, we return the original mode and warn ABI change if CUM isn't
   NULL.

   If INT_RETURN is true, warn ABI change if the vector mode isn't
   available for function return value.  */

static machine_mode
type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum,
		   bool in_return)
{
  machine_mode mode = TYPE_MODE (type);

  if (VECTOR_TYPE_P (type) && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16 || size == 32 || size == 64)
	  /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
	  && TYPE_VECTOR_SUBPARTS (type) > 1)
	{
	  machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

	  /* There are no XFmode vector modes ...  */
	  if (innermode == XFmode)
	    return mode;

	  /* ... and no decimal float vector modes.  */
	  if (DECIMAL_FLOAT_MODE_P (innermode))
	    return mode;

	  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (type)))
	    mode = MIN_MODE_VECTOR_FLOAT;
	  else
	    mode = MIN_MODE_VECTOR_INT;

	  /* Get the mode which has this inner mode and number of units.  */
	  FOR_EACH_MODE_FROM (mode, mode)
	    if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
		&& GET_MODE_INNER (mode) == innermode)
	      {
		if (size == 64 && !TARGET_AVX512F && !TARGET_IAMCU)
		  {
		    static bool warnedavx512f;
		    static bool warnedavx512f_ret;

		    if (cum && cum->warn_avx512f && !warnedavx512f)
		      {
			if (warning (OPT_Wpsabi, "AVX512F vector argument "
				     "without AVX512F enabled changes the ABI"))
			  warnedavx512f = true;
		      }
		    else if (in_return && !warnedavx512f_ret)
		      {
			if (warning (OPT_Wpsabi, "AVX512F vector return "
				     "without AVX512F enabled changes the ABI"))
			  warnedavx512f_ret = true;
		      }

		    return TYPE_MODE (type);
		  }
		else if (size == 32 && !TARGET_AVX && !TARGET_IAMCU)
		  {
		    static bool warnedavx;
		    static bool warnedavx_ret;

		    if (cum && cum->warn_avx && !warnedavx)
		      {
			if (warning (OPT_Wpsabi, "AVX vector argument "
				     "without AVX enabled changes the ABI"))
			  warnedavx = true;
		      }
		    else if (in_return && !warnedavx_ret)
		      {
			if (warning (OPT_Wpsabi, "AVX vector return "
				     "without AVX enabled changes the ABI"))
			  warnedavx_ret = true;
		      }

		    return TYPE_MODE (type);
		  }
		else if (((size == 8 && TARGET_64BIT) || size == 16)
			 && !TARGET_SSE
			 && !TARGET_IAMCU)
		  {
		    static bool warnedsse;
		    static bool warnedsse_ret;

		    if (cum && cum->warn_sse && !warnedsse)
		      {
			if (warning (OPT_Wpsabi, "SSE vector argument "
				     "without SSE enabled changes the ABI"))
			  warnedsse = true;
		      }
		    else if (!TARGET_64BIT && in_return && !warnedsse_ret)
		      {
			if (warning (OPT_Wpsabi, "SSE vector return "
				     "without SSE enabled changes the ABI"))
			  warnedsse_ret = true;
		      }
		  }
		else if ((size == 8 && !TARGET_64BIT)
			 && (!cfun
			     || cfun->machine->func_type == TYPE_NORMAL)
			 && !TARGET_MMX
			 && !TARGET_IAMCU)
		  {
		    static bool warnedmmx;
		    static bool warnedmmx_ret;

		    if (cum && cum->warn_mmx && !warnedmmx)
		      {
			if (warning (OPT_Wpsabi, "MMX vector argument "
				     "without MMX enabled changes the ABI"))
			  warnedmmx = true;
		      }
		    else if (in_return && !warnedmmx_ret)
		      {
			if (warning (OPT_Wpsabi, "MMX vector return "
				     "without MMX enabled changes the ABI"))
			  warnedmmx_ret = true;
		      }
		  }
		return mode;
	      }
	}
    }

  return mode;
}
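
/* For instance, compiling

     typedef int v8si __attribute__((vector_size (32)));
     v8si f (v8si x) { return x; }

   for x86-64 without -mavx takes the size == 32 path above and emits the
   -Wpsabi note that passing/returning such a vector changes the ABI.  */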
/* We want to pass a value in REGNO whose "natural" mode is MODE.  However,
   this may not agree with the mode that the type system has chosen for the
   register, which is ORIG_MODE.  If ORIG_MODE is not BLKmode, then we can
   go ahead and use it.  Otherwise we have to build a PARALLEL instead.  */

static rtx
gen_reg_or_parallel (machine_mode mode, machine_mode orig_mode,
		     unsigned int regno)
{
  rtx tmp;

  if (orig_mode != BLKmode)
    tmp = gen_rtx_REG (orig_mode, regno);
  else
    {
      tmp = gen_rtx_REG (mode, regno);
      tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
    }

  return tmp;
}
2032 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
2033 of this code is to classify each 8bytes of incoming argument by the register
2034 class and assign registers accordingly. */
2036 /* Return the union class of CLASS1 and CLASS2.
2037 See the x86-64 PS ABI for details. */
static enum x86_64_reg_class
merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
{
  /* Rule #1: If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
     the other class.  */
  if (class1 == X86_64_NO_CLASS)
    return class2;
  if (class2 == X86_64_NO_CLASS)
    return class1;

  /* Rule #3: If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #4: If one of the classes is INTEGER, the result is INTEGER.  */
  if ((class1 == X86_64_INTEGERSI_CLASS
       && (class2 == X86_64_SSESF_CLASS || class2 == X86_64_SSEHF_CLASS))
      || (class2 == X86_64_INTEGERSI_CLASS
	  && (class1 == X86_64_SSESF_CLASS || class1 == X86_64_SSEHF_CLASS)))
    return X86_64_INTEGERSI_CLASS;
  if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
      || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
    return X86_64_INTEGER_CLASS;

  /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
     MEMORY is used.  */
  if (class1 == X86_64_X87_CLASS
      || class1 == X86_64_X87UP_CLASS
      || class1 == X86_64_COMPLEX_X87_CLASS
      || class2 == X86_64_X87_CLASS
      || class2 == X86_64_X87UP_CLASS
      || class2 == X86_64_COMPLEX_X87_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #6: Otherwise class SSE is used.  */
  return X86_64_SSE_CLASS;
}
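
/* Worked example (added for illustration, not part of the original sources):
   for a hypothetical "struct { int i; float f; }" both fields share one
   eightbyte, so their per-field classes are merged:

     merge_classes (X86_64_INTEGERSI_CLASS, X86_64_SSESF_CLASS)
       == X86_64_INTEGERSI_CLASS		(rule #4)

   and the whole struct therefore travels in a single general-purpose
   register.  */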
/* Classify the argument of type TYPE and mode MODE.
   CLASSES will be filled by the register class used to pass each word
   of the operand.  The number of words is returned.  In case the parameter
   should be passed in memory, 0 is returned.  As a special case for zero
   sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records and specifies offset
   of the offset in bits modulo 512 to avoid overflow cases.

   See the x86-64 PS ABI for details.  */
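
/* Example (added for illustration, not part of the original sources): for a
   hypothetical "struct { long l; double d; }" classify_argument returns 2
   with classes[0] = X86_64_INTEGER_CLASS and classes[1] = X86_64_SSEDF_CLASS,
   so the first eightbyte is passed in a general-purpose register and the
   second in an SSE register.  */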
2094 classify_argument (machine_mode mode
, const_tree type
,
2095 enum x86_64_reg_class classes
[MAX_CLASSES
], int bit_offset
,
2096 int &zero_width_bitfields
)
2099 = mode
== BLKmode
? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
2100 int words
= CEIL (bytes
+ (bit_offset
% 64) / 8, UNITS_PER_WORD
);
2102 /* Variable sized entities are always passed/returned in memory. */
2106 if (mode
!= VOIDmode
)
2108 /* The value of "named" doesn't matter. */
2109 function_arg_info
arg (const_cast<tree
> (type
), mode
, /*named=*/true);
2110 if (targetm
.calls
.must_pass_in_stack (arg
))
2114 if (type
&& AGGREGATE_TYPE_P (type
))
2118 enum x86_64_reg_class subclasses
[MAX_CLASSES
];
2120 /* On x86-64 we pass structures larger than 64 bytes on the stack. */
2124 for (i
= 0; i
< words
; i
++)
2125 classes
[i
] = X86_64_NO_CLASS
;
2127 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2128 signalize memory class, so handle it as special case. */
2131 classes
[0] = X86_64_NO_CLASS
;
2135 /* Classify each field of record and merge classes. */
2136 switch (TREE_CODE (type
))
2139 /* And now merge the fields of structure. */
2140 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
2142 if (TREE_CODE (field
) == FIELD_DECL
)
2146 if (TREE_TYPE (field
) == error_mark_node
)
2149 /* Bitfields are always classified as integer. Handle them
2150 early, since later code would consider them to be
2151 misaligned integers. */
2152 if (DECL_BIT_FIELD (field
))
2154 if (integer_zerop (DECL_SIZE (field
)))
2156 if (DECL_FIELD_CXX_ZERO_WIDTH_BIT_FIELD (field
))
2158 if (zero_width_bitfields
!= 2)
2160 zero_width_bitfields
= 1;
2164 for (i
= (int_bit_position (field
)
2165 + (bit_offset
% 64)) / 8 / 8;
2166 i
< ((int_bit_position (field
) + (bit_offset
% 64))
2167 + tree_to_shwi (DECL_SIZE (field
))
2170 = merge_classes (X86_64_INTEGER_CLASS
, classes
[i
]);
2176 type
= TREE_TYPE (field
);
2178 /* Flexible array member is ignored. */
2179 if (TYPE_MODE (type
) == BLKmode
2180 && TREE_CODE (type
) == ARRAY_TYPE
2181 && TYPE_SIZE (type
) == NULL_TREE
2182 && TYPE_DOMAIN (type
) != NULL_TREE
2183 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type
))
2188 if (!warned
&& warn_psabi
)
2191 inform (input_location
,
2192 "the ABI of passing struct with"
2193 " a flexible array member has"
2194 " changed in GCC 4.4");
2198 num
= classify_argument (TYPE_MODE (type
), type
,
2200 (int_bit_position (field
)
2201 + bit_offset
) % 512,
2202 zero_width_bitfields
);
2205 pos
= (int_bit_position (field
)
2206 + (bit_offset
% 64)) / 8 / 8;
2207 for (i
= 0; i
< num
&& (i
+ pos
) < words
; i
++)
2209 = merge_classes (subclasses
[i
], classes
[i
+ pos
]);
2216 /* Arrays are handled as small records. */
2219 num
= classify_argument (TYPE_MODE (TREE_TYPE (type
)),
2220 TREE_TYPE (type
), subclasses
, bit_offset
,
2221 zero_width_bitfields
);
2225 /* The partial classes are now full classes. */
2226 if (subclasses
[0] == X86_64_SSESF_CLASS
&& bytes
!= 4)
2227 subclasses
[0] = X86_64_SSE_CLASS
;
2228 if (subclasses
[0] == X86_64_SSEHF_CLASS
&& bytes
!= 2)
2229 subclasses
[0] = X86_64_SSE_CLASS
;
2230 if (subclasses
[0] == X86_64_INTEGERSI_CLASS
2231 && !((bit_offset
% 64) == 0 && bytes
== 4))
2232 subclasses
[0] = X86_64_INTEGER_CLASS
;
2234 for (i
= 0; i
< words
; i
++)
2235 classes
[i
] = subclasses
[i
% num
];
2240 case QUAL_UNION_TYPE
:
2241 /* Unions are similar to RECORD_TYPE but offset is always 0.
2243 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
2245 if (TREE_CODE (field
) == FIELD_DECL
)
2249 if (TREE_TYPE (field
) == error_mark_node
)
2252 num
= classify_argument (TYPE_MODE (TREE_TYPE (field
)),
2253 TREE_TYPE (field
), subclasses
,
2254 bit_offset
, zero_width_bitfields
);
2257 for (i
= 0; i
< num
&& i
< words
; i
++)
2258 classes
[i
] = merge_classes (subclasses
[i
], classes
[i
]);
2269 /* When size > 16 bytes, if the first one isn't
2270 X86_64_SSE_CLASS or any other ones aren't
2271 X86_64_SSEUP_CLASS, everything should be passed in
2273 if (classes
[0] != X86_64_SSE_CLASS
)
2276 for (i
= 1; i
< words
; i
++)
2277 if (classes
[i
] != X86_64_SSEUP_CLASS
)
2281 /* Final merger cleanup. */
2282 for (i
= 0; i
< words
; i
++)
2284 /* If one class is MEMORY, everything should be passed in
2286 if (classes
[i
] == X86_64_MEMORY_CLASS
)
2289 /* The X86_64_SSEUP_CLASS should be always preceded by
2290 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
2291 if (classes
[i
] == X86_64_SSEUP_CLASS
2292 && classes
[i
- 1] != X86_64_SSE_CLASS
2293 && classes
[i
- 1] != X86_64_SSEUP_CLASS
)
2295 /* The first one should never be X86_64_SSEUP_CLASS. */
2296 gcc_assert (i
!= 0);
2297 classes
[i
] = X86_64_SSE_CLASS
;
2300 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
2301 everything should be passed in memory. */
2302 if (classes
[i
] == X86_64_X87UP_CLASS
2303 && (classes
[i
- 1] != X86_64_X87_CLASS
))
2307 /* The first one should never be X86_64_X87UP_CLASS. */
2308 gcc_assert (i
!= 0);
2309 if (!warned
&& warn_psabi
)
2312 inform (input_location
,
2313 "the ABI of passing union with %<long double%>"
2314 " has changed in GCC 4.4");
2322 /* Compute alignment needed. We align all types to natural boundaries with
2323 exception of XFmode that is aligned to 64bits. */
2324 if (mode
!= VOIDmode
&& mode
!= BLKmode
)
2326 int mode_alignment
= GET_MODE_BITSIZE (mode
);
2329 mode_alignment
= 128;
2330 else if (mode
== XCmode
)
2331 mode_alignment
= 256;
2332 if (COMPLEX_MODE_P (mode
))
2333 mode_alignment
/= 2;
2334 /* Misaligned fields are always returned in memory. */
2335 if (bit_offset
% mode_alignment
)
2339 /* for V1xx modes, just use the base mode */
2340 if (VECTOR_MODE_P (mode
) && mode
!= V1DImode
&& mode
!= V1TImode
2341 && GET_MODE_UNIT_SIZE (mode
) == bytes
)
2342 mode
= GET_MODE_INNER (mode
);
2344 /* Classification of atomic types. */
2349 classes
[0] = X86_64_SSE_CLASS
;
2352 classes
[0] = X86_64_SSE_CLASS
;
2353 classes
[1] = X86_64_SSEUP_CLASS
;
2363 int size
= bit_offset
+ (int) GET_MODE_BITSIZE (mode
);
2365 /* Analyze last 128 bits only. */
2366 size
= (size
- 1) & 0x7f;
2370 classes
[0] = X86_64_INTEGERSI_CLASS
;
2375 classes
[0] = X86_64_INTEGER_CLASS
;
2378 else if (size
< 64+32)
2380 classes
[0] = X86_64_INTEGER_CLASS
;
2381 classes
[1] = X86_64_INTEGERSI_CLASS
;
2384 else if (size
< 64+64)
2386 classes
[0] = classes
[1] = X86_64_INTEGER_CLASS
;
2394 classes
[0] = classes
[1] = X86_64_INTEGER_CLASS
;
2398 /* OImode shouldn't be used directly. */
2404 if (!(bit_offset
% 64))
2405 classes
[0] = X86_64_SSEHF_CLASS
;
2407 classes
[0] = X86_64_SSE_CLASS
;
2410 if (!(bit_offset
% 64))
2411 classes
[0] = X86_64_SSESF_CLASS
;
2413 classes
[0] = X86_64_SSE_CLASS
;
2416 classes
[0] = X86_64_SSEDF_CLASS
;
2419 classes
[0] = X86_64_X87_CLASS
;
2420 classes
[1] = X86_64_X87UP_CLASS
;
2423 classes
[0] = X86_64_SSE_CLASS
;
2424 classes
[1] = X86_64_SSEUP_CLASS
;
2428 classes
[0] = X86_64_SSE_CLASS
;
2429 if (!(bit_offset
% 64))
2433 classes
[1] = X86_64_SSEHF_CLASS
;
2437 classes
[0] = X86_64_SSE_CLASS
;
2438 if (!(bit_offset
% 64))
2444 if (!warned
&& warn_psabi
)
2447 inform (input_location
,
2448 "the ABI of passing structure with %<complex float%>"
2449 " member has changed in GCC 4.4");
2451 classes
[1] = X86_64_SSESF_CLASS
;
2455 classes
[0] = X86_64_SSEDF_CLASS
;
2456 classes
[1] = X86_64_SSEDF_CLASS
;
2459 classes
[0] = X86_64_COMPLEX_X87_CLASS
;
      /* These modes are larger than 16 bytes.  */
2472 classes
[0] = X86_64_SSE_CLASS
;
2473 classes
[1] = X86_64_SSEUP_CLASS
;
2474 classes
[2] = X86_64_SSEUP_CLASS
;
2475 classes
[3] = X86_64_SSEUP_CLASS
;
2485 classes
[0] = X86_64_SSE_CLASS
;
2486 classes
[1] = X86_64_SSEUP_CLASS
;
2487 classes
[2] = X86_64_SSEUP_CLASS
;
2488 classes
[3] = X86_64_SSEUP_CLASS
;
2489 classes
[4] = X86_64_SSEUP_CLASS
;
2490 classes
[5] = X86_64_SSEUP_CLASS
;
2491 classes
[6] = X86_64_SSEUP_CLASS
;
2492 classes
[7] = X86_64_SSEUP_CLASS
;
2502 classes
[0] = X86_64_SSE_CLASS
;
2503 classes
[1] = X86_64_SSEUP_CLASS
;
2515 classes
[0] = X86_64_SSE_CLASS
;
2521 gcc_assert (VECTOR_MODE_P (mode
));
2526 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode
)) == MODE_INT
);
2528 if (bit_offset
+ GET_MODE_BITSIZE (mode
) <= 32)
2529 classes
[0] = X86_64_INTEGERSI_CLASS
;
2531 classes
[0] = X86_64_INTEGER_CLASS
;
2532 classes
[1] = X86_64_INTEGER_CLASS
;
2533 return 1 + (bytes
> 8);
/* Wrapper around classify_argument with the extra zero_width_bitfields
   argument, to diagnose GCC 12.1 ABI differences for C.  */

static int
classify_argument (machine_mode mode, const_tree type,
		   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  int zero_width_bitfields = 0;
  static bool warned = false;
  int n = classify_argument (mode, type, classes, bit_offset,
			     zero_width_bitfields);
  if (!zero_width_bitfields || warned || !warn_psabi)
    return n;

  enum x86_64_reg_class alt_classes[MAX_CLASSES];
  zero_width_bitfields = 2;
  if (classify_argument (mode, type, alt_classes, bit_offset,
			 zero_width_bitfields) != n)
    zero_width_bitfields = 3;
  else
    for (int i = 0; i < n; i++)
      if (classes[i] != alt_classes[i])
	{
	  zero_width_bitfields = 3;
	  break;
	}
  if (zero_width_bitfields == 3)
    {
      warned = true;
      const char *url
	= CHANGES_ROOT_URL "gcc-12/changes.html#zero_width_bitfields";

      inform (input_location,
	      "the ABI of passing C structures with zero-width bit-fields"
	      " has changed in GCC %{12.1%}", url);
    }

  return n;
}
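
/* Note added for illustration (not part of the original sources): the
   wrapper above classifies a type twice -- once honouring and once ignoring
   C zero-width bit-fields -- and warns when the two classifications differ.
   A hypothetical

     struct S { float x; int : 0; float y; };

   is the kind of case the diagnostic is about: its eightbyte classification
   may differ depending on whether the zero-width bit-field is ignored.  */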
/* Examine the argument and set the number of registers required in each
   class.  Return true iff the parameter should be passed in memory.  */

static bool
examine_argument (machine_mode mode, const_tree type, int in_return,
		  int *int_nregs, int *sse_nregs)
{
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n = classify_argument (mode, type, regclass, 0);

  *int_nregs = 0;
  *sse_nregs = 0;

  if (!n)
    return true;
  for (n--; n >= 0; n--)
    switch (regclass[n])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
	(*int_nregs)++;
	break;
      case X86_64_SSE_CLASS:
      case X86_64_SSEHF_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
	(*sse_nregs)++;
	break;
      case X86_64_NO_CLASS:
      case X86_64_SSEUP_CLASS:
	break;
      case X86_64_X87_CLASS:
      case X86_64_X87UP_CLASS:
      case X86_64_COMPLEX_X87_CLASS:
	if (!in_return)
	  return true;
	break;
      case X86_64_MEMORY_CLASS:
	gcc_unreachable ();
      }

  return false;
}
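
/* Usage sketch (added for illustration, not part of the original sources):

     int int_nregs, sse_nregs;
     if (!examine_argument (TYPE_MODE (type), type, 0,
			    &int_nregs, &sse_nregs))
       ;  /* e.g. a "struct { long l; double d; }" yields
	     int_nregs == 1 and sse_nregs == 1.  */

   A true return value means the parameter must be passed in memory.  */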
2619 /* Construct container for the argument used by GCC interface. See
2620 FUNCTION_ARG for the detailed description. */
2623 construct_container (machine_mode mode
, machine_mode orig_mode
,
2624 const_tree type
, int in_return
, int nintregs
, int nsseregs
,
2625 const int *intreg
, int sse_regno
)
2627 /* The following variables hold the static issued_error state. */
2628 static bool issued_sse_arg_error
;
2629 static bool issued_sse_ret_error
;
2630 static bool issued_x87_ret_error
;
2632 machine_mode tmpmode
;
2634 = mode
== BLKmode
? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
2635 enum x86_64_reg_class regclass
[MAX_CLASSES
];
2639 int needed_sseregs
, needed_intregs
;
2640 rtx exp
[MAX_CLASSES
];
2643 n
= classify_argument (mode
, type
, regclass
, 0);
2646 if (examine_argument (mode
, type
, in_return
, &needed_intregs
,
2649 if (needed_intregs
> nintregs
|| needed_sseregs
> nsseregs
)
2652 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2653 some less clueful developer tries to use floating-point anyway. */
2654 if (needed_sseregs
&& !TARGET_SSE
)
2656 /* Return early if we shouldn't raise an error for invalid
2658 if (cfun
!= NULL
&& cfun
->machine
->silent_p
)
2662 if (!issued_sse_ret_error
)
2664 error ("SSE register return with SSE disabled");
2665 issued_sse_ret_error
= true;
2668 else if (!issued_sse_arg_error
)
2670 error ("SSE register argument with SSE disabled");
2671 issued_sse_arg_error
= true;
2676 /* Likewise, error if the ABI requires us to return values in the
2677 x87 registers and the user specified -mno-80387. */
2678 if (!TARGET_FLOAT_RETURNS_IN_80387
&& in_return
)
2679 for (i
= 0; i
< n
; i
++)
2680 if (regclass
[i
] == X86_64_X87_CLASS
2681 || regclass
[i
] == X86_64_X87UP_CLASS
2682 || regclass
[i
] == X86_64_COMPLEX_X87_CLASS
)
2684 /* Return early if we shouldn't raise an error for invalid
2686 if (cfun
!= NULL
&& cfun
->machine
->silent_p
)
2688 if (!issued_x87_ret_error
)
2690 error ("x87 register return with x87 disabled");
2691 issued_x87_ret_error
= true;
2696 /* First construct simple cases. Avoid SCmode, since we want to use
2697 single register to pass this type. */
2698 if (n
== 1 && mode
!= SCmode
&& mode
!= HCmode
)
2699 switch (regclass
[0])
2701 case X86_64_INTEGER_CLASS
:
2702 case X86_64_INTEGERSI_CLASS
:
2703 return gen_rtx_REG (mode
, intreg
[0]);
2704 case X86_64_SSE_CLASS
:
2705 case X86_64_SSEHF_CLASS
:
2706 case X86_64_SSESF_CLASS
:
2707 case X86_64_SSEDF_CLASS
:
2708 if (mode
!= BLKmode
)
2709 return gen_reg_or_parallel (mode
, orig_mode
,
2710 GET_SSE_REGNO (sse_regno
));
2712 case X86_64_X87_CLASS
:
2713 case X86_64_COMPLEX_X87_CLASS
:
2714 return gen_rtx_REG (mode
, FIRST_STACK_REG
);
2715 case X86_64_NO_CLASS
:
2716 /* Zero sized array, struct or class. */
2722 && regclass
[0] == X86_64_SSE_CLASS
2723 && regclass
[1] == X86_64_SSEUP_CLASS
2725 return gen_reg_or_parallel (mode
, orig_mode
,
2726 GET_SSE_REGNO (sse_regno
));
2728 && regclass
[0] == X86_64_SSE_CLASS
2729 && regclass
[1] == X86_64_SSEUP_CLASS
2730 && regclass
[2] == X86_64_SSEUP_CLASS
2731 && regclass
[3] == X86_64_SSEUP_CLASS
2733 return gen_reg_or_parallel (mode
, orig_mode
,
2734 GET_SSE_REGNO (sse_regno
));
2736 && regclass
[0] == X86_64_SSE_CLASS
2737 && regclass
[1] == X86_64_SSEUP_CLASS
2738 && regclass
[2] == X86_64_SSEUP_CLASS
2739 && regclass
[3] == X86_64_SSEUP_CLASS
2740 && regclass
[4] == X86_64_SSEUP_CLASS
2741 && regclass
[5] == X86_64_SSEUP_CLASS
2742 && regclass
[6] == X86_64_SSEUP_CLASS
2743 && regclass
[7] == X86_64_SSEUP_CLASS
2745 return gen_reg_or_parallel (mode
, orig_mode
,
2746 GET_SSE_REGNO (sse_regno
));
2748 && regclass
[0] == X86_64_X87_CLASS
2749 && regclass
[1] == X86_64_X87UP_CLASS
)
2750 return gen_rtx_REG (XFmode
, FIRST_STACK_REG
);
2753 && regclass
[0] == X86_64_INTEGER_CLASS
2754 && regclass
[1] == X86_64_INTEGER_CLASS
2755 && (mode
== CDImode
|| mode
== TImode
|| mode
== BLKmode
)
2756 && intreg
[0] + 1 == intreg
[1])
2758 if (mode
== BLKmode
)
2760 /* Use TImode for BLKmode values in 2 integer registers. */
2761 exp
[0] = gen_rtx_EXPR_LIST (VOIDmode
,
2762 gen_rtx_REG (TImode
, intreg
[0]),
2764 ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
2765 XVECEXP (ret
, 0, 0) = exp
[0];
2769 return gen_rtx_REG (mode
, intreg
[0]);
2772 /* Otherwise figure out the entries of the PARALLEL. */
2773 for (i
= 0; i
< n
; i
++)
2777 switch (regclass
[i
])
2779 case X86_64_NO_CLASS
:
2781 case X86_64_INTEGER_CLASS
:
2782 case X86_64_INTEGERSI_CLASS
:
2783 /* Merge TImodes on aligned occasions here too. */
2784 if (i
* 8 + 8 > bytes
)
2786 unsigned int tmpbits
= (bytes
- i
* 8) * BITS_PER_UNIT
;
2787 if (!int_mode_for_size (tmpbits
, 0).exists (&tmpmode
))
2788 /* We've requested 24 bytes we
2789 don't have mode for. Use DImode. */
2792 else if (regclass
[i
] == X86_64_INTEGERSI_CLASS
)
2797 = gen_rtx_EXPR_LIST (VOIDmode
,
2798 gen_rtx_REG (tmpmode
, *intreg
),
2802 case X86_64_SSEHF_CLASS
:
2803 tmpmode
= (mode
== BFmode
? BFmode
: HFmode
);
2805 = gen_rtx_EXPR_LIST (VOIDmode
,
2806 gen_rtx_REG (tmpmode
,
2807 GET_SSE_REGNO (sse_regno
)),
2811 case X86_64_SSESF_CLASS
:
2813 = gen_rtx_EXPR_LIST (VOIDmode
,
2814 gen_rtx_REG (SFmode
,
2815 GET_SSE_REGNO (sse_regno
)),
2819 case X86_64_SSEDF_CLASS
:
2821 = gen_rtx_EXPR_LIST (VOIDmode
,
2822 gen_rtx_REG (DFmode
,
2823 GET_SSE_REGNO (sse_regno
)),
2827 case X86_64_SSE_CLASS
:
2835 if (i
== 0 && regclass
[1] == X86_64_SSEUP_CLASS
)
2845 && regclass
[1] == X86_64_SSEUP_CLASS
2846 && regclass
[2] == X86_64_SSEUP_CLASS
2847 && regclass
[3] == X86_64_SSEUP_CLASS
);
2853 && regclass
[1] == X86_64_SSEUP_CLASS
2854 && regclass
[2] == X86_64_SSEUP_CLASS
2855 && regclass
[3] == X86_64_SSEUP_CLASS
2856 && regclass
[4] == X86_64_SSEUP_CLASS
2857 && regclass
[5] == X86_64_SSEUP_CLASS
2858 && regclass
[6] == X86_64_SSEUP_CLASS
2859 && regclass
[7] == X86_64_SSEUP_CLASS
);
2867 = gen_rtx_EXPR_LIST (VOIDmode
,
2868 gen_rtx_REG (tmpmode
,
2869 GET_SSE_REGNO (sse_regno
)),
2878 /* Empty aligned struct, union or class. */
2882 ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (nexps
));
2883 for (i
= 0; i
< nexps
; i
++)
2884 XVECEXP (ret
, 0, i
) = exp
[i
];
/* Update the data in CUM to advance over an argument of mode MODE
   and data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)

   Return the number of integer registers advanced over.  */
2895 function_arg_advance_32 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
2896 const_tree type
, HOST_WIDE_INT bytes
,
2897 HOST_WIDE_INT words
)
2900 bool error_p
= false;
2904 /* Intel MCU psABI passes scalars and aggregates no larger than 8
2905 bytes in registers. */
2906 if (!VECTOR_MODE_P (mode
) && bytes
<= 8)
2926 cum
->words
+= words
;
2927 cum
->nregs
-= words
;
2928 cum
->regno
+= words
;
2929 if (cum
->nregs
>= 0)
2931 if (cum
->nregs
<= 0)
2934 cfun
->machine
->arg_reg_available
= false;
2940 /* OImode shouldn't be used directly. */
2944 if (cum
->float_in_sse
== -1)
2946 if (cum
->float_in_sse
< 2)
2950 if (cum
->float_in_sse
== -1)
2952 if (cum
->float_in_sse
< 1)
2981 if (!type
|| !AGGREGATE_TYPE_P (type
))
2983 cum
->sse_words
+= words
;
2984 cum
->sse_nregs
-= 1;
2985 cum
->sse_regno
+= 1;
2986 if (cum
->sse_nregs
<= 0)
3002 if (!type
|| !AGGREGATE_TYPE_P (type
))
3004 cum
->mmx_words
+= words
;
3005 cum
->mmx_nregs
-= 1;
3006 cum
->mmx_regno
+= 1;
3007 if (cum
->mmx_nregs
<= 0)
3017 cum
->float_in_sse
= 0;
3018 error ("calling %qD with SSE calling convention without "
3019 "SSE/SSE2 enabled", cum
->decl
);
3020 sorry ("this is a GCC bug that can be worked around by adding "
3021 "attribute used to function called");
3028 function_arg_advance_64 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
3029 const_tree type
, HOST_WIDE_INT words
, bool named
)
3031 int int_nregs
, sse_nregs
;
3033 /* Unnamed 512 and 256bit vector mode parameters are passed on stack. */
3034 if (!named
&& (VALID_AVX512F_REG_MODE (mode
)
3035 || VALID_AVX256_REG_MODE (mode
)))
3038 if (!examine_argument (mode
, type
, 0, &int_nregs
, &sse_nregs
)
3039 && sse_nregs
<= cum
->sse_nregs
&& int_nregs
<= cum
->nregs
)
3041 cum
->nregs
-= int_nregs
;
3042 cum
->sse_nregs
-= sse_nregs
;
3043 cum
->regno
+= int_nregs
;
3044 cum
->sse_regno
+= sse_nregs
;
3049 int align
= ix86_function_arg_boundary (mode
, type
) / BITS_PER_WORD
;
3050 cum
->words
= ROUND_UP (cum
->words
, align
);
3051 cum
->words
+= words
;
3057 function_arg_advance_ms_64 (CUMULATIVE_ARGS
*cum
, HOST_WIDE_INT bytes
,
3058 HOST_WIDE_INT words
)
3060 /* Otherwise, this should be passed indirect. */
3061 gcc_assert (bytes
== 1 || bytes
== 2 || bytes
== 4 || bytes
== 8);
3063 cum
->words
+= words
;
3073 /* Update the data in CUM to advance over argument ARG. */
3076 ix86_function_arg_advance (cumulative_args_t cum_v
,
3077 const function_arg_info
&arg
)
3079 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
3080 machine_mode mode
= arg
.mode
;
3081 HOST_WIDE_INT bytes
, words
;
3084 /* The argument of interrupt handler is a special case and is
3085 handled in ix86_function_arg. */
3086 if (!cum
->caller
&& cfun
->machine
->func_type
!= TYPE_NORMAL
)
3089 bytes
= arg
.promoted_size_in_bytes ();
3090 words
= CEIL (bytes
, UNITS_PER_WORD
);
3093 mode
= type_natural_mode (arg
.type
, NULL
, false);
3097 enum calling_abi call_abi
= cum
? cum
->call_abi
: ix86_abi
;
3099 if (call_abi
== MS_ABI
)
3100 nregs
= function_arg_advance_ms_64 (cum
, bytes
, words
);
3102 nregs
= function_arg_advance_64 (cum
, mode
, arg
.type
, words
,
3106 nregs
= function_arg_advance_32 (cum
, mode
, arg
.type
, bytes
, words
);
3110 /* Track if there are outgoing arguments on stack. */
3112 cfun
->machine
->outgoing_args_on_stack
= true;
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).  */
3130 function_arg_32 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
3131 machine_mode orig_mode
, const_tree type
,
3132 HOST_WIDE_INT bytes
, HOST_WIDE_INT words
)
3134 bool error_p
= false;
3136 /* Avoid the AL settings for the Unix64 ABI. */
3137 if (mode
== VOIDmode
)
3142 /* Intel MCU psABI passes scalars and aggregates no larger than 8
3143 bytes in registers. */
3144 if (!VECTOR_MODE_P (mode
) && bytes
<= 8)
3163 if (words
<= cum
->nregs
)
3165 int regno
= cum
->regno
;
3167 /* Fastcall allocates the first two DWORD (SImode) or
3168 smaller arguments to ECX and EDX if it isn't an
3174 || (type
&& AGGREGATE_TYPE_P (type
)))
3177 /* ECX not EAX is the first allocated register. */
3178 if (regno
== AX_REG
)
3181 return gen_rtx_REG (mode
, regno
);
3186 if (cum
->float_in_sse
== -1)
3188 if (cum
->float_in_sse
< 2)
3192 if (cum
->float_in_sse
== -1)
3194 if (cum
->float_in_sse
< 1)
3198 /* In 32bit, we pass TImode in xmm registers. */
3207 if (!type
|| !AGGREGATE_TYPE_P (type
))
3210 return gen_reg_or_parallel (mode
, orig_mode
,
3211 cum
->sse_regno
+ FIRST_SSE_REG
);
3217 /* OImode and XImode shouldn't be used directly. */
3236 if (!type
|| !AGGREGATE_TYPE_P (type
))
3239 return gen_reg_or_parallel (mode
, orig_mode
,
3240 cum
->sse_regno
+ FIRST_SSE_REG
);
3252 if (!type
|| !AGGREGATE_TYPE_P (type
))
3255 return gen_reg_or_parallel (mode
, orig_mode
,
3256 cum
->mmx_regno
+ FIRST_MMX_REG
);
3262 cum
->float_in_sse
= 0;
3263 error ("calling %qD with SSE calling convention without "
3264 "SSE/SSE2 enabled", cum
->decl
);
3265 sorry ("this is a GCC bug that can be worked around by adding "
3266 "attribute used to function called");
3273 function_arg_64 (const CUMULATIVE_ARGS
*cum
, machine_mode mode
,
3274 machine_mode orig_mode
, const_tree type
, bool named
)
3276 /* Handle a hidden AL argument containing number of registers
3277 for varargs x86-64 functions. */
3278 if (mode
== VOIDmode
)
3279 return GEN_INT (cum
->maybe_vaarg
3280 ? (cum
->sse_nregs
< 0
3281 ? X86_64_SSE_REGPARM_MAX
3306 /* Unnamed 256 and 512bit vector mode parameters are passed on stack. */
3312 return construct_container (mode
, orig_mode
, type
, 0, cum
->nregs
,
3314 &x86_64_int_parameter_registers
[cum
->regno
],
3319 function_arg_ms_64 (const CUMULATIVE_ARGS
*cum
, machine_mode mode
,
3320 machine_mode orig_mode
, bool named
, const_tree type
,
3321 HOST_WIDE_INT bytes
)
3325 /* We need to add clobber for MS_ABI->SYSV ABI calls in expand_call.
3326 We use value of -2 to specify that current function call is MSABI. */
3327 if (mode
== VOIDmode
)
3328 return GEN_INT (-2);
3330 /* If we've run out of registers, it goes on the stack. */
3331 if (cum
->nregs
== 0)
3334 regno
= x86_64_ms_abi_int_parameter_registers
[cum
->regno
];
3336 /* Only floating point modes are passed in anything but integer regs. */
3337 if (TARGET_SSE
&& (mode
== SFmode
|| mode
== DFmode
))
3341 if (type
== NULL_TREE
|| !AGGREGATE_TYPE_P (type
))
3342 regno
= cum
->regno
+ FIRST_SSE_REG
;
3348 /* Unnamed floating parameters are passed in both the
3349 SSE and integer registers. */
3350 t1
= gen_rtx_REG (mode
, cum
->regno
+ FIRST_SSE_REG
);
3351 t2
= gen_rtx_REG (mode
, regno
);
3352 t1
= gen_rtx_EXPR_LIST (VOIDmode
, t1
, const0_rtx
);
3353 t2
= gen_rtx_EXPR_LIST (VOIDmode
, t2
, const0_rtx
);
3354 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, t1
, t2
));
3357 /* Handle aggregated types passed in register. */
3358 if (orig_mode
== BLKmode
)
3360 if (bytes
> 0 && bytes
<= 8)
3361 mode
= (bytes
> 4 ? DImode
: SImode
);
3362 if (mode
== BLKmode
)
3366 return gen_reg_or_parallel (mode
, orig_mode
, regno
);
/* Return where to put the arguments to a function.
   Return zero to push the argument on the stack, or a hard register in
   which to store the argument.

   ARG describes the argument while CUM gives information about the
   preceding args and about the function being called.  */
3376 ix86_function_arg (cumulative_args_t cum_v
, const function_arg_info
&arg
)
3378 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
3379 machine_mode mode
= arg
.mode
;
3380 HOST_WIDE_INT bytes
, words
;
3383 if (!cum
->caller
&& cfun
->machine
->func_type
!= TYPE_NORMAL
)
3385 gcc_assert (arg
.type
!= NULL_TREE
);
3386 if (POINTER_TYPE_P (arg
.type
))
3388 /* This is the pointer argument. */
3389 gcc_assert (TYPE_MODE (arg
.type
) == ptr_mode
);
3390 /* It is at -WORD(AP) in the current frame in interrupt and
3391 exception handlers. */
3392 reg
= plus_constant (Pmode
, arg_pointer_rtx
, -UNITS_PER_WORD
);
3396 gcc_assert (cfun
->machine
->func_type
== TYPE_EXCEPTION
3397 && TREE_CODE (arg
.type
) == INTEGER_TYPE
3398 && TYPE_MODE (arg
.type
) == word_mode
);
3399 /* The error code is the word-mode integer argument at
3400 -2 * WORD(AP) in the current frame of the exception
3402 reg
= gen_rtx_MEM (word_mode
,
3403 plus_constant (Pmode
,
3405 -2 * UNITS_PER_WORD
));
3410 bytes
= arg
.promoted_size_in_bytes ();
3411 words
= CEIL (bytes
, UNITS_PER_WORD
);
3413 /* To simplify the code below, represent vector types with a vector mode
3414 even if MMX/SSE are not active. */
3415 if (arg
.type
&& VECTOR_TYPE_P (arg
.type
))
3416 mode
= type_natural_mode (arg
.type
, cum
, false);
3420 enum calling_abi call_abi
= cum
? cum
->call_abi
: ix86_abi
;
3422 if (call_abi
== MS_ABI
)
3423 reg
= function_arg_ms_64 (cum
, mode
, arg
.mode
, arg
.named
,
3426 reg
= function_arg_64 (cum
, mode
, arg
.mode
, arg
.type
, arg
.named
);
3429 reg
= function_arg_32 (cum
, mode
, arg
.mode
, arg
.type
, bytes
, words
);
3431 /* Track if there are outgoing arguments on stack. */
3432 if (reg
== NULL_RTX
&& cum
->caller
)
3433 cfun
->machine
->outgoing_args_on_stack
= true;
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.  */
3445 ix86_pass_by_reference (cumulative_args_t cum_v
, const function_arg_info
&arg
)
3447 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
3451 enum calling_abi call_abi
= cum
? cum
->call_abi
: ix86_abi
;
3453 /* See Windows x64 Software Convention. */
3454 if (call_abi
== MS_ABI
)
3456 HOST_WIDE_INT msize
= GET_MODE_SIZE (arg
.mode
);
3458 if (tree type
= arg
.type
)
3460 /* Arrays are passed by reference. */
3461 if (TREE_CODE (type
) == ARRAY_TYPE
)
3464 if (RECORD_OR_UNION_TYPE_P (type
))
3466 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
3467 are passed by reference. */
3468 msize
= int_size_in_bytes (type
);
3472 /* __m128 is passed by reference. */
3473 return msize
!= 1 && msize
!= 2 && msize
!= 4 && msize
!= 8;
3475 else if (arg
.type
&& int_size_in_bytes (arg
.type
) == -1)
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  XXX: This function is obsolete and is only used for
   checking psABI compatibility with previous versions of GCC.  */

static bool
ix86_compat_aligned_value_p (const_tree type)
{
3489 machine_mode mode
= TYPE_MODE (type
);
3490 if (((TARGET_SSE
&& SSE_REG_MODE_P (mode
))
3494 && (!TYPE_USER_ALIGN (type
) || TYPE_ALIGN (type
) > 128))
3496 if (TYPE_ALIGN (type
) < 128)
3499 if (AGGREGATE_TYPE_P (type
))
3501 /* Walk the aggregates recursively. */
3502 switch (TREE_CODE (type
))
3506 case QUAL_UNION_TYPE
:
3510 /* Walk all the structure fields. */
3511 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
3513 if (TREE_CODE (field
) == FIELD_DECL
3514 && ix86_compat_aligned_value_p (TREE_TYPE (field
)))
	  /* Just for use if some languages pass arrays by value.  */
3522 if (ix86_compat_aligned_value_p (TREE_TYPE (type
)))
/* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
   XXX: This function is obsolete and is only used for checking psABI
   compatibility with previous versions of GCC.  */

static unsigned int
ix86_compat_function_arg_boundary (machine_mode mode,
				   const_tree type, unsigned int align)
{
3541 /* In 32bit, only _Decimal128 and __float128 are aligned to their
3542 natural boundaries. */
3543 if (!TARGET_64BIT
&& mode
!= TDmode
&& mode
!= TFmode
)
3545 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3546 make an exception for SSE modes since these require 128bit
3549 The handling here differs from field_alignment. ICC aligns MMX
3550 arguments to 4 byte boundaries, while structure fields are aligned
3551 to 8 byte boundaries. */
3554 if (!(TARGET_SSE
&& SSE_REG_MODE_P (mode
)))
3555 align
= PARM_BOUNDARY
;
3559 if (!ix86_compat_aligned_value_p (type
))
3560 align
= PARM_BOUNDARY
;
3563 if (align
> BIGGEST_ALIGNMENT
)
3564 align
= BIGGEST_ALIGNMENT
;
/* Return true when TYPE should be 128bit aligned for 32bit argument
   passing ABI.  */

static bool
ix86_contains_aligned_value_p (const_tree type)
{
  machine_mode mode = TYPE_MODE (type);
3576 if (mode
== XFmode
|| mode
== XCmode
)
3579 if (TYPE_ALIGN (type
) < 128)
3582 if (AGGREGATE_TYPE_P (type
))
3584 /* Walk the aggregates recursively. */
3585 switch (TREE_CODE (type
))
3589 case QUAL_UNION_TYPE
:
3593 /* Walk all the structure fields. */
3594 for (field
= TYPE_FIELDS (type
);
3596 field
= DECL_CHAIN (field
))
3598 if (TREE_CODE (field
) == FIELD_DECL
3599 && ix86_contains_aligned_value_p (TREE_TYPE (field
)))
	  /* Just for use if some languages pass arrays by value.  */
3607 if (ix86_contains_aligned_value_p (TREE_TYPE (type
)))
3616 return TYPE_ALIGN (type
) >= 128;
/* Gives the alignment boundary, in bits, of an argument with the
   specified mode and type.  */

static unsigned int
ix86_function_arg_boundary (machine_mode mode, const_tree type)
{
3630 /* Since the main variant type is used for call, we convert it to
3631 the main variant type. */
3632 type
= TYPE_MAIN_VARIANT (type
);
3633 align
= TYPE_ALIGN (type
);
3634 if (TYPE_EMPTY_P (type
))
3635 return PARM_BOUNDARY
;
3638 align
= GET_MODE_ALIGNMENT (mode
);
3639 if (align
< PARM_BOUNDARY
)
3640 align
= PARM_BOUNDARY
;
3644 unsigned int saved_align
= align
;
3648 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
3651 if (mode
== XFmode
|| mode
== XCmode
)
3652 align
= PARM_BOUNDARY
;
3654 else if (!ix86_contains_aligned_value_p (type
))
3655 align
= PARM_BOUNDARY
;
3658 align
= PARM_BOUNDARY
;
3663 && align
!= ix86_compat_function_arg_boundary (mode
, type
,
3667 inform (input_location
,
3668 "the ABI for passing parameters with %d-byte"
3669 " alignment has changed in GCC 4.6",
3670 align
/ BITS_PER_UNIT
);
3677 /* Return true if N is a possible register number of function value. */
3680 ix86_function_value_regno_p (const unsigned int regno
)
3687 return (!TARGET_64BIT
|| ix86_cfun_abi () != MS_ABI
);
3690 return TARGET_64BIT
&& ix86_cfun_abi () != MS_ABI
;
3692 /* Complex values are returned in %st(0)/%st(1) pair. */
3695 /* TODO: The function should depend on current function ABI but
3696 builtins.cc would need updating then. Therefore we use the
3698 if (TARGET_64BIT
&& ix86_cfun_abi () == MS_ABI
)
3700 return TARGET_FLOAT_RETURNS_IN_80387
;
3702 /* Complex values are returned in %xmm0/%xmm1 pair. */
3708 if (TARGET_MACHO
|| TARGET_64BIT
)
/* Check whether the register REGNO should be zeroed on X86.
   When ALL_SSE_ZEROED is true, all SSE registers have been zeroed
   together, no need to zero it again.
   When NEED_ZERO_MMX is true, MMX registers should be cleared.  */

static bool
zero_call_used_regno_p (const unsigned int regno,
			bool all_sse_zeroed,
			bool need_zero_mmx)
{
  return GENERAL_REGNO_P (regno)
	 || (!all_sse_zeroed && SSE_REGNO_P (regno))
	 || MASK_REGNO_P (regno)
	 || (need_zero_mmx && MMX_REGNO_P (regno));
}
/* Return the machine_mode that is used to zero register REGNO.  */

static machine_mode
zero_call_used_regno_mode (const unsigned int regno)
{
  /* NB: We only need to zero the lower 32 bits for integer registers
     and the lower 128 bits for vector registers since destinations are
     zero-extended to the full register width.  */
  if (GENERAL_REGNO_P (regno))
    return SImode;
  else if (SSE_REGNO_P (regno))
    return V4SFmode;
  else if (MASK_REGNO_P (regno))
    return HImode;
  else if (MMX_REGNO_P (regno))
    return V2SImode;
  else
    gcc_unreachable ();
}
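
/* Illustration (added here, not part of the original sources): combined with
   ix86_zero_call_used_regs below, a general-purpose register such as %eax is
   cleared with an SImode zero -- typically "xorl %eax, %eax", represented as
   a SET of (const_int 0) plus a flags clobber when xor is used -- while an
   SSE register is cleared with a V4SFmode zero of its low 128 bits.  */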
/* Generate a rtx to zero all vector registers together if possible,
   otherwise, return NULL.  */

static rtx
zero_all_vector_registers (HARD_REG_SET need_zeroed_hardregs)
{
  if (!TARGET_AVX)
    return NULL;

  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if ((LEGACY_SSE_REGNO_P (regno)
	 || (TARGET_64BIT
	     && (REX_SSE_REGNO_P (regno)
		 || (TARGET_AVX512F && EXT_REX_SSE_REGNO_P (regno)))))
	&& !TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      return NULL;

  return gen_avx_vzeroall ();
}
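
/* Note (added for illustration): when every SSE register that exists on the
   target is in NEED_ZEROED_HARDREGS, a single vzeroall clears the whole
   vector register file at once instead of emitting one move per register;
   otherwise the caller falls back to zeroing the registers individually.  */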
/* Generate insns to zero all st registers together.
   Return the number of st registers that are zeroed (zero when no zeroing
   instructions are generated).
   Assuming the number of st registers that are zeroed is num_of_st,
   we emit num_of_st "fldz" insns followed by num_of_st "fstp %st(0)"
   insns to clear the stack and mark the stack slots empty.

   How to compute num_of_st:
   There is no direct mapping from stack registers to hard register
   numbers.  If one stack register needs to be cleared, we don't know
   where in the stack the value remains.  So, if any stack register
   needs to be cleared, the whole stack should be cleared.  However,
   x87 stack registers that hold the return value should be excluded.
   x87 returns in the top (two for complex values) register, so
   num_of_st should be 7/6 when x87 returns, otherwise it will be 8.  */

static int
3799 zero_all_st_registers (HARD_REG_SET need_zeroed_hardregs
)
3802 /* If the FPU is disabled, no need to zero all st registers. */
3803 if (! (TARGET_80387
|| TARGET_FLOAT_RETURNS_IN_80387
))
3806 unsigned int num_of_st
= 0;
3807 for (unsigned int regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
3808 if ((STACK_REGNO_P (regno
) || MMX_REGNO_P (regno
))
3809 && TEST_HARD_REG_BIT (need_zeroed_hardregs
, regno
))
3818 bool return_with_x87
= false;
3819 return_with_x87
= (crtl
->return_rtx
3820 && (STACK_REG_P (crtl
->return_rtx
)));
3822 bool complex_return
= false;
3823 complex_return
= (crtl
->return_rtx
3824 && COMPLEX_MODE_P (GET_MODE (crtl
->return_rtx
)));
3826 if (return_with_x87
)
3834 rtx st_reg
= gen_rtx_REG (XFmode
, FIRST_STACK_REG
);
3835 for (unsigned int i
= 0; i
< num_of_st
; i
++)
3836 emit_insn (gen_rtx_SET (st_reg
, CONST0_RTX (XFmode
)));
3838 for (unsigned int i
= 0; i
< num_of_st
; i
++)
3841 insn
= emit_insn (gen_rtx_SET (st_reg
, st_reg
));
3842 add_reg_note (insn
, REG_DEAD
, st_reg
);
/* When the routine exits in MMX mode, if any ST register needs
   to be zeroed, we should clear all MMX registers except the
   RET_MMX_REGNO that holds the return value.  */

static bool
zero_all_mm_registers (HARD_REG_SET need_zeroed_hardregs,
		       unsigned int ret_mmx_regno)
{
  bool need_zero_all_mm = false;
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (STACK_REGNO_P (regno)
	&& TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      {
	need_zero_all_mm = true;
	break;
      }

  if (!need_zero_all_mm)
    return false;

  machine_mode mode = V2SImode;
  for (unsigned int regno = FIRST_MMX_REG; regno <= LAST_MMX_REG; regno++)
    if (regno != ret_mmx_regno)
      {
	rtx reg = gen_rtx_REG (mode, regno);
	emit_insn (gen_rtx_SET (reg, CONST0_RTX (mode)));
      }

  return true;
}
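
/* Example (added for illustration, not part of the original sources): a
   function returning __m64 exits in MMX mode, so only the MMX registers
   other than the return register are cleared here, one V2SImode zero per
   register, while the x87 stack is left alone.  */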
3877 /* TARGET_ZERO_CALL_USED_REGS. */
3878 /* Generate a sequence of instructions that zero registers specified by
3879 NEED_ZEROED_HARDREGS. Return the ZEROED_HARDREGS that are actually
3882 ix86_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs
)
3884 HARD_REG_SET zeroed_hardregs
;
3885 bool all_sse_zeroed
= false;
3886 int all_st_zeroed_num
= 0;
3887 bool all_mm_zeroed
= false;
3889 CLEAR_HARD_REG_SET (zeroed_hardregs
);
3891 /* first, let's see whether we can zero all vector registers together. */
3892 rtx zero_all_vec_insn
= zero_all_vector_registers (need_zeroed_hardregs
);
3893 if (zero_all_vec_insn
)
3895 emit_insn (zero_all_vec_insn
);
3896 all_sse_zeroed
= true;
  /* The mm and st registers are a shared register set; we should follow
     these rules to clear them:

			   MMX exit mode	  x87 exit mode
      -------------|----------------------|---------------
      uses x87 reg | clear all MMX	   | clear all x87
      uses MMX reg | clear individual MMX  | clear all x87
      x87 + MMX	   | clear all MMX	   | clear all x87

     First, we should decide which mode (MMX mode or x87 mode) the function
     exits in.  */
3910 bool exit_with_mmx_mode
= (crtl
->return_rtx
3911 && (MMX_REG_P (crtl
->return_rtx
)));
3913 if (!exit_with_mmx_mode
)
3914 /* x87 exit mode, we should zero all st registers together. */
3916 all_st_zeroed_num
= zero_all_st_registers (need_zeroed_hardregs
);
3918 if (all_st_zeroed_num
> 0)
3919 for (unsigned int regno
= FIRST_STACK_REG
; regno
<= LAST_STACK_REG
; regno
++)
3920 /* x87 stack registers that hold the return value should be excluded.
3921 x87 returns in the top (two for complex values) register. */
3922 if (all_st_zeroed_num
== 8
3923 || !((all_st_zeroed_num
>= 6 && regno
== REGNO (crtl
->return_rtx
))
3924 || (all_st_zeroed_num
== 6
3925 && (regno
== (REGNO (crtl
->return_rtx
) + 1)))))
3926 SET_HARD_REG_BIT (zeroed_hardregs
, regno
);
3929 /* MMX exit mode, check whether we can zero all mm registers. */
3931 unsigned int exit_mmx_regno
= REGNO (crtl
->return_rtx
);
3932 all_mm_zeroed
= zero_all_mm_registers (need_zeroed_hardregs
,
3935 for (unsigned int regno
= FIRST_MMX_REG
; regno
<= LAST_MMX_REG
; regno
++)
3936 if (regno
!= exit_mmx_regno
)
3937 SET_HARD_REG_BIT (zeroed_hardregs
, regno
);
3940 /* Now, generate instructions to zero all the other registers. */
3942 for (unsigned int regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
3944 if (!TEST_HARD_REG_BIT (need_zeroed_hardregs
, regno
))
3946 if (!zero_call_used_regno_p (regno
, all_sse_zeroed
,
3947 exit_with_mmx_mode
&& !all_mm_zeroed
))
3950 SET_HARD_REG_BIT (zeroed_hardregs
, regno
);
3952 machine_mode mode
= zero_call_used_regno_mode (regno
);
3954 rtx reg
= gen_rtx_REG (mode
, regno
);
3955 rtx tmp
= gen_rtx_SET (reg
, CONST0_RTX (mode
));
3960 if (!TARGET_USE_MOV0
|| optimize_insn_for_size_p ())
3962 rtx clob
= gen_rtx_CLOBBER (VOIDmode
,
3963 gen_rtx_REG (CCmode
,
3965 tmp
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2,
3981 return zeroed_hardregs
;
3984 /* Define how to find the value returned by a function.
3985 VALTYPE is the data type of the value (as a tree).
3986 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3987 otherwise, FUNC is 0. */
3990 function_value_32 (machine_mode orig_mode
, machine_mode mode
,
3991 const_tree fntype
, const_tree fn
)
3995 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3996 we normally prevent this case when mmx is not available. However
3997 some ABIs may require the result to be returned like DImode. */
3998 if (VECTOR_MODE_P (mode
) && GET_MODE_SIZE (mode
) == 8)
3999 regno
= FIRST_MMX_REG
;
4001 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4002 we prevent this case when sse is not available. However some ABIs
4003 may require the result to be returned like integer TImode. */
4004 else if (mode
== TImode
4005 || (VECTOR_MODE_P (mode
) && GET_MODE_SIZE (mode
) == 16))
4006 regno
= FIRST_SSE_REG
;
4008 /* 32-byte vector modes in %ymm0. */
4009 else if (VECTOR_MODE_P (mode
) && GET_MODE_SIZE (mode
) == 32)
4010 regno
= FIRST_SSE_REG
;
4012 /* 64-byte vector modes in %zmm0. */
4013 else if (VECTOR_MODE_P (mode
) && GET_MODE_SIZE (mode
) == 64)
4014 regno
= FIRST_SSE_REG
;
4016 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
4017 else if (X87_FLOAT_MODE_P (mode
) && TARGET_FLOAT_RETURNS_IN_80387
)
4018 regno
= FIRST_FLOAT_REG
;
4020 /* Most things go in %eax. */
      /* Return __bf16/_Float16/_Complex _Float16 in an SSE register.  */
4024 if (mode
== HFmode
|| mode
== BFmode
)
4025 regno
= FIRST_SSE_REG
;
4028 rtx ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc(1));
4030 = gen_rtx_EXPR_LIST (VOIDmode
,
4031 gen_rtx_REG (SImode
, FIRST_SSE_REG
),
4036 /* Override FP return register with %xmm0 for local functions when
4037 SSE math is enabled or for functions with sseregparm attribute. */
4038 if ((fn
|| fntype
) && (mode
== SFmode
|| mode
== DFmode
))
4040 int sse_level
= ix86_function_sseregparm (fntype
, fn
, false);
4041 if (sse_level
== -1)
4043 error ("calling %qD with SSE calling convention without "
4044 "SSE/SSE2 enabled", fn
);
4045 sorry ("this is a GCC bug that can be worked around by adding "
4046 "attribute used to function called");
4048 else if ((sse_level
>= 1 && mode
== SFmode
)
4049 || (sse_level
== 2 && mode
== DFmode
))
4050 regno
= FIRST_SSE_REG
;
4053 /* OImode shouldn't be used directly. */
4054 gcc_assert (mode
!= OImode
);
4056 return gen_rtx_REG (orig_mode
, regno
);
4060 function_value_64 (machine_mode orig_mode
, machine_mode mode
,
4065 /* Handle libcalls, which don't provide a type node. */
4066 if (valtype
== NULL
)
4083 regno
= FIRST_SSE_REG
;
4087 regno
= FIRST_FLOAT_REG
;
4095 return gen_rtx_REG (mode
, regno
);
4097 else if (POINTER_TYPE_P (valtype
))
4099 /* Pointers are always returned in word_mode. */
4103 ret
= construct_container (mode
, orig_mode
, valtype
, 1,
4104 X86_64_REGPARM_MAX
, X86_64_SSE_REGPARM_MAX
,
4105 x86_64_int_return_registers
, 0);
4107 /* For zero sized structures, construct_container returns NULL, but we
4108 need to keep rest of compiler happy by returning meaningful value. */
4110 ret
= gen_rtx_REG (orig_mode
, AX_REG
);
4116 function_value_ms_32 (machine_mode orig_mode
, machine_mode mode
,
4117 const_tree fntype
, const_tree fn
, const_tree valtype
)
4121 /* Floating point return values in %st(0)
4122 (unless -mno-fp-ret-in-387 or aggregate type of up to 8 bytes). */
4123 if (X87_FLOAT_MODE_P (mode
) && TARGET_FLOAT_RETURNS_IN_80387
4124 && (GET_MODE_SIZE (mode
) > 8
4125 || valtype
== NULL_TREE
|| !AGGREGATE_TYPE_P (valtype
)))
4127 regno
= FIRST_FLOAT_REG
;
4128 return gen_rtx_REG (orig_mode
, regno
);
4131 return function_value_32(orig_mode
, mode
, fntype
,fn
);
4135 function_value_ms_64 (machine_mode orig_mode
, machine_mode mode
,
4138 unsigned int regno
= AX_REG
;
4142 switch (GET_MODE_SIZE (mode
))
4145 if (valtype
!= NULL_TREE
4146 && !VECTOR_INTEGER_TYPE_P (valtype
)
4147 && !VECTOR_INTEGER_TYPE_P (valtype
)
4148 && !INTEGRAL_TYPE_P (valtype
)
4149 && !VECTOR_FLOAT_TYPE_P (valtype
))
4151 if ((SCALAR_INT_MODE_P (mode
) || VECTOR_MODE_P (mode
))
4152 && !COMPLEX_MODE_P (mode
))
4153 regno
= FIRST_SSE_REG
;
4157 if (valtype
!= NULL_TREE
&& AGGREGATE_TYPE_P (valtype
))
4159 if (mode
== SFmode
|| mode
== DFmode
)
4160 regno
= FIRST_SSE_REG
;
4166 return gen_rtx_REG (orig_mode
, regno
);
4170 ix86_function_value_1 (const_tree valtype
, const_tree fntype_or_decl
,
4171 machine_mode orig_mode
, machine_mode mode
)
4173 const_tree fn
, fntype
;
4176 if (fntype_or_decl
&& DECL_P (fntype_or_decl
))
4177 fn
= fntype_or_decl
;
4178 fntype
= fn
? TREE_TYPE (fn
) : fntype_or_decl
;
4180 if (ix86_function_type_abi (fntype
) == MS_ABI
)
4183 return function_value_ms_64 (orig_mode
, mode
, valtype
);
4185 return function_value_ms_32 (orig_mode
, mode
, fntype
, fn
, valtype
);
4187 else if (TARGET_64BIT
)
4188 return function_value_64 (orig_mode
, mode
, valtype
);
4190 return function_value_32 (orig_mode
, mode
, fntype
, fn
);
4194 ix86_function_value (const_tree valtype
, const_tree fntype_or_decl
, bool)
4196 machine_mode mode
, orig_mode
;
4198 orig_mode
= TYPE_MODE (valtype
);
4199 mode
= type_natural_mode (valtype
, NULL
, true);
4200 return ix86_function_value_1 (valtype
, fntype_or_decl
, orig_mode
, mode
);
/* Pointer function arguments and return values are promoted to
   word_mode for normal functions.  */

static machine_mode
ix86_promote_function_mode (const_tree type, machine_mode mode,
			    int *punsignedp, const_tree fntype,
			    int for_return)
{
  if (cfun->machine->func_type == TYPE_NORMAL
      && type != NULL_TREE
      && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return word_mode;
    }
  return default_promote_function_mode (type, mode, punsignedp, fntype,
					for_return);
}
/* Return true if a structure, union or array with MODE containing FIELD
   should be accessed using BLKmode.  */

static bool
ix86_member_type_forces_blk (const_tree field, machine_mode mode)
{
  /* Union with XFmode must be in BLKmode.  */
  return (mode == XFmode
	  && (TREE_CODE (DECL_FIELD_CONTEXT (field)) == UNION_TYPE
	      || TREE_CODE (DECL_FIELD_CONTEXT (field)) == QUAL_UNION_TYPE));
}

static rtx
ix86_libcall_value (machine_mode mode)
{
  return ix86_function_value_1 (NULL, NULL, mode, mode);
}
4240 /* Return true iff type is returned in memory. */
4243 ix86_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
4245 const machine_mode mode
= type_natural_mode (type
, NULL
, true);
4250 if (ix86_function_type_abi (fntype
) == MS_ABI
)
4252 size
= int_size_in_bytes (type
);
4254 /* __m128 is returned in xmm0. */
4255 if ((!type
|| VECTOR_INTEGER_TYPE_P (type
)
4256 || INTEGRAL_TYPE_P (type
)
4257 || VECTOR_FLOAT_TYPE_P (type
))
4258 && (SCALAR_INT_MODE_P (mode
) || VECTOR_MODE_P (mode
))
4259 && !COMPLEX_MODE_P (mode
)
4260 && (GET_MODE_SIZE (mode
) == 16 || size
== 16))
4263 /* Otherwise, the size must be exactly in [1248]. */
4264 return size
!= 1 && size
!= 2 && size
!= 4 && size
!= 8;
4268 int needed_intregs
, needed_sseregs
;
4270 return examine_argument (mode
, type
, 1,
4271 &needed_intregs
, &needed_sseregs
);
4276 size
= int_size_in_bytes (type
);
4278 /* Intel MCU psABI returns scalars and aggregates no larger than 8
4279 bytes in registers. */
4281 return VECTOR_MODE_P (mode
) || size
< 0 || size
> 8;
4283 if (mode
== BLKmode
)
4286 if (MS_AGGREGATE_RETURN
&& AGGREGATE_TYPE_P (type
) && size
<= 8)
4289 if (VECTOR_MODE_P (mode
) || mode
== TImode
)
4291 /* User-created vectors small enough to fit in EAX. */
      /* Unless the ABI prescribes otherwise,
	 MMX/3dNow values are returned in MM0 if available.  */
4299 return TARGET_VECT8_RETURNS
|| !TARGET_MMX
;
4301 /* SSE values are returned in XMM0 if available. */
4305 /* AVX values are returned in YMM0 if available. */
4309 /* AVX512F values are returned in ZMM0 if available. */
4311 return !TARGET_AVX512F
;
4320 /* OImode shouldn't be used directly. */
4321 gcc_assert (mode
!= OImode
);
/* Implement TARGET_PUSH_ARGUMENT.  */

static bool
ix86_push_argument (unsigned int npush)
{
  /* If SSE2 is available, use vector move to put large argument onto
     stack.  NB: In 32-bit mode, use 8-byte vector move.  */
  return ((!TARGET_SSE2 || npush < (TARGET_64BIT ? 16 : 8))
	  && TARGET_PUSH_ARGS
	  && !ACCUMULATE_OUTGOING_ARGS);
}
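
/* Illustration (added here, not part of the original sources): with SSE2
   enabled, a 16-byte argument in 64-bit mode (or an 8-byte one in 32-bit
   mode) is not pushed; it is stored into the pre-allocated argument area
   with a vector move instead, which is why this hook returns false for
   such sizes.  */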
4340 /* Create the va_list data type. */
4343 ix86_build_builtin_va_list_64 (void)
4345 tree f_gpr
, f_fpr
, f_ovf
, f_sav
, record
, type_decl
;
4347 record
= lang_hooks
.types
.make_type (RECORD_TYPE
);
4348 type_decl
= build_decl (BUILTINS_LOCATION
,
4349 TYPE_DECL
, get_identifier ("__va_list_tag"), record
);
4351 f_gpr
= build_decl (BUILTINS_LOCATION
,
4352 FIELD_DECL
, get_identifier ("gp_offset"),
4353 unsigned_type_node
);
4354 f_fpr
= build_decl (BUILTINS_LOCATION
,
4355 FIELD_DECL
, get_identifier ("fp_offset"),
4356 unsigned_type_node
);
4357 f_ovf
= build_decl (BUILTINS_LOCATION
,
4358 FIELD_DECL
, get_identifier ("overflow_arg_area"),
4360 f_sav
= build_decl (BUILTINS_LOCATION
,
4361 FIELD_DECL
, get_identifier ("reg_save_area"),
4364 va_list_gpr_counter_field
= f_gpr
;
4365 va_list_fpr_counter_field
= f_fpr
;
4367 DECL_FIELD_CONTEXT (f_gpr
) = record
;
4368 DECL_FIELD_CONTEXT (f_fpr
) = record
;
4369 DECL_FIELD_CONTEXT (f_ovf
) = record
;
4370 DECL_FIELD_CONTEXT (f_sav
) = record
;
4372 TYPE_STUB_DECL (record
) = type_decl
;
4373 TYPE_NAME (record
) = type_decl
;
4374 TYPE_FIELDS (record
) = f_gpr
;
4375 DECL_CHAIN (f_gpr
) = f_fpr
;
4376 DECL_CHAIN (f_fpr
) = f_ovf
;
4377 DECL_CHAIN (f_ovf
) = f_sav
;
4379 layout_type (record
);
4381 TYPE_ATTRIBUTES (record
) = tree_cons (get_identifier ("sysv_abi va_list"),
4382 NULL_TREE
, TYPE_ATTRIBUTES (record
));
4384 /* The correct type is an array type of one element. */
4385 return build_array_type (record
, build_index_type (size_zero_node
));
4388 /* Setup the builtin va_list data type and for 64-bit the additional
4389 calling convention specific va_list data types. */
4392 ix86_build_builtin_va_list (void)
4396 /* Initialize ABI specific va_list builtin types.
4398 In lto1, we can encounter two va_list types:
4399 - one as a result of the type-merge across TUs, and
4400 - the one constructed here.
4401 These two types will not have the same TYPE_MAIN_VARIANT, and therefore
4402 a type identity check in canonical_va_list_type based on
4403 TYPE_MAIN_VARIANT (which we used to have) will not work.
4404 Instead, we tag each va_list_type_node with its unique attribute, and
4405 look for the attribute in the type identity check in
4406 canonical_va_list_type.
4408 Tagging sysv_va_list_type_node directly with the attribute is
4409 problematic since it's a array of one record, which will degrade into a
4410 pointer to record when used as parameter (see build_va_arg comments for
4411 an example), dropping the attribute in the process. So we tag the
4414 /* For SYSV_ABI we use an array of one record. */
4415 sysv_va_list_type_node
= ix86_build_builtin_va_list_64 ();
4417 /* For MS_ABI we use plain pointer to argument area. */
4418 tree char_ptr_type
= build_pointer_type (char_type_node
);
4419 tree attr
= tree_cons (get_identifier ("ms_abi va_list"), NULL_TREE
,
4420 TYPE_ATTRIBUTES (char_ptr_type
));
4421 ms_va_list_type_node
= build_type_attribute_variant (char_ptr_type
, attr
);
4423 return ((ix86_abi
== MS_ABI
)
4424 ? ms_va_list_type_node
4425 : sysv_va_list_type_node
);
4429 /* For i386 we use plain pointer to argument area. */
4430 return build_pointer_type (char_type_node
);
4434 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4437 setup_incoming_varargs_64 (CUMULATIVE_ARGS
*cum
)
4443 /* GPR size of varargs save area. */
4444 if (cfun
->va_list_gpr_size
)
4445 ix86_varargs_gpr_size
= X86_64_REGPARM_MAX
* UNITS_PER_WORD
;
4447 ix86_varargs_gpr_size
= 0;
4449 /* FPR size of varargs save area. We don't need it if we don't pass
4450 anything in SSE registers. */
4451 if (TARGET_SSE
&& cfun
->va_list_fpr_size
)
4452 ix86_varargs_fpr_size
= X86_64_SSE_REGPARM_MAX
* 16;
4454 ix86_varargs_fpr_size
= 0;
4456 if (! ix86_varargs_gpr_size
&& ! ix86_varargs_fpr_size
)
4459 save_area
= frame_pointer_rtx
;
4460 set
= get_varargs_alias_set ();
4462 max
= cum
->regno
+ cfun
->va_list_gpr_size
/ UNITS_PER_WORD
;
4463 if (max
> X86_64_REGPARM_MAX
)
4464 max
= X86_64_REGPARM_MAX
;
4466 for (i
= cum
->regno
; i
< max
; i
++)
4468 mem
= gen_rtx_MEM (word_mode
,
4469 plus_constant (Pmode
, save_area
, i
* UNITS_PER_WORD
));
4470 MEM_NOTRAP_P (mem
) = 1;
4471 set_mem_alias_set (mem
, set
);
4472 emit_move_insn (mem
,
4473 gen_rtx_REG (word_mode
,
4474 x86_64_int_parameter_registers
[i
]));
4477 if (ix86_varargs_fpr_size
)
4480 rtx_code_label
*label
;
4483 /* Now emit code to save SSE registers. The AX parameter contains number
4484 of SSE parameter registers used to call this function, though all we
4485 actually check here is the zero/non-zero status. */
4487 label
= gen_label_rtx ();
4488 test
= gen_rtx_EQ (VOIDmode
, gen_rtx_REG (QImode
, AX_REG
), const0_rtx
);
4489 emit_jump_insn (gen_cbranchqi4 (test
, XEXP (test
, 0), XEXP (test
, 1),
4492 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
4493 we used movdqa (i.e. TImode) instead? Perhaps even better would
4494 be if we could determine the real mode of the data, via a hook
4495 into pass_stdarg. Ignore all that for now. */
4497 if (crtl
->stack_alignment_needed
< GET_MODE_ALIGNMENT (smode
))
4498 crtl
->stack_alignment_needed
= GET_MODE_ALIGNMENT (smode
);
4500 max
= cum
->sse_regno
+ cfun
->va_list_fpr_size
/ 16;
4501 if (max
> X86_64_SSE_REGPARM_MAX
)
4502 max
= X86_64_SSE_REGPARM_MAX
;
4504 for (i
= cum
->sse_regno
; i
< max
; ++i
)
4506 mem
= plus_constant (Pmode
, save_area
,
4507 i
* 16 + ix86_varargs_gpr_size
);
4508 mem
= gen_rtx_MEM (smode
, mem
);
4509 MEM_NOTRAP_P (mem
) = 1;
4510 set_mem_alias_set (mem
, set
);
4511 set_mem_align (mem
, GET_MODE_ALIGNMENT (smode
));
4513 emit_move_insn (mem
, gen_rtx_REG (smode
, GET_SSE_REGNO (i
)));
4521 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS
*cum
)
4523 alias_set_type set
= get_varargs_alias_set ();
4526 /* Reset to zero, as there might be a sysv vaarg used
4528 ix86_varargs_gpr_size
= 0;
4529 ix86_varargs_fpr_size
= 0;
4531 for (i
= cum
->regno
; i
< X86_64_MS_REGPARM_MAX
; i
++)
4535 mem
= gen_rtx_MEM (Pmode
,
4536 plus_constant (Pmode
, virtual_incoming_args_rtx
,
4537 i
* UNITS_PER_WORD
));
4538 MEM_NOTRAP_P (mem
) = 1;
4539 set_mem_alias_set (mem
, set
);
4541 reg
= gen_rtx_REG (Pmode
, x86_64_ms_abi_int_parameter_registers
[i
]);
4542 emit_move_insn (mem
, reg
);
4547 ix86_setup_incoming_varargs (cumulative_args_t cum_v
,
4548 const function_arg_info
&arg
,
4551 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
4552 CUMULATIVE_ARGS next_cum
;
4555 /* This argument doesn't appear to be used anymore. Which is good,
4556 because the old code here didn't suppress rtl generation. */
4557 gcc_assert (!no_rtl
);
4562 fntype
= TREE_TYPE (current_function_decl
);
4564 /* For varargs, we do not want to skip the dummy va_dcl argument.
4565 For stdargs, we do want to skip the last named argument. */
4567 if (!TYPE_NO_NAMED_ARGS_STDARG_P (TREE_TYPE (current_function_decl
))
4568 && stdarg_p (fntype
))
4569 ix86_function_arg_advance (pack_cumulative_args (&next_cum
), arg
);
4571 if (cum
->call_abi
== MS_ABI
)
4572 setup_incoming_varargs_ms_64 (&next_cum
);
4574 setup_incoming_varargs_64 (&next_cum
);
/* Checks if TYPE is of kind va_list char *.  */

static bool
is_va_list_char_pointer (tree type)
{
  tree canonic;

  /* For 32-bit it is always true.  */
  if (!TARGET_64BIT)
    return true;

  canonic = ix86_canonical_va_list_type (type);
  return (canonic == ms_va_list_type_node
	  || (ix86_abi == MS_ABI && canonic == va_list_type_node));
}
/* Implement va_start.  */

static void
ix86_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  tree type;
  rtx ovf_rtx;

  if (flag_split_stack
      && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    {
      unsigned int scratch_regno;

      /* When we are splitting the stack, we can't refer to the stack
         arguments using internal_arg_pointer, because they may be on
         the old stack.  The split stack prologue will arrange to
         leave a pointer to the old stack arguments in a scratch
         register, which we here copy to a pseudo-register.  The split
         stack prologue can't set the pseudo-register directly because
         it (the prologue) runs before any registers have been saved.  */

      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno != INVALID_REGNUM)
        {
          rtx reg;
          rtx_insn *seq;

          reg = gen_reg_rtx (Pmode);
          cfun->machine->split_stack_varargs_pointer = reg;

          start_sequence ();
          emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
          seq = get_insns ();
          end_sequence ();

          push_topmost_sequence ();
          emit_insn_after (seq, entry_of_function ());
          pop_topmost_sequence ();
        }
    }

  /* Only 64bit target needs something special.  */
  if (is_va_list_char_pointer (TREE_TYPE (valist)))
    {
      if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
        std_expand_builtin_va_start (valist, nextarg);
      else
        {
          rtx va_r, next;

          va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
          next = expand_binop (ptr_mode, add_optab,
                               cfun->machine->split_stack_varargs_pointer,
                               crtl->args.arg_offset_rtx,
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);
          convert_move (va_r, next, 0);
        }
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
  /* The following should be folded into the MEM_REF offset.  */
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
                f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = crtl->args.info.regno;
  n_fpr = crtl->args.info.sse_regno;

  if (cfun->va_list_gpr_size)
    {
      type = TREE_TYPE (gpr);
      t = build2 (MODIFY_EXPR, type,
                  gpr, build_int_cst (type, n_gpr * 8));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (TARGET_SSE && cfun->va_list_fpr_size)
    {
      type = TREE_TYPE (fpr);
      t = build2 (MODIFY_EXPR, type, fpr,
                  build_int_cst (type, n_fpr * 16 + 8 * X86_64_REGPARM_MAX));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  type = TREE_TYPE (ovf);
  if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    ovf_rtx = crtl->args.internal_arg_pointer;
  else
    ovf_rtx = cfun->machine->split_stack_varargs_pointer;
  t = make_tree (type, ovf_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);

  t = build2 (MODIFY_EXPR, type, ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
    {
      /* Find the register save area.
         Prologue of the function saves it right above stack frame.  */
      type = TREE_TYPE (sav);
      t = make_tree (type, frame_pointer_rtx);
      if (!ix86_varargs_gpr_size)
        t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);

      t = build2 (MODIFY_EXPR, type, sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
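
/* For reference, the four fields initialized above correspond to the
   SysV x86-64 va_list layout: gp_offset (bytes already consumed in the
   save area for GPR arguments, 8 per register), fp_offset (likewise for
   SSE arguments, 16 bytes each, starting after the
   8 * X86_64_REGPARM_MAX GPR slots), overflow_arg_area (arguments
   passed on the stack) and reg_save_area.  */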
4724 /* Implement va_arg. */
4727 ix86_gimplify_va_arg (tree valist
, tree type
, gimple_seq
*pre_p
,
4730 static const int intreg
[6] = { 0, 1, 2, 3, 4, 5 };
4731 tree f_gpr
, f_fpr
, f_ovf
, f_sav
;
4732 tree gpr
, fpr
, ovf
, sav
, t
;
4734 tree lab_false
, lab_over
= NULL_TREE
;
4739 machine_mode nat_mode
;
4740 unsigned int arg_boundary
;
4741 unsigned int type_align
;
4743 /* Only 64bit target needs something special. */
4744 if (is_va_list_char_pointer (TREE_TYPE (valist
)))
4745 return std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
4747 f_gpr
= TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node
));
4748 f_fpr
= DECL_CHAIN (f_gpr
);
4749 f_ovf
= DECL_CHAIN (f_fpr
);
4750 f_sav
= DECL_CHAIN (f_ovf
);
4752 gpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_gpr
),
4753 valist
, f_gpr
, NULL_TREE
);
4755 fpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_fpr
), valist
, f_fpr
, NULL_TREE
);
4756 ovf
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovf
), valist
, f_ovf
, NULL_TREE
);
4757 sav
= build3 (COMPONENT_REF
, TREE_TYPE (f_sav
), valist
, f_sav
, NULL_TREE
);
4759 indirect_p
= pass_va_arg_by_reference (type
);
4761 type
= build_pointer_type (type
);
4762 size
= arg_int_size_in_bytes (type
);
4763 rsize
= CEIL (size
, UNITS_PER_WORD
);
4765 nat_mode
= type_natural_mode (type
, NULL
, false);
4784 /* Unnamed 256 and 512bit vector mode parameters are passed on stack. */
4785 if (!TARGET_64BIT_MS_ABI
)
4793 container
= construct_container (nat_mode
, TYPE_MODE (type
),
4794 type
, 0, X86_64_REGPARM_MAX
,
4795 X86_64_SSE_REGPARM_MAX
, intreg
,
4800 /* Pull the value out of the saved registers. */
4802 addr
= create_tmp_var (ptr_type_node
, "addr");
4803 type_align
= TYPE_ALIGN (type
);
4807 int needed_intregs
, needed_sseregs
;
4809 tree int_addr
, sse_addr
;
4811 lab_false
= create_artificial_label (UNKNOWN_LOCATION
);
4812 lab_over
= create_artificial_label (UNKNOWN_LOCATION
);
4814 examine_argument (nat_mode
, type
, 0, &needed_intregs
, &needed_sseregs
);
4816 need_temp
= (!REG_P (container
)
4817 && ((needed_intregs
&& TYPE_ALIGN (type
) > 64)
4818 || TYPE_ALIGN (type
) > 128));
4820 /* In case we are passing structure, verify that it is consecutive block
4821 on the register save area. If not we need to do moves. */
4822 if (!need_temp
&& !REG_P (container
))
4824 /* Verify that all registers are strictly consecutive */
4825 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container
, 0, 0), 0))))
4829 for (i
= 0; i
< XVECLEN (container
, 0) && !need_temp
; i
++)
4831 rtx slot
= XVECEXP (container
, 0, i
);
4832 if (REGNO (XEXP (slot
, 0)) != FIRST_SSE_REG
+ (unsigned int) i
4833 || INTVAL (XEXP (slot
, 1)) != i
* 16)
4841 for (i
= 0; i
< XVECLEN (container
, 0) && !need_temp
; i
++)
4843 rtx slot
= XVECEXP (container
, 0, i
);
4844 if (REGNO (XEXP (slot
, 0)) != (unsigned int) i
4845 || INTVAL (XEXP (slot
, 1)) != i
* 8)
4857 int_addr
= create_tmp_var (ptr_type_node
, "int_addr");
4858 sse_addr
= create_tmp_var (ptr_type_node
, "sse_addr");
4861 /* First ensure that we fit completely in registers. */
4864 t
= build_int_cst (TREE_TYPE (gpr
),
4865 (X86_64_REGPARM_MAX
- needed_intregs
+ 1) * 8);
4866 t
= build2 (GE_EXPR
, boolean_type_node
, gpr
, t
);
4867 t2
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
4868 t
= build3 (COND_EXPR
, void_type_node
, t
, t2
, NULL_TREE
);
4869 gimplify_and_add (t
, pre_p
);
4873 t
= build_int_cst (TREE_TYPE (fpr
),
4874 (X86_64_SSE_REGPARM_MAX
- needed_sseregs
+ 1) * 16
4875 + X86_64_REGPARM_MAX
* 8);
4876 t
= build2 (GE_EXPR
, boolean_type_node
, fpr
, t
);
4877 t2
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
4878 t
= build3 (COND_EXPR
, void_type_node
, t
, t2
, NULL_TREE
);
4879 gimplify_and_add (t
, pre_p
);
4882 /* Compute index to start of area used for integer regs. */
4885 /* int_addr = gpr + sav; */
4886 t
= fold_build_pointer_plus (sav
, gpr
);
4887 gimplify_assign (int_addr
, t
, pre_p
);
4891 /* sse_addr = fpr + sav; */
4892 t
= fold_build_pointer_plus (sav
, fpr
);
4893 gimplify_assign (sse_addr
, t
, pre_p
);
4897 int i
, prev_size
= 0;
4898 tree temp
= create_tmp_var (type
, "va_arg_tmp");
4899 TREE_ADDRESSABLE (temp
) = 1;
4902 t
= build1 (ADDR_EXPR
, build_pointer_type (type
), temp
);
4903 gimplify_assign (addr
, t
, pre_p
);
4905 for (i
= 0; i
< XVECLEN (container
, 0); i
++)
4907 rtx slot
= XVECEXP (container
, 0, i
);
4908 rtx reg
= XEXP (slot
, 0);
4909 machine_mode mode
= GET_MODE (reg
);
4915 tree dest_addr
, dest
;
4916 int cur_size
= GET_MODE_SIZE (mode
);
4918 gcc_assert (prev_size
<= INTVAL (XEXP (slot
, 1)));
4919 prev_size
= INTVAL (XEXP (slot
, 1));
4920 if (prev_size
+ cur_size
> size
)
4922 cur_size
= size
- prev_size
;
4923 unsigned int nbits
= cur_size
* BITS_PER_UNIT
;
4924 if (!int_mode_for_size (nbits
, 1).exists (&mode
))
4927 piece_type
= lang_hooks
.types
.type_for_mode (mode
, 1);
4928 if (mode
== GET_MODE (reg
))
4929 addr_type
= build_pointer_type (piece_type
);
4931 addr_type
= build_pointer_type_for_mode (piece_type
, ptr_mode
,
4933 daddr_type
= build_pointer_type_for_mode (piece_type
, ptr_mode
,
4936 if (SSE_REGNO_P (REGNO (reg
)))
4938 src_addr
= sse_addr
;
4939 src_offset
= (REGNO (reg
) - FIRST_SSE_REG
) * 16;
4943 src_addr
= int_addr
;
4944 src_offset
= REGNO (reg
) * 8;
4946 src_addr
= fold_convert (addr_type
, src_addr
);
4947 src_addr
= fold_build_pointer_plus_hwi (src_addr
, src_offset
);
4949 dest_addr
= fold_convert (daddr_type
, addr
);
4950 dest_addr
= fold_build_pointer_plus_hwi (dest_addr
, prev_size
);
4951 if (cur_size
== GET_MODE_SIZE (mode
))
4953 src
= build_va_arg_indirect_ref (src_addr
);
4954 dest
= build_va_arg_indirect_ref (dest_addr
);
4956 gimplify_assign (dest
, src
, pre_p
);
4961 = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY
),
4962 3, dest_addr
, src_addr
,
4963 size_int (cur_size
));
4964 gimplify_and_add (copy
, pre_p
);
4966 prev_size
+= cur_size
;
4972 t
= build2 (PLUS_EXPR
, TREE_TYPE (gpr
), gpr
,
4973 build_int_cst (TREE_TYPE (gpr
), needed_intregs
* 8));
4974 gimplify_assign (gpr
, t
, pre_p
);
4975 /* The GPR save area guarantees only 8-byte alignment. */
4977 type_align
= MIN (type_align
, 64);
4982 t
= build2 (PLUS_EXPR
, TREE_TYPE (fpr
), fpr
,
4983 build_int_cst (TREE_TYPE (fpr
), needed_sseregs
* 16));
4984 gimplify_assign (unshare_expr (fpr
), t
, pre_p
);
4987 gimple_seq_add_stmt (pre_p
, gimple_build_goto (lab_over
));
4989 gimple_seq_add_stmt (pre_p
, gimple_build_label (lab_false
));
4992 /* ... otherwise out of the overflow area. */
4994 /* When we align parameter on stack for caller, if the parameter
4995 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
4996 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We will match callee
4997 here with caller. */
4998 arg_boundary
= ix86_function_arg_boundary (VOIDmode
, type
);
4999 if ((unsigned int) arg_boundary
> MAX_SUPPORTED_STACK_ALIGNMENT
)
5000 arg_boundary
= MAX_SUPPORTED_STACK_ALIGNMENT
;
5002 /* Care for on-stack alignment if needed. */
5003 if (arg_boundary
<= 64 || size
== 0)
5007 HOST_WIDE_INT align
= arg_boundary
/ 8;
5008 t
= fold_build_pointer_plus_hwi (ovf
, align
- 1);
5009 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
5010 build_int_cst (TREE_TYPE (t
), -align
));
5013 gimplify_expr (&t
, pre_p
, NULL
, is_gimple_val
, fb_rvalue
);
5014 gimplify_assign (addr
, t
, pre_p
);
5016 t
= fold_build_pointer_plus_hwi (t
, rsize
* UNITS_PER_WORD
);
5017 gimplify_assign (unshare_expr (ovf
), t
, pre_p
);
5020 gimple_seq_add_stmt (pre_p
, gimple_build_label (lab_over
));
5022 type
= build_aligned_type (type
, type_align
);
5023 ptrtype
= build_pointer_type_for_mode (type
, ptr_mode
, true);
5024 addr
= fold_convert (ptrtype
, addr
);
5027 addr
= build_va_arg_indirect_ref (addr
);
5028 return build_va_arg_indirect_ref (addr
);
/* Return true if OPNUM's MEM should be matched
   in movabs* patterns.  */

bool
ix86_check_movabs (rtx insn, int opnum)
{
  rtx set, mem;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = XEXP (set, opnum);
  while (SUBREG_P (mem))
    mem = SUBREG_REG (mem);
  gcc_assert (MEM_P (mem));
  return volatile_ok || !MEM_VOLATILE_P (mem);
}
/* Return false if INSN contains a MEM with a non-default address space.  */
bool
ix86_check_no_addr_space (rtx insn)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), ALL)
    {
      rtx x = *iter;
      if (MEM_P (x) && !ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x)))
        return false;
    }
  return true;
}
/* Initialize the table of extra 80387 mathematical constants.  */

static void
init_ext_80387_constants (void)
{
  static const char * cst[5] =
  {
    "0.3010299956639811952256464283594894482",  /* 0: fldlg2  */
    "0.6931471805599453094286904741849753009",  /* 1: fldln2  */
    "1.4426950408889634073876517827983434472",  /* 2: fldl2e  */
    "3.3219280948873623478083405569094566090",  /* 3: fldl2t  */
    "3.1415926535897932385128089594061862044",  /* 4: fldpi   */
  };
  int i;

  for (i = 0; i < 5; i++)
    {
      real_from_string (&ext_80387_constants_table[i], cst[i]);
      /* Ensure each constant is rounded to XFmode precision.  */
      real_convert (&ext_80387_constants_table[i],
                    XFmode, &ext_80387_constants_table[i]);
    }

  ext_80387_constants_init = 1;
}
/* Return non-zero if the constant is something that
   can be loaded with a special instruction.  */

int
standard_80387_constant_p (rtx x)
{
  machine_mode mode = GET_MODE (x);

  const REAL_VALUE_TYPE *r;

  if (!(CONST_DOUBLE_P (x) && X87_FLOAT_MODE_P (mode)))
    return -1;

  if (x == CONST0_RTX (mode))
    return 1;
  if (x == CONST1_RTX (mode))
    return 2;

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* For XFmode constants, try to find a special 80387 instruction when
     optimizing for size or on those CPUs that benefit from them.  */
  if (mode == XFmode
      && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS)
      && !flag_rounding_math)
    {
      int i;

      if (! ext_80387_constants_init)
        init_ext_80387_constants ();

      for (i = 0; i < 5; i++)
        if (real_identical (r, &ext_80387_constants_table[i]))
          return i + 3;
    }

  /* Load of the constant -0.0 or -1.0 will be split as
     fldz;fchs or fld1;fchs sequence.  */
  if (real_isnegzero (r))
    return 8;
  if (real_identical (r, &dconstm1))
    return 9;

  return 0;
}
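
/* Summary of the encoding used above: 1 and 2 select fldz and fld1,
   3 through 7 select the extra constants initialized in
   init_ext_80387_constants (fldlg2, fldln2, fldl2e, fldl2t, fldpi),
   and 8/9 mark -0.0 and -1.0, which are later split into the
   fldz;fchs and fld1;fchs sequences mentioned in the comment above.  */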
/* Return the opcode of the special instruction to be used to load
   the constant X.  */

const char *
standard_80387_constant_opcode (rtx x)
{
  switch (standard_80387_constant_p (x))
    {
    case 1:
      return "fldz";
    case 2:
      return "fld1";
    case 3:
      return "fldlg2";
    case 4:
      return "fldln2";
    case 5:
      return "fldl2e";
    case 6:
      return "fldl2t";
    case 7:
      return "fldpi";
    default:
      gcc_unreachable ();
    }
}
/* Return the CONST_DOUBLE representing the 80387 constant that is
   loaded by the specified special instruction.  The argument IDX
   matches the return value from standard_80387_constant_p.  */

rtx
standard_80387_constant_rtx (int idx)
{
  int i;

  if (! ext_80387_constants_init)
    init_ext_80387_constants ();

  switch (idx)
    {
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
      i = idx - 3;
      break;

    default:
      gcc_unreachable ();
    }

  return const_double_from_real_value (ext_80387_constants_table[i],
                                       XFmode);
}
/* Return 1 if X is all bits 0, 2 if X is all bits 1
   and 3 if X is all bits 1 with zero extend
   in supported SSE/AVX vector mode.  */

int
standard_sse_constant_p (rtx x, machine_mode pred_mode)
{
  machine_mode mode;

  if (!TARGET_SSE)
    return 0;

  mode = GET_MODE (x);

  if (x == const0_rtx || const0_operand (x, mode))
    return 1;

  if (x == constm1_rtx
      || vector_all_ones_operand (x, mode)
      || ((GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (pred_mode) == MODE_VECTOR_FLOAT)
          && float_vector_all_ones_operand (x, mode)))
    {
      /* VOIDmode integer constant, get mode from the predicate.  */
      if (mode == VOIDmode)
        mode = pred_mode;

      switch (GET_MODE_SIZE (mode))
        {
        case 64:
          if (TARGET_AVX512F)
            return 2;
          break;
        case 32:
          if (TARGET_AVX2)
            return 2;
          break;
        case 16:
          if (TARGET_SSE2)
            return 2;
          break;
        default:
          break;
        }
    }

  if (vector_all_ones_zero_extend_half_operand (x, mode)
      || vector_all_ones_zero_extend_quarter_operand (x, mode))
    return 3;

  return 0;
}
5252 /* Return the opcode of the special instruction to be used to load
5253 the constant operands[1] into operands[0]. */
5256 standard_sse_constant_opcode (rtx_insn
*insn
, rtx
*operands
)
5259 rtx x
= operands
[1];
5261 gcc_assert (TARGET_SSE
);
5263 mode
= GET_MODE (x
);
5265 if (x
== const0_rtx
|| const0_operand (x
, mode
))
5267 switch (get_attr_mode (insn
))
5270 if (!EXT_REX_SSE_REG_P (operands
[0]))
5271 return "%vpxor\t%0, %d0";
5275 if (EXT_REX_SSE_REG_P (operands
[0]))
5276 return (TARGET_AVX512VL
5277 ? "vpxord\t%x0, %x0, %x0"
5278 : "vpxord\t%g0, %g0, %g0");
5279 return "vpxor\t%x0, %x0, %x0";
5282 if (!EXT_REX_SSE_REG_P (operands
[0]))
5283 return "%vxorpd\t%0, %d0";
5287 if (!EXT_REX_SSE_REG_P (operands
[0]))
5288 return "vxorpd\t%x0, %x0, %x0";
5289 else if (TARGET_AVX512DQ
)
5290 return (TARGET_AVX512VL
5291 ? "vxorpd\t%x0, %x0, %x0"
5292 : "vxorpd\t%g0, %g0, %g0");
5294 return (TARGET_AVX512VL
5295 ? "vpxorq\t%x0, %x0, %x0"
5296 : "vpxorq\t%g0, %g0, %g0");
5299 if (!EXT_REX_SSE_REG_P (operands
[0]))
5300 return "%vxorps\t%0, %d0";
5304 if (!EXT_REX_SSE_REG_P (operands
[0]))
5305 return "vxorps\t%x0, %x0, %x0";
5306 else if (TARGET_AVX512DQ
)
5307 return (TARGET_AVX512VL
5308 ? "vxorps\t%x0, %x0, %x0"
5309 : "vxorps\t%g0, %g0, %g0");
5311 return (TARGET_AVX512VL
5312 ? "vpxord\t%x0, %x0, %x0"
5313 : "vpxord\t%g0, %g0, %g0");
5319 else if (x
== constm1_rtx
5320 || vector_all_ones_operand (x
, mode
)
5321 || (GET_MODE_CLASS (mode
) == MODE_VECTOR_FLOAT
5322 && float_vector_all_ones_operand (x
, mode
)))
5324 enum attr_mode insn_mode
= get_attr_mode (insn
);
5331 gcc_assert (TARGET_AVX512F
);
5332 return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";
5337 gcc_assert (TARGET_AVX2
);
5342 gcc_assert (TARGET_SSE2
);
5343 if (!EXT_REX_SSE_REG_P (operands
[0]))
5345 ? "vpcmpeqd\t%0, %0, %0"
5346 : "pcmpeqd\t%0, %0");
5347 else if (TARGET_AVX512VL
)
5348 return "vpternlogd\t{$0xFF, %0, %0, %0|%0, %0, %0, 0xFF}";
5350 return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";
5356 else if (vector_all_ones_zero_extend_half_operand (x
, mode
))
5358 if (GET_MODE_SIZE (mode
) == 64)
5360 gcc_assert (TARGET_AVX512F
);
5361 return "vpcmpeqd \t %t0, %t0, %t0";
5363 else if (GET_MODE_SIZE (mode
) == 32)
5365 gcc_assert (TARGET_AVX
);
5366 return "vpcmpeqd \t %x0, %x0, %x0";
5370 else if (vector_all_ones_zero_extend_quarter_operand (x
, mode
))
5372 gcc_assert (TARGET_AVX512F
);
5373 return "vpcmpeqd \t %x0, %x0, %x0";
/* Returns true if INSN can be transformed from a memory load
   to a supported FP constant load.  */

bool
ix86_standard_x87sse_constant_load_p (const rtx_insn *insn, rtx dst)
{
  rtx src = find_constant_src (insn);

  gcc_assert (REG_P (dst));

  if (src == NULL
      || (SSE_REGNO_P (REGNO (dst))
          && standard_sse_constant_p (src, GET_MODE (dst)) != 1)
      || (STACK_REGNO_P (REGNO (dst))
          && standard_80387_constant_p (src) < 1))
    return false;

  return true;
}
/* Predicate for pre-reload splitters with associated instructions,
   which can match any time before the split1 pass (usually combine),
   then are unconditionally split in that pass and should not be
   matched again afterwards.  */

bool
ix86_pre_reload_split (void)
{
  return (can_create_pseudo_p ()
          && !(cfun->curr_properties & PROP_rtl_split_insns));
}
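
/* This is typically referenced from the insn condition of
   define_insn_and_split patterns in i386.md, e.g.
   "&& ix86_pre_reload_split ()", so that a combine-generated pattern
   is only accepted while it can still be split before register
   allocation.  */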
5411 /* Return the opcode of the TYPE_SSEMOV instruction. To move from
5412 or to xmm16-xmm31/ymm16-ymm31 registers, we either require
5413 TARGET_AVX512VL or it is a register to register move which can
5414 be done with zmm register move. */
5417 ix86_get_ssemov (rtx
*operands
, unsigned size
,
5418 enum attr_mode insn_mode
, machine_mode mode
)
5421 bool misaligned_p
= (misaligned_operand (operands
[0], mode
)
5422 || misaligned_operand (operands
[1], mode
));
5423 bool evex_reg_p
= (size
== 64
5424 || EXT_REX_SSE_REG_P (operands
[0])
5425 || EXT_REX_SSE_REG_P (operands
[1]));
5426 machine_mode scalar_mode
;
5428 const char *opcode
= NULL
;
5434 } type
= opcode_int
;
5441 scalar_mode
= E_SFmode
;
5442 type
= opcode_float
;
5447 scalar_mode
= E_DFmode
;
5448 type
= opcode_double
;
5453 scalar_mode
= GET_MODE_INNER (mode
);
5459 /* NB: To move xmm16-xmm31/ymm16-ymm31 registers without AVX512VL,
5460 we can only use zmm register move without memory operand. */
5463 && GET_MODE_SIZE (mode
) < 64)
5465 /* NB: Even though ix86_hard_regno_mode_ok doesn't allow
5466 xmm16-xmm31 nor ymm16-ymm31 in 128/256 bit modes when
5467 AVX512VL is disabled, LRA can still generate reg to
5468 reg moves with xmm16-xmm31 and ymm16-ymm31 in 128/256 bit
5470 if (memory_operand (operands
[0], mode
)
5471 || memory_operand (operands
[1], mode
))
5477 if (scalar_mode
== E_HFmode
|| scalar_mode
== E_BFmode
)
5478 opcode
= (misaligned_p
5479 ? (TARGET_AVX512BW
? "vmovdqu16" : "vmovdqu64")
5482 opcode
= misaligned_p
? "vmovdqu32" : "vmovdqa32";
5485 opcode
= misaligned_p
? "vmovups" : "vmovaps";
5488 opcode
= misaligned_p
? "vmovupd" : "vmovapd";
5492 else if (SCALAR_FLOAT_MODE_P (scalar_mode
))
5494 switch (scalar_mode
)
5499 opcode
= (misaligned_p
5505 opcode
= (misaligned_p
5512 opcode
= misaligned_p
? "%vmovups" : "%vmovaps";
5515 opcode
= misaligned_p
? "%vmovupd" : "%vmovapd";
5519 opcode
= misaligned_p
? "vmovdqu64" : "vmovdqa64";
5521 opcode
= misaligned_p
? "%vmovdqu" : "%vmovdqa";
5527 else if (SCALAR_INT_MODE_P (scalar_mode
))
5529 switch (scalar_mode
)
5533 opcode
= (misaligned_p
5539 opcode
= (misaligned_p
5547 opcode
= (misaligned_p
5553 opcode
= (misaligned_p
5561 opcode
= misaligned_p
? "vmovdqu32" : "vmovdqa32";
5563 opcode
= misaligned_p
? "%vmovdqu" : "%vmovdqa";
5569 opcode
= misaligned_p
? "vmovdqu64" : "vmovdqa64";
5571 opcode
= misaligned_p
? "%vmovdqu" : "%vmovdqa";
5574 opcode
= misaligned_p
? "vmovdqu64" : "vmovdqa64";
5586 snprintf (buf
, sizeof (buf
), "%s\t{%%g1, %%g0|%%g0, %%g1}",
5590 snprintf (buf
, sizeof (buf
), "%s\t{%%t1, %%t0|%%t0, %%t1}",
5594 snprintf (buf
, sizeof (buf
), "%s\t{%%x1, %%x0|%%x0, %%x1}",
5600 output_asm_insn (buf
, operands
);
5604 /* Return the template of the TYPE_SSEMOV instruction to move
5605 operands[1] into operands[0]. */
5608 ix86_output_ssemov (rtx_insn
*insn
, rtx
*operands
)
5610 machine_mode mode
= GET_MODE (operands
[0]);
5611 if (get_attr_type (insn
) != TYPE_SSEMOV
5612 || mode
!= GET_MODE (operands
[1]))
5615 enum attr_mode insn_mode
= get_attr_mode (insn
);
5622 return ix86_get_ssemov (operands
, 64, insn_mode
, mode
);
5627 return ix86_get_ssemov (operands
, 32, insn_mode
, mode
);
5632 return ix86_get_ssemov (operands
, 16, insn_mode
, mode
);
5635 /* Handle broken assemblers that require movd instead of movq. */
5636 if (GENERAL_REG_P (operands
[0]))
5638 if (HAVE_AS_IX86_INTERUNIT_MOVQ
)
5639 return "%vmovq\t{%1, %q0|%q0, %1}";
5641 return "%vmovd\t{%1, %q0|%q0, %1}";
5643 else if (GENERAL_REG_P (operands
[1]))
5645 if (HAVE_AS_IX86_INTERUNIT_MOVQ
)
5646 return "%vmovq\t{%q1, %0|%0, %q1}";
5648 return "%vmovd\t{%q1, %0|%0, %q1}";
5651 return "%vmovq\t{%1, %0|%0, %1}";
5654 if (GENERAL_REG_P (operands
[0]))
5655 return "%vmovd\t{%1, %k0|%k0, %1}";
5656 else if (GENERAL_REG_P (operands
[1]))
5657 return "%vmovd\t{%k1, %0|%0, %k1}";
5659 return "%vmovd\t{%1, %0|%0, %1}";
5662 if (GENERAL_REG_P (operands
[0]))
5663 return "vmovw\t{%1, %k0|%k0, %1}";
5664 else if (GENERAL_REG_P (operands
[1]))
5665 return "vmovw\t{%k1, %0|%0, %k1}";
5667 return "vmovw\t{%1, %0|%0, %1}";
5670 if (TARGET_AVX
&& REG_P (operands
[0]) && REG_P (operands
[1]))
5671 return "vmovsd\t{%d1, %0|%0, %d1}";
5673 return "%vmovsd\t{%1, %0|%0, %1}";
5676 if (TARGET_AVX
&& REG_P (operands
[0]) && REG_P (operands
[1]))
5677 return "vmovss\t{%d1, %0|%0, %d1}";
5679 return "%vmovss\t{%1, %0|%0, %1}";
5683 if (REG_P (operands
[0]) && REG_P (operands
[1]))
5684 return "vmovsh\t{%d1, %0|%0, %d1}";
5686 return "vmovsh\t{%1, %0|%0, %1}";
5689 gcc_assert (!TARGET_AVX
);
5690 return "movlpd\t{%1, %0|%0, %1}";
5693 if (TARGET_AVX
&& REG_P (operands
[0]))
5694 return "vmovlps\t{%1, %d0|%d0, %1}";
5696 return "%vmovlps\t{%1, %0|%0, %1}";
/* Returns true if OP contains a symbol reference */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (op, i) - 1; j >= 0; j--)
            if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
              return true;
        }
      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
        return true;
    }

  return false;
}
/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return false then.  Return false if there is no frame
   marker to de-allocate.  */

bool
ix86_can_use_return_insn_p (void)
{
  if (ix86_function_ms_hook_prologue (current_function_decl))
    return false;

  if (ix86_function_naked (current_function_decl))
    return false;

  /* Don't use `ret' instruction in interrupt handler.  */
  if (! reload_completed
      || frame_pointer_needed
      || cfun->machine->func_type != TYPE_NORMAL)
    return false;

  /* Don't allow more than 32k pop, since that's all we can do
     with one instruction.  */
  if (crtl->args.pops_args && crtl->args.size >= 32768)
    return false;

  struct ix86_frame &frame = cfun->machine->frame;
  return (frame.stack_pointer_offset == UNITS_PER_WORD
          && (frame.nregs + frame.nsseregs) == 0);
}
/* Return stack frame size.  get_frame_size () returns used stack slots
   during compilation, which may be optimized out later.  If stack frame
   is needed, stack_frame_required should be true.  */

static HOST_WIDE_INT
ix86_get_frame_size (void)
{
  if (cfun->machine->stack_frame_required)
    return get_frame_size ();
  else
    return 0;
}
/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may
   be accessed via the stack pointer) in functions that seem suitable.  */

static bool
ix86_frame_pointer_required (void)
{
  /* If we accessed previous frames, then the generated code expects
     to be able to access the saved ebp value in our frame.  */
  if (cfun->machine->accesses_prev_frame)
    return true;

  /* Several x86 os'es need a frame pointer for other reasons,
     usually pertaining to setjmp.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  /* For older 32-bit runtimes setjmp requires valid frame-pointer.  */
  if (TARGET_32BIT_MS_ABI && cfun->calls_setjmp)
    return true;

  /* Win64 SEH, very large frames need a frame-pointer as maximum stack
     allocation is 4GB.  */
  if (TARGET_64BIT_MS_ABI && ix86_get_frame_size () > SEH_MAX_FRAME_SIZE)
    return true;

  /* SSE saves require frame-pointer when stack is misaligned.  */
  if (TARGET_64BIT_MS_ABI && ix86_incoming_stack_boundary < 128)
    return true;

  /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
     turns off the frame pointer by default.  Turn it back on now if
     we've not got a leaf function.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER
      && (!crtl->is_leaf
          || ix86_current_function_calls_tls_descriptor))
    return true;

  /* Several versions of mcount for the x86 assumes that there is a
     frame, so we cannot allow profiling without a frame pointer.  */
  if (crtl->profile && !flag_fentry)
    return true;

  return false;
}
/* Record that the current function accesses previous call frames.  */

void
ix86_setup_frame_addresses (void)
{
  cfun->machine->accesses_prev_frame = 1;
}
#ifndef USE_HIDDEN_LINKONCE
# if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
#  define USE_HIDDEN_LINKONCE 1
# else
#  define USE_HIDDEN_LINKONCE 0
# endif
#endif

/* Label count for call and return thunks.  It is used to make unique
   labels in call and return thunks.  */
static int indirectlabelno;

/* True if call thunk function is needed.  */
static bool indirect_thunk_needed = false;

/* Bit masks of integer registers, which contain branch target, used
   by call thunk functions.  */
static HARD_REG_SET indirect_thunks_used;

/* True if return thunk function is needed.  */
static bool indirect_return_needed = false;

/* True if return thunk function via CX is needed.  */
static bool indirect_return_via_cx;

#ifndef INDIRECT_LABEL
# define INDIRECT_LABEL "LIND"
#endif

/* Indicate what prefix is needed for an indirect branch.  */
enum indirect_thunk_prefix
{
  indirect_thunk_prefix_none,
  indirect_thunk_prefix_nt
};
/* Return the prefix needed for an indirect branch INSN.  */

enum indirect_thunk_prefix
indirect_thunk_need_prefix (rtx_insn *insn)
{
  enum indirect_thunk_prefix need_prefix;
  if ((cfun->machine->indirect_branch_type
       == indirect_branch_thunk_extern)
      && ix86_notrack_prefixed_insn_p (insn))
    {
      /* NOTRACK prefix is only used with external thunk so that it
         can be properly updated to support CET at run-time.  */
      need_prefix = indirect_thunk_prefix_nt;
    }
  else
    need_prefix = indirect_thunk_prefix_none;
  return need_prefix;
}
/* Fills in the label name that should be used for the indirect thunk.  */

static void
indirect_thunk_name (char name[32], unsigned int regno,
                     enum indirect_thunk_prefix need_prefix,
                     bool ret_p)
{
  if (regno != INVALID_REGNUM && regno != CX_REG && ret_p)
    gcc_unreachable ();

  if (USE_HIDDEN_LINKONCE)
    {
      const char *prefix;

      if (need_prefix == indirect_thunk_prefix_nt
          && regno != INVALID_REGNUM)
        {
          /* NOTRACK prefix is only used with external thunk via
             register so that NOTRACK prefix can be added to indirect
             branch via register to support CET at run-time.  */
          prefix = "_nt";
        }
      else
        prefix = "";

      const char *ret = ret_p ? "return" : "indirect";

      if (regno != INVALID_REGNUM)
        {
          const char *reg_prefix;
          if (LEGACY_INT_REGNO_P (regno))
            reg_prefix = TARGET_64BIT ? "r" : "e";
          else
            reg_prefix = "";
          sprintf (name, "__x86_%s_thunk%s_%s%s",
                   ret, prefix, reg_prefix, reg_names[regno]);
        }
      else
        sprintf (name, "__x86_%s_thunk%s", ret, prefix);
    }
  else
    {
      if (regno != INVALID_REGNUM)
        ASM_GENERATE_INTERNAL_LABEL (name, "LITR", regno);
      else
        {
          if (ret_p)
            ASM_GENERATE_INTERNAL_LABEL (name, "LRT", 0);
          else
            ASM_GENERATE_INTERNAL_LABEL (name, "LIT", 0);
        }
    }
}
5940 /* Output a call and return thunk for indirect branch. If REGNO != -1,
5941 the function address is in REGNO and the call and return thunk looks like:
5952 Otherwise, the function address is on the top of stack and the
5953 call and return thunk looks like:
5961 lea WORD_SIZE(%sp), %sp
5966 output_indirect_thunk (unsigned int regno
)
5968 char indirectlabel1
[32];
5969 char indirectlabel2
[32];
5971 ASM_GENERATE_INTERNAL_LABEL (indirectlabel1
, INDIRECT_LABEL
,
5973 ASM_GENERATE_INTERNAL_LABEL (indirectlabel2
, INDIRECT_LABEL
,
5977 fputs ("\tcall\t", asm_out_file
);
5978 assemble_name_raw (asm_out_file
, indirectlabel2
);
5979 fputc ('\n', asm_out_file
);
5981 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, indirectlabel1
);
5983 /* AMD and Intel CPUs prefer each a different instruction as loop filler.
5984 Usage of both pause + lfence is compromise solution. */
5985 fprintf (asm_out_file
, "\tpause\n\tlfence\n");
5988 fputs ("\tjmp\t", asm_out_file
);
5989 assemble_name_raw (asm_out_file
, indirectlabel1
);
5990 fputc ('\n', asm_out_file
);
5992 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, indirectlabel2
);
5994 /* The above call insn pushed a word to stack. Adjust CFI info. */
5995 if (flag_asynchronous_unwind_tables
&& dwarf2out_do_frame ())
5997 if (! dwarf2out_do_cfi_asm ())
5999 dw_cfi_ref xcfi
= ggc_cleared_alloc
<dw_cfi_node
> ();
6000 xcfi
->dw_cfi_opc
= DW_CFA_advance_loc4
;
6001 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= ggc_strdup (indirectlabel2
);
6002 vec_safe_push (cfun
->fde
->dw_fde_cfi
, xcfi
);
6004 dw_cfi_ref xcfi
= ggc_cleared_alloc
<dw_cfi_node
> ();
6005 xcfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
6006 xcfi
->dw_cfi_oprnd1
.dw_cfi_offset
= 2 * UNITS_PER_WORD
;
6007 vec_safe_push (cfun
->fde
->dw_fde_cfi
, xcfi
);
6008 dwarf2out_emit_cfi (xcfi
);
6011 if (regno
!= INVALID_REGNUM
)
6015 xops
[0] = gen_rtx_MEM (word_mode
, stack_pointer_rtx
);
6016 xops
[1] = gen_rtx_REG (word_mode
, regno
);
6017 output_asm_insn ("mov\t{%1, %0|%0, %1}", xops
);
6023 xops
[0] = stack_pointer_rtx
;
6024 xops
[1] = plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
);
6025 output_asm_insn ("lea\t{%E1, %0|%0, %E1}", xops
);
6028 fputs ("\tret\n", asm_out_file
);
6029 if ((ix86_harden_sls
& harden_sls_return
))
6030 fputs ("\tint3\n", asm_out_file
);
6033 /* Output a funtion with a call and return thunk for indirect branch.
6034 If REGNO != INVALID_REGNUM, the function address is in REGNO.
6035 Otherwise, the function address is on the top of stack. Thunk is
6036 used for function return if RET_P is true. */
6039 output_indirect_thunk_function (enum indirect_thunk_prefix need_prefix
,
6040 unsigned int regno
, bool ret_p
)
6045 /* Create __x86_indirect_thunk. */
6046 indirect_thunk_name (name
, regno
, need_prefix
, ret_p
);
6047 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
6048 get_identifier (name
),
6049 build_function_type_list (void_type_node
, NULL_TREE
));
6050 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
6051 NULL_TREE
, void_type_node
);
6052 TREE_PUBLIC (decl
) = 1;
6053 TREE_STATIC (decl
) = 1;
6054 DECL_IGNORED_P (decl
) = 1;
6059 switch_to_section (darwin_sections
[picbase_thunk_section
]);
6060 fputs ("\t.weak_definition\t", asm_out_file
);
6061 assemble_name (asm_out_file
, name
);
6062 fputs ("\n\t.private_extern\t", asm_out_file
);
6063 assemble_name (asm_out_file
, name
);
6064 putc ('\n', asm_out_file
);
6065 ASM_OUTPUT_LABEL (asm_out_file
, name
);
6066 DECL_WEAK (decl
) = 1;
6070 if (USE_HIDDEN_LINKONCE
)
6072 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
6074 targetm
.asm_out
.unique_section (decl
, 0);
6075 switch_to_section (get_named_section (decl
, NULL
, 0));
6077 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
6078 fputs ("\t.hidden\t", asm_out_file
);
6079 assemble_name (asm_out_file
, name
);
6080 putc ('\n', asm_out_file
);
6081 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
6085 switch_to_section (text_section
);
6086 ASM_OUTPUT_LABEL (asm_out_file
, name
);
6089 DECL_INITIAL (decl
) = make_node (BLOCK
);
6090 current_function_decl
= decl
;
6091 allocate_struct_function (decl
, false);
6092 init_function_start (decl
);
6093 /* We're about to hide the function body from callees of final_* by
6094 emitting it directly; tell them we're a thunk, if they care. */
6095 cfun
->is_thunk
= true;
6096 first_function_block_is_cold
= false;
6097 /* Make sure unwind info is emitted for the thunk if needed. */
6098 final_start_function (emit_barrier (), asm_out_file
, 1);
6100 output_indirect_thunk (regno
);
6102 final_end_function ();
6103 init_insn_lengths ();
6104 free_after_compilation (cfun
);
6106 current_function_decl
= NULL
;
static int pic_labels_used;

/* Fills in the label name that should be used for a pc thunk for
   the given register.  */

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  gcc_assert (!TARGET_64BIT);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__x86.get_pc_thunk.%s", reg_names[regno]);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
}
6126 /* This function generates code for -fpic that loads %ebx with
6127 the return address of the caller and then returns. */
6130 ix86_code_end (void)
6135 if (indirect_return_needed
)
6136 output_indirect_thunk_function (indirect_thunk_prefix_none
,
6137 INVALID_REGNUM
, true);
6138 if (indirect_return_via_cx
)
6139 output_indirect_thunk_function (indirect_thunk_prefix_none
,
6141 if (indirect_thunk_needed
)
6142 output_indirect_thunk_function (indirect_thunk_prefix_none
,
6143 INVALID_REGNUM
, false);
6145 for (regno
= FIRST_REX_INT_REG
; regno
<= LAST_REX_INT_REG
; regno
++)
6147 if (TEST_HARD_REG_BIT (indirect_thunks_used
, regno
))
6148 output_indirect_thunk_function (indirect_thunk_prefix_none
,
6152 for (regno
= FIRST_INT_REG
; regno
<= LAST_INT_REG
; regno
++)
6157 if (TEST_HARD_REG_BIT (indirect_thunks_used
, regno
))
6158 output_indirect_thunk_function (indirect_thunk_prefix_none
,
6161 if (!(pic_labels_used
& (1 << regno
)))
6164 get_pc_thunk_name (name
, regno
);
6166 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
6167 get_identifier (name
),
6168 build_function_type_list (void_type_node
, NULL_TREE
));
6169 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
6170 NULL_TREE
, void_type_node
);
6171 TREE_PUBLIC (decl
) = 1;
6172 TREE_STATIC (decl
) = 1;
6173 DECL_IGNORED_P (decl
) = 1;
6178 switch_to_section (darwin_sections
[picbase_thunk_section
]);
6179 fputs ("\t.weak_definition\t", asm_out_file
);
6180 assemble_name (asm_out_file
, name
);
6181 fputs ("\n\t.private_extern\t", asm_out_file
);
6182 assemble_name (asm_out_file
, name
);
6183 putc ('\n', asm_out_file
);
6184 ASM_OUTPUT_LABEL (asm_out_file
, name
);
6185 DECL_WEAK (decl
) = 1;
6189 if (USE_HIDDEN_LINKONCE
)
6191 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
6193 targetm
.asm_out
.unique_section (decl
, 0);
6194 switch_to_section (get_named_section (decl
, NULL
, 0));
6196 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
6197 fputs ("\t.hidden\t", asm_out_file
);
6198 assemble_name (asm_out_file
, name
);
6199 putc ('\n', asm_out_file
);
6200 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
6204 switch_to_section (text_section
);
6205 ASM_OUTPUT_LABEL (asm_out_file
, name
);
6208 DECL_INITIAL (decl
) = make_node (BLOCK
);
6209 current_function_decl
= decl
;
6210 allocate_struct_function (decl
, false);
6211 init_function_start (decl
);
6212 /* We're about to hide the function body from callees of final_* by
6213 emitting it directly; tell them we're a thunk, if they care. */
6214 cfun
->is_thunk
= true;
6215 first_function_block_is_cold
= false;
6216 /* Make sure unwind info is emitted for the thunk if needed. */
6217 final_start_function (emit_barrier (), asm_out_file
, 1);
6219 /* Pad stack IP move with 4 instructions (two NOPs count
6220 as one instruction). */
6221 if (TARGET_PAD_SHORT_FUNCTION
)
6226 fputs ("\tnop\n", asm_out_file
);
6229 xops
[0] = gen_rtx_REG (Pmode
, regno
);
6230 xops
[1] = gen_rtx_MEM (Pmode
, stack_pointer_rtx
);
6231 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops
);
6232 fputs ("\tret\n", asm_out_file
);
6233 final_end_function ();
6234 init_insn_lengths ();
6235 free_after_compilation (cfun
);
6237 current_function_decl
= NULL
;
6240 if (flag_split_stack
)
6241 file_end_indicate_split_stack ();
6244 /* Emit code for the SET_GOT patterns. */
6247 output_set_got (rtx dest
, rtx label
)
6253 if (TARGET_VXWORKS_RTP
&& flag_pic
)
6255 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
6256 xops
[2] = gen_rtx_MEM (Pmode
,
6257 gen_rtx_SYMBOL_REF (Pmode
, VXWORKS_GOTT_BASE
));
6258 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops
);
6260 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
6261 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
6262 an unadorned address. */
6263 xops
[2] = gen_rtx_SYMBOL_REF (Pmode
, VXWORKS_GOTT_INDEX
);
6264 SYMBOL_REF_FLAGS (xops
[2]) |= SYMBOL_FLAG_LOCAL
;
6265 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops
);
6269 xops
[1] = gen_rtx_SYMBOL_REF (Pmode
, GOT_SYMBOL_NAME
);
6274 get_pc_thunk_name (name
, REGNO (dest
));
6275 pic_labels_used
|= 1 << REGNO (dest
);
6277 xops
[2] = gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
6278 xops
[2] = gen_rtx_MEM (QImode
, xops
[2]);
6279 output_asm_insn ("%!call\t%X2", xops
);
6282 /* Output the Mach-O "canonical" pic base label name ("Lxx$pb") here.
6283 This is what will be referenced by the Mach-O PIC subsystem. */
6284 if (machopic_should_output_picbase_label () || !label
)
6285 ASM_OUTPUT_LABEL (asm_out_file
, MACHOPIC_FUNCTION_BASE_NAME
);
6287 /* When we are restoring the pic base at the site of a nonlocal label,
6288 and we decided to emit the pic base above, we will still output a
6289 local label used for calculating the correction offset (even though
6290 the offset will be 0 in that case). */
6292 targetm
.asm_out
.internal_label (asm_out_file
, "L",
6293 CODE_LABEL_NUMBER (label
));
6299 /* We don't need a pic base, we're not producing pic. */
6302 xops
[2] = gen_rtx_LABEL_REF (Pmode
, label
? label
: gen_label_rtx ());
6303 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops
);
6304 targetm
.asm_out
.internal_label (asm_out_file
, "L",
6305 CODE_LABEL_NUMBER (XEXP (xops
[2], 0)));
6309 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops
);
/* Generate a "push" pattern for input ARG.  */

rtx
gen_push (rtx arg)
{
  struct machine_function *m = cfun->machine;

  if (m->fs.cfa_reg == stack_pointer_rtx)
    m->fs.cfa_offset += UNITS_PER_WORD;
  m->fs.sp_offset += UNITS_PER_WORD;

  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (gen_rtx_MEM (word_mode,
                                   gen_rtx_PRE_DEC (Pmode,
                                                    stack_pointer_rtx)),
                      arg);
}

/* Generate a "pop" pattern for input ARG.  */

rtx
gen_pop (rtx arg)
{
  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (arg,
                      gen_rtx_MEM (word_mode,
                                   gen_rtx_POST_INC (Pmode,
                                                     stack_pointer_rtx)));
}
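
/* Note that gen_push also updates the m->fs frame-state bookkeeping
   (cfa_offset and sp_offset), since prologue emission relies on those
   fields to track the CFA, while gen_pop leaves any stack-offset
   accounting to its callers.  */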
/* Return >= 0 if there is an unused call-clobbered register available
   for the entire function.  */

static unsigned int
ix86_select_alt_pic_regnum (void)
{
  if (ix86_use_pseudo_pic_reg ())
    return INVALID_REGNUM;

  if (crtl->is_leaf
      && !crtl->profile
      && !ix86_current_function_calls_tls_descriptor)
    {
      int i, drap;
      /* Can't use the same register for both PIC and DRAP.  */
      if (crtl->drap_reg)
        drap = REGNO (crtl->drap_reg);
      else
        drap = -1;
      for (i = 2; i >= 0; --i)
        if (i != drap && !df_regs_ever_live_p (i))
          return i;
    }

  return INVALID_REGNUM;
}
/* Return true if REGNO is used by the epilogue.  */

bool
ix86_epilogue_uses (int regno)
{
  /* If there are no caller-saved registers, we preserve all registers,
     except for MMX and x87 registers which aren't supported when saving
     and restoring registers.  Don't explicitly save SP register since
     it is always preserved.  */
  return (epilogue_completed
          && cfun->machine->no_caller_saved_registers
          && !fixed_regs[regno]
          && !STACK_REGNO_P (regno)
          && !MMX_REGNO_P (regno));
}
/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
ix86_hard_regno_scratch_ok (unsigned int regno)
{
  /* If there are no caller-saved registers, we can't use any register
     as a scratch register after epilogue and use REGNO as scratch
     register only if it has been used before to avoid saving and
     restoring it.  */
  return (!cfun->machine->no_caller_saved_registers
          || (!epilogue_completed
              && df_regs_ever_live_p (regno)));
}
/* Return TRUE if we need to save REGNO.  */

static bool
ix86_save_reg (unsigned int regno, bool maybe_eh_return, bool ignore_outlined)
{
  /* If there are no caller-saved registers, we preserve all registers,
     except for MMX and x87 registers which aren't supported when saving
     and restoring registers.  Don't explicitly save SP register since
     it is always preserved.  */
  if (cfun->machine->no_caller_saved_registers)
    {
      /* Don't preserve registers used for function return value.  */
      rtx reg = crtl->return_rtx;
      if (reg)
        {
          unsigned int i = REGNO (reg);
          unsigned int nregs = REG_NREGS (reg);
          while (nregs-- > 0)
            if ((i + nregs) == regno)
              return false;
        }

      return (df_regs_ever_live_p (regno)
              && !fixed_regs[regno]
              && !STACK_REGNO_P (regno)
              && !MMX_REGNO_P (regno)
              && (regno != HARD_FRAME_POINTER_REGNUM
                  || !frame_pointer_needed));
    }

  if (regno == REAL_PIC_OFFSET_TABLE_REGNUM
      && pic_offset_table_rtx)
    {
      if (ix86_use_pseudo_pic_reg ())
        {
          /* REAL_PIC_OFFSET_TABLE_REGNUM used by call to
             _mcount in prologue.  */
          if (!TARGET_64BIT && flag_pic && crtl->profile)
            return true;
        }
      else if (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
               || crtl->profile
               || crtl->calls_eh_return
               || crtl->uses_const_pool
               || cfun->has_nonlocal_label)
        return ix86_select_alt_pic_regnum () == INVALID_REGNUM;
    }

  if (crtl->calls_eh_return && maybe_eh_return)
    {
      unsigned i;
      for (i = 0; ; i++)
        {
          unsigned test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)
            break;
          if (test == regno)
            return true;
        }
    }

  if (ignore_outlined && cfun->machine->call_ms2sysv)
    {
      unsigned count = cfun->machine->call_ms2sysv_extra_regs
                       + xlogue_layout::MIN_REGS;
      if (xlogue_layout::is_stub_managed_reg (regno, count))
        return false;
    }

  if (crtl->drap_reg
      && regno == REGNO (crtl->drap_reg)
      && !cfun->machine->no_drap_save_restore)
    return true;

  return (df_regs_ever_live_p (regno)
          && !call_used_or_fixed_reg_p (regno)
          && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
}
/* Return number of saved general purpose registers.  */

static int
ix86_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      nregs ++;
  return nregs;
}

/* Return number of saved SSE registers.  */

static int
ix86_nsaved_sseregs (void)
{
  int nregs = 0;
  int regno;

  if (!TARGET_64BIT_MS_ABI)
    return 0;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      nregs ++;
  return nregs;
}
/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  If stack alignment is needed, we can only replace argument
   pointer with hard frame pointer, or replace frame pointer with stack
   pointer.  Otherwise, frame pointer elimination is automatically
   handled and all other eliminations are valid.  */

static bool
ix86_can_eliminate (const int from, const int to)
{
  if (stack_realign_fp)
    return ((from == ARG_POINTER_REGNUM
             && to == HARD_FRAME_POINTER_REGNUM)
            || (from == FRAME_POINTER_REGNUM
                && to == STACK_POINTER_REGNUM));
  else
    return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
}
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
ix86_initial_elimination_offset (int from, int to)
{
  struct ix86_frame &frame = cfun->machine->frame;

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset;
  else if (from == FRAME_POINTER_REGNUM
           && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
        return frame.stack_pointer_offset;

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.stack_pointer_offset - frame.frame_pointer_offset;
    }
}
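
/* All of these offsets come from the ix86_frame layout that
   ix86_compute_frame_layout fills in below; for instance, eliminating
   the argument pointer directly into the stack pointer uses the full
   frame.stack_pointer_offset computed there.  */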
/* Emits a warning for unsupported msabi to sysv pro/epilogues.  */
static void
warn_once_call_ms2sysv_xlogues (const char *feature)
{
  static bool warned_once = false;
  if (!warned_once)
    {
      warning (0, "%<-mcall-ms2sysv-xlogues%> is not compatible with %s",
               feature);
      warned_once = true;
    }
}
/* Return the probing interval for -fstack-clash-protection.  */

static HOST_WIDE_INT
get_probe_interval (void)
{
  if (flag_stack_clash_protection)
    return (HOST_WIDE_INT_1U
            << param_stack_clash_protection_probe_interval);
  else
    return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
}
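
/* For instance, with the usual default of 12 for
   param_stack_clash_protection_probe_interval this yields
   (HOST_WIDE_INT_1U << 12) == 4096, i.e. one probe per 4 KiB page.  */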
/* When using -fsplit-stack, the allocation routines set a field in
   the TCB to the bottom of the stack plus this much space, measured
   in bytes.  */

#define SPLIT_STACK_AVAILABLE 256
6589 /* Fill structure ix86_frame about frame of currently computed function. */
6592 ix86_compute_frame_layout (void)
6594 struct ix86_frame
*frame
= &cfun
->machine
->frame
;
6595 struct machine_function
*m
= cfun
->machine
;
6596 unsigned HOST_WIDE_INT stack_alignment_needed
;
6597 HOST_WIDE_INT offset
;
6598 unsigned HOST_WIDE_INT preferred_alignment
;
6599 HOST_WIDE_INT size
= ix86_get_frame_size ();
6600 HOST_WIDE_INT to_allocate
;
6602 /* m->call_ms2sysv is initially enabled in ix86_expand_call for all 64-bit
6603 * ms_abi functions that call a sysv function. We now need to prune away
6604 * cases where it should be disabled. */
6605 if (TARGET_64BIT
&& m
->call_ms2sysv
)
6607 gcc_assert (TARGET_64BIT_MS_ABI
);
6608 gcc_assert (TARGET_CALL_MS2SYSV_XLOGUES
);
6609 gcc_assert (!TARGET_SEH
);
6610 gcc_assert (TARGET_SSE
);
6611 gcc_assert (!ix86_using_red_zone ());
6613 if (crtl
->calls_eh_return
)
6615 gcc_assert (!reload_completed
);
6616 m
->call_ms2sysv
= false;
6617 warn_once_call_ms2sysv_xlogues ("__builtin_eh_return");
6620 else if (ix86_static_chain_on_stack
)
6622 gcc_assert (!reload_completed
);
6623 m
->call_ms2sysv
= false;
6624 warn_once_call_ms2sysv_xlogues ("static call chains");
6627 /* Finally, compute which registers the stub will manage. */
6630 unsigned count
= xlogue_layout::count_stub_managed_regs ();
6631 m
->call_ms2sysv_extra_regs
= count
- xlogue_layout::MIN_REGS
;
6632 m
->call_ms2sysv_pad_in
= 0;
6636 frame
->nregs
= ix86_nsaved_regs ();
6637 frame
->nsseregs
= ix86_nsaved_sseregs ();
6639 /* 64-bit MS ABI seem to require stack alignment to be always 16,
6640 except for function prologues, leaf functions and when the defult
6641 incoming stack boundary is overriden at command line or via
6642 force_align_arg_pointer attribute.
6644 Darwin's ABI specifies 128b alignment for both 32 and 64 bit variants
6645 at call sites, including profile function calls.
6647 if (((TARGET_64BIT_MS_ABI
|| TARGET_MACHO
)
6648 && crtl
->preferred_stack_boundary
< 128)
6649 && (!crtl
->is_leaf
|| cfun
->calls_alloca
!= 0
6650 || ix86_current_function_calls_tls_descriptor
6651 || (TARGET_MACHO
&& crtl
->profile
)
6652 || ix86_incoming_stack_boundary
< 128))
6654 crtl
->preferred_stack_boundary
= 128;
6655 crtl
->stack_alignment_needed
= 128;
6658 stack_alignment_needed
= crtl
->stack_alignment_needed
/ BITS_PER_UNIT
;
6659 preferred_alignment
= crtl
->preferred_stack_boundary
/ BITS_PER_UNIT
;
6661 gcc_assert (!size
|| stack_alignment_needed
);
6662 gcc_assert (preferred_alignment
>= STACK_BOUNDARY
/ BITS_PER_UNIT
);
6663 gcc_assert (preferred_alignment
<= stack_alignment_needed
);
6665 /* The only ABI saving SSE regs should be 64-bit ms_abi. */
6666 gcc_assert (TARGET_64BIT
|| !frame
->nsseregs
);
6667 if (TARGET_64BIT
&& m
->call_ms2sysv
)
6669 gcc_assert (stack_alignment_needed
>= 16);
6670 gcc_assert (!frame
->nsseregs
);
6673 /* For SEH we have to limit the amount of code movement into the prologue.
6674 At present we do this via a BLOCKAGE, at which point there's very little
6675 scheduling that can be done, which means that there's very little point
6676 in doing anything except PUSHs. */
6678 m
->use_fast_prologue_epilogue
= false;
6679 else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR_FOR_FN (cfun
)))
6681 int count
= frame
->nregs
;
6682 struct cgraph_node
*node
= cgraph_node::get (current_function_decl
);
6684 /* The fast prologue uses move instead of push to save registers. This
6685 is significantly longer, but also executes faster as modern hardware
6686 can execute the moves in parallel, but can't do that for push/pop.
6688 Be careful about choosing what prologue to emit: When function takes
6689 many instructions to execute we may use slow version as well as in
6690 case function is known to be outside hot spot (this is known with
6691 feedback only). Weight the size of function by number of registers
6692 to save as it is cheap to use one or two push instructions but very
6693 slow to use many of them.
6695 Calling this hook multiple times with the same frame requirements
6696 must produce the same layout, since the RA might otherwise be
6697 unable to reach a fixed point or might fail its final sanity checks.
6698 This means that once we've assumed that a function does or doesn't
6699 have a particular size, we have to stick to that assumption
6700 regardless of how the function has changed since. */
6702 count
= (count
- 1) * FAST_PROLOGUE_INSN_COUNT
;
6703 if (node
->frequency
< NODE_FREQUENCY_NORMAL
6704 || (flag_branch_probabilities
6705 && node
->frequency
< NODE_FREQUENCY_HOT
))
6706 m
->use_fast_prologue_epilogue
= false;
6709 if (count
!= frame
->expensive_count
)
6711 frame
->expensive_count
= count
;
6712 frame
->expensive_p
= expensive_function_p (count
);
6714 m
->use_fast_prologue_epilogue
= !frame
->expensive_p
;
6718 frame
->save_regs_using_mov
6719 = TARGET_PROLOGUE_USING_MOVE
&& m
->use_fast_prologue_epilogue
;
6721 /* Skip return address and error code in exception handler. */
6722 offset
= INCOMING_FRAME_SP_OFFSET
;
6724 /* Skip pushed static chain. */
6725 if (ix86_static_chain_on_stack
)
6726 offset
+= UNITS_PER_WORD
;
6728 /* Skip saved base pointer. */
6729 if (frame_pointer_needed
)
6730 offset
+= UNITS_PER_WORD
;
6731 frame
->hfp_save_offset
= offset
;
6733 /* The traditional frame pointer location is at the top of the frame. */
6734 frame
->hard_frame_pointer_offset
= offset
;
6736 /* Register save area */
6737 offset
+= frame
->nregs
* UNITS_PER_WORD
;
6738 frame
->reg_save_offset
= offset
;
6740 /* Calculate the size of the va-arg area (not including padding, if any). */
6741 frame
->va_arg_size
= ix86_varargs_gpr_size
+ ix86_varargs_fpr_size
;
6743 /* Also adjust stack_realign_offset for the largest alignment of
6744 stack slot actually used. */
6745 if (stack_realign_fp
6746 || (cfun
->machine
->max_used_stack_alignment
!= 0
6747 && (offset
% cfun
->machine
->max_used_stack_alignment
) != 0))
6749 /* We may need a 16-byte aligned stack for the remainder of the
6750 register save area, but the stack frame for the local function
6751 may require a greater alignment if using AVX/2/512. In order
6752 to avoid wasting space, we first calculate the space needed for
6753 the rest of the register saves, add that to the stack pointer,
6754 and then realign the stack to the boundary of the start of the
6755 frame for the local function. */
6756 HOST_WIDE_INT space_needed
= 0;
6757 HOST_WIDE_INT sse_reg_space_needed
= 0;
6761 if (m
->call_ms2sysv
)
6763 m
->call_ms2sysv_pad_in
= 0;
6764 space_needed
= xlogue_layout::get_instance ().get_stack_space_used ();
6767 else if (frame
->nsseregs
)
6768 /* The only ABI that has saved SSE registers (Win64) also has a
6769 16-byte aligned default stack. However, many programs violate
6770 the ABI, and Wine64 forces stack realignment to compensate. */
6771 space_needed
= frame
->nsseregs
* 16;
6773 sse_reg_space_needed
= space_needed
= ROUND_UP (space_needed
, 16);
6775 /* 64-bit frame->va_arg_size should always be a multiple of 16, but
6776 rounding to be pedantic. */
6777 space_needed
= ROUND_UP (space_needed
+ frame
->va_arg_size
, 16);
6780 space_needed
= frame
->va_arg_size
;
6782 /* Record the allocation size required prior to the realignment AND. */
6783 frame
->stack_realign_allocate
= space_needed
;
6785 /* The re-aligned stack starts at frame->stack_realign_offset. Values
6786 before this point are not directly comparable with values below
6787 this point. Use sp_valid_at to determine if the stack pointer is
6788 valid for a given offset, fp_valid_at for the frame pointer, or
6789 choose_baseaddr to have a base register chosen for you.
6791 Note that the result of (frame->stack_realign_offset
6792 & (stack_alignment_needed - 1)) may not equal zero. */
6793 offset
= ROUND_UP (offset
+ space_needed
, stack_alignment_needed
);
6794 frame
->stack_realign_offset
= offset
- space_needed
;
6795 frame
->sse_reg_save_offset
= frame
->stack_realign_offset
6796 + sse_reg_space_needed
;
6800 frame
->stack_realign_offset
= offset
;
6802 if (TARGET_64BIT
&& m
->call_ms2sysv
)
6804 m
->call_ms2sysv_pad_in
= !!(offset
& UNITS_PER_WORD
);
6805 offset
+= xlogue_layout::get_instance ().get_stack_space_used ();
6808 /* Align and set SSE register save area. */
6809 else if (frame
->nsseregs
)
6811 /* If the incoming stack boundary is at least 16 bytes, or DRAP is
6812 required and the DRAP re-alignment boundary is at least 16 bytes,
6813 then we want the SSE register save area properly aligned. */
6814 if (ix86_incoming_stack_boundary
>= 128
6815 || (stack_realign_drap
&& stack_alignment_needed
>= 16))
6816 offset
= ROUND_UP (offset
, 16);
6817 offset
+= frame
->nsseregs
* 16;
6819 frame
->sse_reg_save_offset
= offset
;
6820 offset
+= frame
->va_arg_size
;
6823 /* Align start of frame for local function. When a function call
6824 is removed, it may become a leaf function. But if argument may
6825 be passed on stack, we need to align the stack when there is no
6828 || frame
->va_arg_size
!= 0
6831 || (!crtl
->tail_call_emit
6832 && cfun
->machine
->outgoing_args_on_stack
)
6833 || cfun
->calls_alloca
6834 || ix86_current_function_calls_tls_descriptor
)
6835 offset
= ROUND_UP (offset
, stack_alignment_needed
);
6837 /* Frame pointer points here. */
6838 frame
->frame_pointer_offset
= offset
;
6842 /* Add outgoing arguments area. Can be skipped if we eliminated
6843 all the function calls as dead code.
6844 Skipping is however impossible when function calls alloca. Alloca
6845 expander assumes that last crtl->outgoing_args_size
6846 of stack frame are unused. */
6847 if (ACCUMULATE_OUTGOING_ARGS
6848 && (!crtl
->is_leaf
|| cfun
->calls_alloca
6849 || ix86_current_function_calls_tls_descriptor
))
6851 offset
+= crtl
->outgoing_args_size
;
6852 frame
->outgoing_arguments_size
= crtl
->outgoing_args_size
;
6855 frame
->outgoing_arguments_size
= 0;
6857 /* Align stack boundary. Only needed if we're calling another function
6859 if (!crtl
->is_leaf
|| cfun
->calls_alloca
6860 || ix86_current_function_calls_tls_descriptor
)
6861 offset
= ROUND_UP (offset
, preferred_alignment
);
6863 /* We've reached end of stack frame. */
6864 frame
->stack_pointer_offset
= offset
;
6866 /* Size prologue needs to allocate. */
6867 to_allocate
= offset
- frame
->sse_reg_save_offset
;
  if ((!to_allocate && frame->nregs <= 1)
      || (TARGET_64BIT && to_allocate >= HOST_WIDE_INT_C (0x80000000))
      /* If static stack checking is enabled and done with probes,
	 the registers need to be saved before allocating the frame.  */
      || flag_stack_check == STATIC_BUILTIN_STACK_CHECK
      /* If stack clash probing needs a loop, then it needs a
	 scratch register.  But the returned register is only guaranteed
	 to be safe to use after register saves are complete.  So if
	 stack clash protections are enabled and the allocated frame is
	 larger than the probe interval, then use pushes to save
	 callee-saved registers.  */
      || (flag_stack_clash_protection
	  && !ix86_target_stack_probe ()
	  && to_allocate > get_probe_interval ()))
    frame->save_regs_using_mov = false;

  if (ix86_using_red_zone ()
      && crtl->sp_is_unchanging
      && crtl->is_leaf
      && !ix86_pc_thunk_call_expanded
      && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
  frame->stack_pointer_offset -= frame->red_zone_size;
  /* The SEH frame pointer location is near the bottom of the frame.
     This is enforced by the fact that the difference between the
     stack pointer and the frame pointer is limited to 240 bytes in
     the unwind data structure.  */
  if (TARGET_SEH && frame_pointer_needed)
    {
      /* Force the frame pointer to point at or below the lowest register save
	 area, see the SEH code in config/i386/winnt.cc for the rationale.  */
      frame->hard_frame_pointer_offset = frame->sse_reg_save_offset;

      /* If we can leave the frame pointer where it is, do so; however return
	 the establisher frame for __builtin_frame_address (0) or else if the
	 frame overflows the SEH maximum frame size.

	 Note that the value returned by __builtin_frame_address (0) is quite
	 constrained, because setjmp is piggybacked on the SEH machinery with
	 recent versions of MinGW:

	   # elif defined(__SEH__)
	   #  if defined(__aarch64__) || defined(_ARM64_)
	   #   define setjmp(BUF) _setjmp((BUF), __builtin_sponentry())
	   #  elif (__MINGW_GCC_VERSION < 40702)
	   #   define setjmp(BUF) _setjmp((BUF), mingw_getsp())
	   #  else
	   #   define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0))
	   #  endif

	 and the second argument passed to _setjmp, if not null, is forwarded
	 to the TargetFrame parameter of RtlUnwindEx by longjmp (after it has
	 built an ExceptionRecord on the fly describing the setjmp buffer).  */
      const HOST_WIDE_INT diff
	= frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
      if (diff <= 255 && !crtl->accesses_prior_frames)
	{
	  /* The resulting diff will be a multiple of 16 lower than 255,
	     i.e. at most 240 as required by the unwind data structure.  */
	  frame->hard_frame_pointer_offset += (diff & 15);
	}
      else if (diff <= SEH_MAX_FRAME_SIZE && !crtl->accesses_prior_frames)
	{
	  /* Ideally we'd determine what portion of the local stack frame
	     (within the constraint of the lowest 240) is most heavily used.
	     But without that complication, simply bias the frame pointer
	     by 128 bytes so as to maximize the amount of the local stack
	     frame that is addressable with 8-bit offsets.  */
	  frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
	}
      else
	frame->hard_frame_pointer_offset = frame->hfp_save_offset;
    }
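  /* Worked example for the SEH adjustment above (values illustrative only):
     if the computed diff is 250, then diff & 15 == 10, so the frame pointer
     is moved up by 10 bytes and the new SP-to-FP distance becomes 240, the
     largest multiple of 16 that the unwind data structure can encode.  */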
/* This is semi-inlined memory_address_length, but simplified
   since we know that we're always dealing with reg+offset, and
   to avoid having to create and discard all that rtl.  */

static unsigned int
choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
{
  unsigned int len = 4;

  if (offset == 0)
    {
      /* EBP and R13 cannot be encoded without an offset.  */
      len = (regno == BP_REG || regno == R13_REG);
    }
  else if (IN_RANGE (offset, -128, 127))
    len = 1;

  /* ESP and R12 must be encoded with a SIB byte.  */
  if (regno == SP_REG || regno == R12_REG)
    len++;

  return len;
}
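/* For illustration (these are the encoding lengths counted above, not full
   instruction sizes): 0(%rax) needs no displacement and yields 0, 0(%rbp)
   or 0(%r13) must still carry a disp8 and yields 1, 8(%rsp) needs a disp8
   plus the mandatory SIB byte and yields 2, while 1024(%rbx) needs a full
   disp32 and yields 4.  */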
/* Determine if the stack pointer is valid for accessing the CFA_OFFSET in
   the frame save area.  The register is saved at CFA - CFA_OFFSET.  */

static bool
sp_valid_at (HOST_WIDE_INT cfa_offset)
{
  const struct machine_frame_state &fs = cfun->machine->fs;
  if (fs.sp_realigned && cfa_offset <= fs.sp_realigned_offset)
    {
      /* Validate that the cfa_offset isn't in a "no-man's land".  */
      gcc_assert (cfa_offset <= fs.sp_realigned_fp_last);
      return false;
    }
  return fs.sp_valid;
}
/* Determine if the frame pointer is valid for accessing the CFA_OFFSET in
   the frame save area.  The register is saved at CFA - CFA_OFFSET.  */

static bool
fp_valid_at (HOST_WIDE_INT cfa_offset)
{
  const struct machine_frame_state &fs = cfun->machine->fs;
  if (fs.sp_realigned && cfa_offset > fs.sp_realigned_fp_last)
    {
      /* Validate that the cfa_offset isn't in a "no-man's land".  */
      gcc_assert (cfa_offset >= fs.sp_realigned_offset);
      return false;
    }
  return fs.fp_valid;
}
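/* Taken together, the two predicates above partition the CFA offsets of a
   realigned frame: offsets at or below fs.sp_realigned_fp_last may only be
   addressed through the frame pointer, offsets above fs.sp_realigned_offset
   only through the realigned stack pointer, and the gap in between, if any,
   is the "no-man's land" that neither helper should ever be asked about.  */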
7009 /* Choose a base register based upon alignment requested, speed and/or
7013 choose_basereg (HOST_WIDE_INT cfa_offset
, rtx
&base_reg
,
7014 HOST_WIDE_INT
&base_offset
,
7015 unsigned int align_reqested
, unsigned int *align
)
7017 const struct machine_function
*m
= cfun
->machine
;
7018 unsigned int hfp_align
;
7019 unsigned int drap_align
;
7020 unsigned int sp_align
;
7021 bool hfp_ok
= fp_valid_at (cfa_offset
);
7022 bool drap_ok
= m
->fs
.drap_valid
;
7023 bool sp_ok
= sp_valid_at (cfa_offset
);
7025 hfp_align
= drap_align
= sp_align
= INCOMING_STACK_BOUNDARY
;
7027 /* Filter out any registers that don't meet the requested alignment
7031 if (m
->fs
.realigned
)
7032 hfp_align
= drap_align
= sp_align
= crtl
->stack_alignment_needed
;
7033 /* SEH unwind code does do not currently support REG_CFA_EXPRESSION
7034 notes (which we would need to use a realigned stack pointer),
7035 so disable on SEH targets. */
7036 else if (m
->fs
.sp_realigned
)
7037 sp_align
= crtl
->stack_alignment_needed
;
7039 hfp_ok
= hfp_ok
&& hfp_align
>= align_reqested
;
7040 drap_ok
= drap_ok
&& drap_align
>= align_reqested
;
7041 sp_ok
= sp_ok
&& sp_align
>= align_reqested
;
7044 if (m
->use_fast_prologue_epilogue
)
7046 /* Choose the base register most likely to allow the most scheduling
7047 opportunities. Generally FP is valid throughout the function,
7048 while DRAP must be reloaded within the epilogue. But choose either
7049 over the SP due to increased encoding size. */
7053 base_reg
= hard_frame_pointer_rtx
;
7054 base_offset
= m
->fs
.fp_offset
- cfa_offset
;
7058 base_reg
= crtl
->drap_reg
;
7059 base_offset
= 0 - cfa_offset
;
7063 base_reg
= stack_pointer_rtx
;
7064 base_offset
= m
->fs
.sp_offset
- cfa_offset
;
7069 HOST_WIDE_INT toffset
;
7072 /* Choose the base register with the smallest address encoding.
7073 With a tie, choose FP > DRAP > SP. */
7076 base_reg
= stack_pointer_rtx
;
7077 base_offset
= m
->fs
.sp_offset
- cfa_offset
;
7078 len
= choose_baseaddr_len (STACK_POINTER_REGNUM
, base_offset
);
7082 toffset
= 0 - cfa_offset
;
7083 tlen
= choose_baseaddr_len (REGNO (crtl
->drap_reg
), toffset
);
7086 base_reg
= crtl
->drap_reg
;
7087 base_offset
= toffset
;
7093 toffset
= m
->fs
.fp_offset
- cfa_offset
;
7094 tlen
= choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM
, toffset
);
7097 base_reg
= hard_frame_pointer_rtx
;
7098 base_offset
= toffset
;
7103 /* Set the align return value. */
7106 if (base_reg
== stack_pointer_rtx
)
7108 else if (base_reg
== crtl
->drap_reg
)
7109 *align
= drap_align
;
7110 else if (base_reg
== hard_frame_pointer_rtx
)
/* Return an RTX that points to CFA_OFFSET within the stack frame and
   the alignment of address.  If ALIGN is non-null, it should point to
   an alignment value (in bits) that is preferred or zero and will
   receive the alignment of the base register that was selected,
   irrespective of whether or not CFA_OFFSET is a multiple of that
   alignment value.  If it is possible for the base register offset to be
   non-immediate then SCRATCH_REGNO should specify a scratch register to
   use.

   The valid base registers are taken from CFUN->MACHINE->FS.  */

static rtx
choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align,
		 unsigned int scratch_regno = INVALID_REGNUM)
{
  rtx base_reg = NULL;
  HOST_WIDE_INT base_offset = 0;

  /* If a specific alignment is requested, try to get a base register
     with that alignment first.  */
  if (align && *align)
    choose_basereg (cfa_offset, base_reg, base_offset, *align, align);

  if (!base_reg)
    choose_basereg (cfa_offset, base_reg, base_offset, 0, align);

  gcc_assert (base_reg != NULL);

  rtx base_offset_rtx = GEN_INT (base_offset);

  if (!x86_64_immediate_operand (base_offset_rtx, Pmode))
    {
      gcc_assert (scratch_regno != INVALID_REGNUM);

      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      emit_move_insn (scratch_reg, base_offset_rtx);

      return gen_rtx_PLUS (Pmode, base_reg, scratch_reg);
    }

  return plus_constant (Pmode, base_reg, base_offset);
}
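/* A typical call, mirroring the register save code further down (MODE here
   stands for whatever mode is being saved): request the mode's alignment
   and let the helper report what the chosen base register actually
   provides, e.g.

     unsigned int align = GET_MODE_ALIGNMENT (mode);
     rtx addr = choose_baseaddr (cfa_offset, &align);

   after which ALIGN holds the alignment guaranteed by the selected base.  */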
/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx_insn *insn;

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}
/* Emit a single register save at CFA - CFA_OFFSET.  */

static void
ix86_emit_save_reg_using_mov (machine_mode mode, unsigned int regno,
			      HOST_WIDE_INT cfa_offset)
{
  struct machine_function *m = cfun->machine;
  rtx reg = gen_rtx_REG (mode, regno);
  rtx mem, addr, base, insn;
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  addr = choose_baseaddr (cfa_offset, &align);
  mem = gen_frame_mem (mode, addr);

  /* The location alignment depends upon the base register.  */
  align = MIN (GET_MODE_ALIGNMENT (mode), align);
  gcc_assert (! (cfa_offset & (align / BITS_PER_UNIT - 1)));
  set_mem_align (mem, align);

  insn = emit_insn (gen_rtx_SET (mem, reg));
  RTX_FRAME_RELATED_P (insn) = 1;

  base = addr;
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  gcc_checking_assert (REG_P (base));

  /* When saving registers into a re-aligned local stack frame, avoid
     any tricky guessing by dwarf2out.  */
  if (m->fs.realigned)
    {
      gcc_checking_assert (stack_realign_drap);

      if (regno == REGNO (crtl->drap_reg))
	{
	  /* A bit of a hack.  We force the DRAP register to be saved in
	     the re-aligned stack frame, which provides us with a copy
	     of the CFA that will last past the prologue.  Install it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_DEF_CFA, mem);
	}
      else
	{
	  /* The frame pointer is a stable reference within the
	     aligned frame.  Use it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
	}
    }

  else if (base == stack_pointer_rtx && m->fs.sp_realigned
	   && cfa_offset >= m->fs.sp_realigned_offset)
    {
      gcc_checking_assert (stack_realign_fp);
      add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
    }

  /* The memory may not be relative to the current CFA register,
     which means that we may need to generate a new pattern for
     use by the unwind info.  */
  else if (base != m->fs.cfa_reg)
    {
      addr = plus_constant (Pmode, m->fs.cfa_reg,
			    m->fs.cfa_offset - cfa_offset);
      mem = gen_rtx_MEM (mode, addr);
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, reg));
    }
}
/* Emit code to save registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */

static void
ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
	ix86_emit_save_reg_using_mov (word_mode, regno, cfa_offset);
	cfa_offset -= UNITS_PER_WORD;
      }
}
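/* For example (UNITS_PER_WORD == 8 assumed), saving three registers this
   way stores them at CFA - CFA_OFFSET, CFA - CFA_OFFSET + 8 and
   CFA - CFA_OFFSET + 16: each later register lands one word closer to
   the CFA.  */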
/* Emit code to save SSE registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */

static void
ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
	ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
	cfa_offset -= GET_MODE_SIZE (V4SFmode);
      }
}
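/* The SSE variant above works the same way, except that each slot is a
   16-byte V4SFmode store, so successive registers end up
   GET_MODE_SIZE (V4SFmode) == 16 bytes apart.  */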
static GTY(()) rtx queued_cfa_restores;

/* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
   manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
   Don't add the note if the previously saved value will be left untouched
   within the stack red-zone till return, as unwinders can find the same
   value in the register and on the stack.  */

static void
ix86_add_cfa_restore_note (rtx_insn *insn, rtx reg, HOST_WIDE_INT cfa_offset)
{
  if (!crtl->shrink_wrapped
      && cfa_offset <= cfun->machine->fs.red_zone_offset)
    return;

  if (insn)
    {
      add_reg_note (insn, REG_CFA_RESTORE, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}
/* Add queued REG_CFA_RESTORE notes, if any, to INSN.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;
  if (!queued_cfa_restores)
    return;
  for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
    ;
  XEXP (last, 1) = REG_NOTES (insn);
  REG_NOTES (insn) = queued_cfa_restores;
  queued_cfa_restores = NULL_RTX;
  RTX_FRAME_RELATED_P (insn) = 1;
}
7320 /* Expand prologue or epilogue stack adjustment.
7321 The pattern exist to put a dependency on all ebp-based memory accesses.
7322 STYLE should be negative if instructions should be marked as frame related,
7323 zero if %r11 register is live and cannot be freely used and positive
7327 pro_epilogue_adjust_stack (rtx dest
, rtx src
, rtx offset
,
7328 int style
, bool set_cfa
)
7330 struct machine_function
*m
= cfun
->machine
;
7331 rtx addend
= offset
;
7333 bool add_frame_related_expr
= false;
7335 if (!x86_64_immediate_operand (offset
, Pmode
))
7337 /* r11 is used by indirect sibcall return as well, set before the
7338 epilogue and used after the epilogue. */
7340 addend
= gen_rtx_REG (Pmode
, R11_REG
);
7343 gcc_assert (src
!= hard_frame_pointer_rtx
7344 && dest
!= hard_frame_pointer_rtx
);
7345 addend
= hard_frame_pointer_rtx
;
7347 emit_insn (gen_rtx_SET (addend
, offset
));
7349 add_frame_related_expr
= true;
7352 insn
= emit_insn (gen_pro_epilogue_adjust_stack_add
7353 (Pmode
, dest
, src
, addend
));
7355 ix86_add_queued_cfa_restore_notes (insn
);
7361 gcc_assert (m
->fs
.cfa_reg
== src
);
7362 m
->fs
.cfa_offset
+= INTVAL (offset
);
7363 m
->fs
.cfa_reg
= dest
;
7365 r
= gen_rtx_PLUS (Pmode
, src
, offset
);
7366 r
= gen_rtx_SET (dest
, r
);
7367 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, r
);
7368 RTX_FRAME_RELATED_P (insn
) = 1;
7372 RTX_FRAME_RELATED_P (insn
) = 1;
7373 if (add_frame_related_expr
)
7375 rtx r
= gen_rtx_PLUS (Pmode
, src
, offset
);
7376 r
= gen_rtx_SET (dest
, r
);
7377 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, r
);
7381 if (dest
== stack_pointer_rtx
)
7383 HOST_WIDE_INT ooffset
= m
->fs
.sp_offset
;
7384 bool valid
= m
->fs
.sp_valid
;
7385 bool realigned
= m
->fs
.sp_realigned
;
7387 if (src
== hard_frame_pointer_rtx
)
7389 valid
= m
->fs
.fp_valid
;
7391 ooffset
= m
->fs
.fp_offset
;
7393 else if (src
== crtl
->drap_reg
)
7395 valid
= m
->fs
.drap_valid
;
7401 /* Else there are two possibilities: SP itself, which we set
7402 up as the default above. Or EH_RETURN_STACKADJ_RTX, which is
7403 taken care of this by hand along the eh_return path. */
7404 gcc_checking_assert (src
== stack_pointer_rtx
7405 || offset
== const0_rtx
);
7408 m
->fs
.sp_offset
= ooffset
- INTVAL (offset
);
7409 m
->fs
.sp_valid
= valid
;
7410 m
->fs
.sp_realigned
= realigned
;
7415 /* Find an available register to be used as dynamic realign argument
7416 pointer regsiter. Such a register will be written in prologue and
7417 used in begin of body, so it must not be
7418 1. parameter passing register.
7420 We reuse static-chain register if it is available. Otherwise, we
7421 use DI for i386 and R13 for x86-64. We chose R13 since it has
7424 Return: the regno of chosen register. */
7427 find_drap_reg (void)
7429 tree decl
= cfun
->decl
;
7431 /* Always use callee-saved register if there are no caller-saved
7435 /* Use R13 for nested function or function need static chain.
7436 Since function with tail call may use any caller-saved
7437 registers in epilogue, DRAP must not use caller-saved
7438 register in such case. */
7439 if (DECL_STATIC_CHAIN (decl
)
7440 || cfun
->machine
->no_caller_saved_registers
7441 || crtl
->tail_call_emit
)
7448 /* Use DI for nested function or function need static chain.
7449 Since function with tail call may use any caller-saved
7450 registers in epilogue, DRAP must not use caller-saved
7451 register in such case. */
7452 if (DECL_STATIC_CHAIN (decl
)
7453 || cfun
->machine
->no_caller_saved_registers
7454 || crtl
->tail_call_emit
7455 || crtl
->calls_eh_return
)
7458 /* Reuse static chain register if it isn't used for parameter
7460 if (ix86_function_regparm (TREE_TYPE (decl
), decl
) <= 2)
7462 unsigned int ccvt
= ix86_get_callcvt (TREE_TYPE (decl
));
7463 if ((ccvt
& (IX86_CALLCVT_FASTCALL
| IX86_CALLCVT_THISCALL
)) == 0)
/* Return minimum incoming stack alignment.  */

static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
  unsigned int incoming_stack_boundary;

  /* Stack of interrupt handler is aligned to 128 bits in 64bit mode.  */
  if (cfun->machine->func_type != TYPE_NORMAL)
    incoming_stack_boundary = TARGET_64BIT ? 128 : MIN_STACK_BOUNDARY;
  /* Prefer the one specified at command line.  */
  else if (ix86_user_incoming_stack_boundary)
    incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32bit, use MIN_STACK_BOUNDARY for incoming stack boundary
     if -mstackrealign is used, it isn't used for sibcall check and
     estimated stack alignment is 128bit.  */
  else if (!sibcall
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;

  /* Incoming stack alignment can be changed on individual functions
     via force_align_arg_pointer attribute.  We use the smallest
     incoming stack boundary.  */
  if (incoming_stack_boundary > MIN_STACK_BOUNDARY
      && lookup_attribute ("force_align_arg_pointer",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    incoming_stack_boundary = MIN_STACK_BOUNDARY;

  /* The incoming stack frame has to be aligned at least at
     parm_stack_boundary.  */
  if (incoming_stack_boundary < crtl->parm_stack_boundary)
    incoming_stack_boundary = crtl->parm_stack_boundary;

  /* Stack at entrance of main is aligned by runtime.  We use the
     smallest incoming stack boundary.  */
  if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
      && DECL_NAME (current_function_decl)
      && MAIN_NAME_P (DECL_NAME (current_function_decl))
      && DECL_FILE_SCOPE_P (current_function_decl))
    incoming_stack_boundary = MAIN_STACK_BOUNDARY;

  return incoming_stack_boundary;
}
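/* Illustrative outcomes (not exhaustive): a normal 64-bit function built
   without -mincoming-stack-boundary ends up with the 128-bit default; a
   32-bit function where -mstackrealign forced the argument pointer and the
   estimated alignment is 128 bits may report only MIN_STACK_BOUNDARY; and
   main is capped at MAIN_STACK_BOUNDARY, since the runtime aligns it.  */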
/* Update incoming stack boundary and estimated stack alignment.  */

static void
ix86_update_stack_boundary (void)
{
  ix86_incoming_stack_boundary
    = ix86_minimum_incoming_stack_boundary (false);

  /* x86_64 vararg needs 16byte stack alignment for register save area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;

  /* __tls_get_addr needs to be called with 16-byte aligned stack.  */
  if (ix86_tls_descriptor_calls_expanded_in_cfun
      && crtl->preferred_stack_boundary < 128)
    crtl->preferred_stack_boundary = 128;
}
7537 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
7538 needed or an rtx for DRAP otherwise. */
7541 ix86_get_drap_rtx (void)
7543 /* We must use DRAP if there are outgoing arguments on stack or
7544 the stack pointer register is clobbered by asm statment and
7545 ACCUMULATE_OUTGOING_ARGS is false. */
7547 || ((cfun
->machine
->outgoing_args_on_stack
7548 || crtl
->sp_is_clobbered_by_asm
)
7549 && !ACCUMULATE_OUTGOING_ARGS
))
7550 crtl
->need_drap
= true;
7552 if (stack_realign_drap
)
7554 /* Assign DRAP to vDRAP and returns vDRAP */
7555 unsigned int regno
= find_drap_reg ();
7558 rtx_insn
*seq
, *insn
;
7560 arg_ptr
= gen_rtx_REG (Pmode
, regno
);
7561 crtl
->drap_reg
= arg_ptr
;
7564 drap_vreg
= copy_to_reg (arg_ptr
);
7568 insn
= emit_insn_before (seq
, NEXT_INSN (entry_of_function ()));
7571 add_reg_note (insn
, REG_CFA_SET_VDRAP
, drap_vreg
);
7572 RTX_FRAME_RELATED_P (insn
) = 1;
7580 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
7583 ix86_internal_arg_pointer (void)
7585 return virtual_incoming_args_rtx
;
7588 struct scratch_reg
{
7593 /* Return a short-lived scratch register for use on function entry.
7594 In 32-bit mode, it is valid only after the registers are saved
7595 in the prologue. This register must be released by means of
7596 release_scratch_register_on_entry once it is dead. */
7599 get_scratch_register_on_entry (struct scratch_reg
*sr
)
7607 /* We always use R11 in 64-bit mode. */
7612 tree decl
= current_function_decl
, fntype
= TREE_TYPE (decl
);
7614 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype
)) != NULL_TREE
;
7616 = lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype
)) != NULL_TREE
;
7617 bool static_chain_p
= DECL_STATIC_CHAIN (decl
);
7618 int regparm
= ix86_function_regparm (fntype
, decl
);
7620 = crtl
->drap_reg
? REGNO (crtl
->drap_reg
) : INVALID_REGNUM
;
7622 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
7623 for the static chain register. */
7624 if ((regparm
< 1 || (fastcall_p
&& !static_chain_p
))
7625 && drap_regno
!= AX_REG
)
7627 /* 'thiscall' sets regparm to 1, uses ecx for arguments and edx
7628 for the static chain register. */
7629 else if (thiscall_p
&& !static_chain_p
&& drap_regno
!= AX_REG
)
7631 else if (regparm
< 2 && !thiscall_p
&& drap_regno
!= DX_REG
)
7633 /* ecx is the static chain register. */
7634 else if (regparm
< 3 && !fastcall_p
&& !thiscall_p
7636 && drap_regno
!= CX_REG
)
7638 else if (ix86_save_reg (BX_REG
, true, false))
7640 /* esi is the static chain register. */
7641 else if (!(regparm
== 3 && static_chain_p
)
7642 && ix86_save_reg (SI_REG
, true, false))
7644 else if (ix86_save_reg (DI_REG
, true, false))
7648 regno
= (drap_regno
== AX_REG
? DX_REG
: AX_REG
);
7653 sr
->reg
= gen_rtx_REG (Pmode
, regno
);
7656 rtx_insn
*insn
= emit_insn (gen_push (sr
->reg
));
7657 RTX_FRAME_RELATED_P (insn
) = 1;
7661 /* Release a scratch register obtained from the preceding function.
7663 If RELEASE_VIA_POP is true, we just pop the register off the stack
7664 to release it. This is what non-Linux systems use with -fstack-check.
7666 Otherwise we use OFFSET to locate the saved register and the
7667 allocated stack space becomes part of the local frame and is
7668 deallocated by the epilogue. */
7671 release_scratch_register_on_entry (struct scratch_reg
*sr
, HOST_WIDE_INT offset
,
7672 bool release_via_pop
)
7676 if (release_via_pop
)
7678 struct machine_function
*m
= cfun
->machine
;
7679 rtx x
, insn
= emit_insn (gen_pop (sr
->reg
));
7681 /* The RX FRAME_RELATED_P mechanism doesn't know about pop. */
7682 RTX_FRAME_RELATED_P (insn
) = 1;
7683 x
= plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
);
7684 x
= gen_rtx_SET (stack_pointer_rtx
, x
);
7685 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, x
);
7686 m
->fs
.sp_offset
-= UNITS_PER_WORD
;
7690 rtx x
= plus_constant (Pmode
, stack_pointer_rtx
, offset
);
7691 x
= gen_rtx_SET (sr
->reg
, gen_rtx_MEM (word_mode
, x
));
7697 /* Emit code to adjust the stack pointer by SIZE bytes while probing it.
7699 If INT_REGISTERS_SAVED is true, then integer registers have already been
7700 pushed on the stack.
7702 If PROTECTION AREA is true, then probe PROBE_INTERVAL plus a small dope
7705 This assumes no knowledge of the current probing state, i.e. it is never
7706 allowed to allocate more than PROBE_INTERVAL bytes of stack space without
7707 a suitable probe. */
7710 ix86_adjust_stack_and_probe (HOST_WIDE_INT size
,
7711 const bool int_registers_saved
,
7712 const bool protection_area
)
7714 struct machine_function
*m
= cfun
->machine
;
7716 /* If this function does not statically allocate stack space, then
7717 no probes are needed. */
7720 /* However, the allocation of space via pushes for register
7721 saves could be viewed as allocating space, but without the
7723 if (m
->frame
.nregs
|| m
->frame
.nsseregs
|| frame_pointer_needed
)
7724 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME
, true);
7726 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME
, false);
7730 /* If we are a noreturn function, then we have to consider the
7731 possibility that we're called via a jump rather than a call.
7733 Thus we don't have the implicit probe generated by saving the
7734 return address into the stack at the call. Thus, the stack
7735 pointer could be anywhere in the guard page. The safe thing
7736 to do is emit a probe now.
7738 The probe can be avoided if we have already emitted any callee
7739 register saves into the stack or have a frame pointer (which will
7740 have been saved as well). Those saves will function as implicit
7743 ?!? This should be revamped to work like aarch64 and s390 where
7744 we track the offset from the most recent probe. Normally that
7745 offset would be zero. For a noreturn function we would reset
7746 it to PROBE_INTERVAL - (STACK_BOUNDARY / BITS_PER_UNIT). Then
7747 we just probe when we cross PROBE_INTERVAL. */
7748 if (TREE_THIS_VOLATILE (cfun
->decl
)
7749 && !(m
->frame
.nregs
|| m
->frame
.nsseregs
|| frame_pointer_needed
))
7751 /* We can safely use any register here since we're just going to push
7752 its value and immediately pop it back. But we do try and avoid
7753 argument passing registers so as not to introduce dependencies in
7754 the pipeline. For 32 bit we use %esi and for 64 bit we use %rax. */
7755 rtx dummy_reg
= gen_rtx_REG (word_mode
, TARGET_64BIT
? AX_REG
: SI_REG
);
7756 rtx_insn
*insn_push
= emit_insn (gen_push (dummy_reg
));
7757 rtx_insn
*insn_pop
= emit_insn (gen_pop (dummy_reg
));
7758 m
->fs
.sp_offset
-= UNITS_PER_WORD
;
7759 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
7761 m
->fs
.cfa_offset
-= UNITS_PER_WORD
;
7762 rtx x
= plus_constant (Pmode
, stack_pointer_rtx
, -UNITS_PER_WORD
);
7763 x
= gen_rtx_SET (stack_pointer_rtx
, x
);
7764 add_reg_note (insn_push
, REG_CFA_ADJUST_CFA
, x
);
7765 RTX_FRAME_RELATED_P (insn_push
) = 1;
7766 x
= plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
);
7767 x
= gen_rtx_SET (stack_pointer_rtx
, x
);
7768 add_reg_note (insn_pop
, REG_CFA_ADJUST_CFA
, x
);
7769 RTX_FRAME_RELATED_P (insn_pop
) = 1;
7771 emit_insn (gen_blockage ());
7774 const HOST_WIDE_INT probe_interval
= get_probe_interval ();
7775 const int dope
= 4 * UNITS_PER_WORD
;
7777 /* If there is protection area, take it into account in the size. */
7778 if (protection_area
)
7779 size
+= probe_interval
+ dope
;
7781 /* If we allocate less than the size of the guard statically,
7782 then no probing is necessary, but we do need to allocate
7784 else if (size
< (1 << param_stack_clash_protection_guard_size
))
7786 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
7787 GEN_INT (-size
), -1,
7788 m
->fs
.cfa_reg
== stack_pointer_rtx
);
7789 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME
, true);
7793 /* We're allocating a large enough stack frame that we need to
7794 emit probes. Either emit them inline or in a loop depending
7796 if (size
<= 4 * probe_interval
)
7799 for (i
= probe_interval
; i
<= size
; i
+= probe_interval
)
7801 /* Allocate PROBE_INTERVAL bytes. */
7803 = pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
7804 GEN_INT (-probe_interval
), -1,
7805 m
->fs
.cfa_reg
== stack_pointer_rtx
);
7806 add_reg_note (insn
, REG_STACK_CHECK
, const0_rtx
);
7808 /* And probe at *sp. */
7809 emit_stack_probe (stack_pointer_rtx
);
7810 emit_insn (gen_blockage ());
7813 /* We need to allocate space for the residual, but we do not need
7814 to probe the residual... */
7815 HOST_WIDE_INT residual
= (i
- probe_interval
- size
);
7818 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
7819 GEN_INT (residual
), -1,
7820 m
->fs
.cfa_reg
== stack_pointer_rtx
);
7822 /* ...except if there is a protection area to maintain. */
7823 if (protection_area
)
7824 emit_stack_probe (stack_pointer_rtx
);
7827 dump_stack_clash_frame_info (PROBE_INLINE
, residual
!= 0);
7831 /* We expect the GP registers to be saved when probes are used
7832 as the probing sequences might need a scratch register and
7833 the routine to allocate one assumes the integer registers
7834 have already been saved. */
7835 gcc_assert (int_registers_saved
);
7837 struct scratch_reg sr
;
7838 get_scratch_register_on_entry (&sr
);
7840 /* If we needed to save a register, then account for any space
7841 that was pushed (we are not going to pop the register when
7842 we do the restore). */
7844 size
-= UNITS_PER_WORD
;
7846 /* Step 1: round SIZE down to a multiple of the interval. */
7847 HOST_WIDE_INT rounded_size
= size
& -probe_interval
;
7849 /* Step 2: compute final value of the loop counter. Use lea if
7851 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, -rounded_size
);
7853 if (address_no_seg_operand (addr
, Pmode
))
7854 insn
= emit_insn (gen_rtx_SET (sr
.reg
, addr
));
7857 emit_move_insn (sr
.reg
, GEN_INT (-rounded_size
));
7858 insn
= emit_insn (gen_rtx_SET (sr
.reg
,
7859 gen_rtx_PLUS (Pmode
, sr
.reg
,
7860 stack_pointer_rtx
)));
7862 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
7864 add_reg_note (insn
, REG_CFA_DEF_CFA
,
7865 plus_constant (Pmode
, sr
.reg
,
7866 m
->fs
.cfa_offset
+ rounded_size
));
7867 RTX_FRAME_RELATED_P (insn
) = 1;
7870 /* Step 3: the loop. */
7871 rtx size_rtx
= GEN_INT (rounded_size
);
7872 insn
= emit_insn (gen_adjust_stack_and_probe (Pmode
, sr
.reg
, sr
.reg
,
7874 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
7876 m
->fs
.cfa_offset
+= rounded_size
;
7877 add_reg_note (insn
, REG_CFA_DEF_CFA
,
7878 plus_constant (Pmode
, stack_pointer_rtx
,
7880 RTX_FRAME_RELATED_P (insn
) = 1;
7882 m
->fs
.sp_offset
+= rounded_size
;
7883 emit_insn (gen_blockage ());
7885 /* Step 4: adjust SP if we cannot assert at compile-time that SIZE
7886 is equal to ROUNDED_SIZE. */
7888 if (size
!= rounded_size
)
7890 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
7891 GEN_INT (rounded_size
- size
), -1,
7892 m
->fs
.cfa_reg
== stack_pointer_rtx
);
7894 if (protection_area
)
7895 emit_stack_probe (stack_pointer_rtx
);
7898 dump_stack_clash_frame_info (PROBE_LOOP
, size
!= rounded_size
);
7900 /* This does not deallocate the space reserved for the scratch
7901 register. That will be deallocated in the epilogue. */
7902 release_scratch_register_on_entry (&sr
, size
, false);
7905 /* Adjust back to account for the protection area. */
7906 if (protection_area
)
7907 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
7908 GEN_INT (probe_interval
+ dope
), -1,
7909 m
->fs
.cfa_reg
== stack_pointer_rtx
);
7911 /* Make sure nothing is scheduled before we are done. */
7912 emit_insn (gen_blockage ());
/* Adjust the stack pointer up to REG while probing it.  */

const char *
output_adjust_stack_and_probe (rtx reg)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* SP = SP + PROBE_INTERVAL.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = GEN_INT (get_probe_interval ());
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at SP.  */
  xops[1] = const0_rtx;
  output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);

  /* Test if SP == LAST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);

  /* Branch.  */
  fputs ("\tjne\t", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
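/* The loop emitted above looks roughly like this in AT&T syntax (a
   4096-byte probe interval and %r11 as the end-address register are
   assumed here; both vary):

	.LPSRL0:
		subq	$4096, %rsp
		orq	$0, (%rsp)
		cmpq	%r11, %rsp
		jne	.LPSRL0

   i.e. the stack pointer is moved one probe interval at a time and each
   newly exposed page is touched before the next adjustment.  */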
7951 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
7952 inclusive. These are offsets from the current stack pointer.
7954 INT_REGISTERS_SAVED is true if integer registers have already been
7955 pushed on the stack. */
7958 ix86_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
,
7959 const bool int_registers_saved
)
7961 const HOST_WIDE_INT probe_interval
= get_probe_interval ();
7963 /* See if we have a constant small number of probes to generate. If so,
7964 that's the easy case. The run-time loop is made up of 6 insns in the
7965 generic case while the compile-time loop is made up of n insns for n #
7967 if (size
<= 6 * probe_interval
)
7971 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
7972 it exceeds SIZE. If only one probe is needed, this will not
7973 generate any code. Then probe at FIRST + SIZE. */
7974 for (i
= probe_interval
; i
< size
; i
+= probe_interval
)
7975 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
7978 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
7982 /* Otherwise, do the same as above, but in a loop. Note that we must be
7983 extra careful with variables wrapping around because we might be at
7984 the very top (or the very bottom) of the address space and we have
7985 to be able to handle this case properly; in particular, we use an
7986 equality test for the loop condition. */
7989 /* We expect the GP registers to be saved when probes are used
7990 as the probing sequences might need a scratch register and
7991 the routine to allocate one assumes the integer registers
7992 have already been saved. */
7993 gcc_assert (int_registers_saved
);
7995 HOST_WIDE_INT rounded_size
, last
;
7996 struct scratch_reg sr
;
7998 get_scratch_register_on_entry (&sr
);
8001 /* Step 1: round SIZE to the previous multiple of the interval. */
8003 rounded_size
= ROUND_DOWN (size
, probe_interval
);
8006 /* Step 2: compute initial and final value of the loop counter. */
8008 /* TEST_OFFSET = FIRST. */
8009 emit_move_insn (sr
.reg
, GEN_INT (-first
));
8011 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
8012 last
= first
+ rounded_size
;
8019 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
8022 while (TEST_ADDR != LAST_ADDR)
8024 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
8025 until it is equal to ROUNDED_SIZE. */
8028 (gen_probe_stack_range (Pmode
, sr
.reg
, sr
.reg
, GEN_INT (-last
)));
8031 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
8032 that SIZE is equal to ROUNDED_SIZE. */
8034 if (size
!= rounded_size
)
8035 emit_stack_probe (plus_constant (Pmode
,
8036 gen_rtx_PLUS (Pmode
,
8039 rounded_size
- size
));
8041 release_scratch_register_on_entry (&sr
, size
, true);
8044 /* Make sure nothing is scheduled before we are done. */
8045 emit_insn (gen_blockage ());
8048 /* Probe a range of stack addresses from REG to END, inclusive. These are
8049 offsets from the current stack pointer. */
8052 output_probe_stack_range (rtx reg
, rtx end
)
8054 static int labelno
= 0;
8058 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
8061 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
8063 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
8065 xops
[1] = GEN_INT (get_probe_interval ());
8066 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops
);
8068 /* Probe at TEST_ADDR. */
8069 xops
[0] = stack_pointer_rtx
;
8071 xops
[2] = const0_rtx
;
8072 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops
);
8074 /* Test if TEST_ADDR == LAST_ADDR. */
8077 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops
);
8080 fputs ("\tjne\t", asm_out_file
);
8081 assemble_name_raw (asm_out_file
, loop_lab
);
8082 fputc ('\n', asm_out_file
);
8087 /* Set stack_frame_required to false if stack frame isn't required.
8088 Update STACK_ALIGNMENT to the largest alignment, in bits, of stack
8089 slot used if stack frame is required and CHECK_STACK_SLOT is true. */
8092 ix86_find_max_used_stack_alignment (unsigned int &stack_alignment
,
8093 bool check_stack_slot
)
8095 HARD_REG_SET set_up_by_prologue
, prologue_used
;
8098 CLEAR_HARD_REG_SET (prologue_used
);
8099 CLEAR_HARD_REG_SET (set_up_by_prologue
);
8100 add_to_hard_reg_set (&set_up_by_prologue
, Pmode
, STACK_POINTER_REGNUM
);
8101 add_to_hard_reg_set (&set_up_by_prologue
, Pmode
, ARG_POINTER_REGNUM
);
8102 add_to_hard_reg_set (&set_up_by_prologue
, Pmode
,
8103 HARD_FRAME_POINTER_REGNUM
);
8105 /* The preferred stack alignment is the minimum stack alignment. */
8106 if (stack_alignment
> crtl
->preferred_stack_boundary
)
8107 stack_alignment
= crtl
->preferred_stack_boundary
;
8109 bool require_stack_frame
= false;
8111 FOR_EACH_BB_FN (bb
, cfun
)
8114 FOR_BB_INSNS (bb
, insn
)
8115 if (NONDEBUG_INSN_P (insn
)
8116 && requires_stack_frame_p (insn
, prologue_used
,
8117 set_up_by_prologue
))
8119 require_stack_frame
= true;
8121 if (check_stack_slot
)
8123 /* Find the maximum stack alignment. */
8124 subrtx_iterator::array_type array
;
8125 FOR_EACH_SUBRTX (iter
, array
, PATTERN (insn
), ALL
)
8127 && (reg_mentioned_p (stack_pointer_rtx
,
8129 || reg_mentioned_p (frame_pointer_rtx
,
8132 unsigned int alignment
= MEM_ALIGN (*iter
);
8133 if (alignment
> stack_alignment
)
8134 stack_alignment
= alignment
;
8140 cfun
->machine
->stack_frame_required
= require_stack_frame
;
8143 /* Finalize stack_realign_needed and frame_pointer_needed flags, which
8144 will guide prologue/epilogue to be generated in correct form. */
8147 ix86_finalize_stack_frame_flags (void)
8149 /* Check if stack realign is really needed after reload, and
8150 stores result in cfun */
8151 unsigned int incoming_stack_boundary
8152 = (crtl
->parm_stack_boundary
> ix86_incoming_stack_boundary
8153 ? crtl
->parm_stack_boundary
: ix86_incoming_stack_boundary
);
8154 unsigned int stack_alignment
8155 = (crtl
->is_leaf
&& !ix86_current_function_calls_tls_descriptor
8156 ? crtl
->max_used_stack_slot_alignment
8157 : crtl
->stack_alignment_needed
);
8158 unsigned int stack_realign
8159 = (incoming_stack_boundary
< stack_alignment
);
8160 bool recompute_frame_layout_p
= false;
8162 if (crtl
->stack_realign_finalized
)
8164 /* After stack_realign_needed is finalized, we can't no longer
8166 gcc_assert (crtl
->stack_realign_needed
== stack_realign
);
8170 /* It is always safe to compute max_used_stack_alignment. We
8171 compute it only if 128-bit aligned load/store may be generated
8172 on misaligned stack slot which will lead to segfault. */
8173 bool check_stack_slot
8174 = (stack_realign
|| crtl
->max_used_stack_slot_alignment
>= 128);
8175 ix86_find_max_used_stack_alignment (stack_alignment
,
8178 /* If the only reason for frame_pointer_needed is that we conservatively
8179 assumed stack realignment might be needed or -fno-omit-frame-pointer
8180 is used, but in the end nothing that needed the stack alignment had
8181 been spilled nor stack access, clear frame_pointer_needed and say we
8182 don't need stack realignment.
8184 When vector register is used for piecewise move and store, we don't
8185 increase stack_alignment_needed as there is no register spill for
8186 piecewise move and store. Since stack_realign_needed is set to true
8187 by checking stack_alignment_estimated which is updated by pseudo
8188 vector register usage, we also need to check stack_realign_needed to
8189 eliminate frame pointer. */
8191 || (!flag_omit_frame_pointer
&& optimize
)
8192 || crtl
->stack_realign_needed
)
8193 && frame_pointer_needed
8195 && crtl
->sp_is_unchanging
8196 && !ix86_current_function_calls_tls_descriptor
8197 && !crtl
->accesses_prior_frames
8198 && !cfun
->calls_alloca
8199 && !crtl
->calls_eh_return
8200 /* See ira_setup_eliminable_regset for the rationale. */
8201 && !(STACK_CHECK_MOVING_SP
8204 && cfun
->can_throw_non_call_exceptions
)
8205 && !ix86_frame_pointer_required ()
8206 && ix86_get_frame_size () == 0
8207 && ix86_nsaved_sseregs () == 0
8208 && ix86_varargs_gpr_size
+ ix86_varargs_fpr_size
== 0)
8210 if (cfun
->machine
->stack_frame_required
)
8212 /* Stack frame is required. If stack alignment needed is less
8213 than incoming stack boundary, don't realign stack. */
8214 stack_realign
= incoming_stack_boundary
< stack_alignment
;
8217 crtl
->max_used_stack_slot_alignment
8218 = incoming_stack_boundary
;
8219 crtl
->stack_alignment_needed
8220 = incoming_stack_boundary
;
8221 /* Also update preferred_stack_boundary for leaf
8223 crtl
->preferred_stack_boundary
8224 = incoming_stack_boundary
;
8229 /* If drap has been set, but it actually isn't live at the
8230 start of the function, there is no reason to set it up. */
8233 basic_block bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
;
8234 if (! REGNO_REG_SET_P (DF_LR_IN (bb
),
8235 REGNO (crtl
->drap_reg
)))
8237 crtl
->drap_reg
= NULL_RTX
;
8238 crtl
->need_drap
= false;
8242 cfun
->machine
->no_drap_save_restore
= true;
8244 frame_pointer_needed
= false;
8245 stack_realign
= false;
8246 crtl
->max_used_stack_slot_alignment
= incoming_stack_boundary
;
8247 crtl
->stack_alignment_needed
= incoming_stack_boundary
;
8248 crtl
->stack_alignment_estimated
= incoming_stack_boundary
;
8249 if (crtl
->preferred_stack_boundary
> incoming_stack_boundary
)
8250 crtl
->preferred_stack_boundary
= incoming_stack_boundary
;
8251 df_finish_pass (true);
8252 df_scan_alloc (NULL
);
8254 df_compute_regs_ever_live (true);
8257 if (flag_var_tracking
)
8259 /* Since frame pointer is no longer available, replace it with
8260 stack pointer - UNITS_PER_WORD in debug insns. */
8262 for (ref
= DF_REG_USE_CHAIN (HARD_FRAME_POINTER_REGNUM
);
8265 next
= DF_REF_NEXT_REG (ref
);
8266 if (!DF_REF_INSN_INFO (ref
))
8269 /* Make sure the next ref is for a different instruction,
8270 so that we're not affected by the rescan. */
8271 rtx_insn
*insn
= DF_REF_INSN (ref
);
8272 while (next
&& DF_REF_INSN (next
) == insn
)
8273 next
= DF_REF_NEXT_REG (next
);
8275 if (DEBUG_INSN_P (insn
))
8277 bool changed
= false;
8278 for (; ref
!= next
; ref
= DF_REF_NEXT_REG (ref
))
8280 rtx
*loc
= DF_REF_LOC (ref
);
8281 if (*loc
== hard_frame_pointer_rtx
)
8283 *loc
= plus_constant (Pmode
,
8290 df_insn_rescan (insn
);
8295 recompute_frame_layout_p
= true;
8298 else if (crtl
->max_used_stack_slot_alignment
>= 128
8299 && cfun
->machine
->stack_frame_required
)
8301 /* We don't need to realign stack. max_used_stack_alignment is
8302 used to decide how stack frame should be aligned. This is
8303 independent of any psABIs nor 32-bit vs 64-bit. */
8304 cfun
->machine
->max_used_stack_alignment
8305 = stack_alignment
/ BITS_PER_UNIT
;
8308 if (crtl
->stack_realign_needed
!= stack_realign
)
8309 recompute_frame_layout_p
= true;
8310 crtl
->stack_realign_needed
= stack_realign
;
8311 crtl
->stack_realign_finalized
= true;
8312 if (recompute_frame_layout_p
)
8313 ix86_compute_frame_layout ();
/* Delete SET_GOT right after entry block if it is allocated to reg.  */

static void
ix86_elim_entry_set_got (rtx reg)
{
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  rtx_insn *c_insn = BB_HEAD (bb);
  if (!NONDEBUG_INSN_P (c_insn))
    c_insn = next_nonnote_nondebug_insn (c_insn);
  if (c_insn && NONJUMP_INSN_P (c_insn))
    {
      rtx pat = PATTERN (c_insn);
      if (GET_CODE (pat) == PARALLEL)
	{
	  rtx vec = XVECEXP (pat, 0, 0);
	  if (GET_CODE (vec) == SET
	      && XINT (XEXP (vec, 1), 1) == UNSPEC_SET_GOT
	      && REGNO (XEXP (vec, 0)) == REGNO (reg))
	    delete_insn (c_insn);
	}
    }
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = plus_constant (Pmode, frame_reg, offset);
  mem = gen_frame_mem (GET_MODE (reg), offset ? addr : frame_reg);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
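/* For example, gen_frame_store (reg, rax, -16), with REG an SSE register,
   yields on a 64-bit target (set (mem:V4SF (plus:DI rax (const_int -16)))
   reg); this is how the out-of-line ms->sysv save stub parallel built below
   addresses its slots relative to RAX.  */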
8363 ix86_emit_outlined_ms2sysv_save (const struct ix86_frame
&frame
)
8365 struct machine_function
*m
= cfun
->machine
;
8366 const unsigned ncregs
= NUM_X86_64_MS_CLOBBERED_REGS
8367 + m
->call_ms2sysv_extra_regs
;
8368 rtvec v
= rtvec_alloc (ncregs
+ 1);
8369 unsigned int align
, i
, vi
= 0;
8372 rtx rax
= gen_rtx_REG (word_mode
, AX_REG
);
8373 const class xlogue_layout
&xlogue
= xlogue_layout::get_instance ();
8375 /* AL should only be live with sysv_abi. */
8376 gcc_assert (!ix86_eax_live_at_start_p ());
8377 gcc_assert (m
->fs
.sp_offset
>= frame
.sse_reg_save_offset
);
8379 /* Setup RAX as the stub's base pointer. We use stack_realign_offset rather
8380 we've actually realigned the stack or not. */
8381 align
= GET_MODE_ALIGNMENT (V4SFmode
);
8382 addr
= choose_baseaddr (frame
.stack_realign_offset
8383 + xlogue
.get_stub_ptr_offset (), &align
, AX_REG
);
8384 gcc_assert (align
>= GET_MODE_ALIGNMENT (V4SFmode
));
8386 emit_insn (gen_rtx_SET (rax
, addr
));
8388 /* Get the stub symbol. */
8389 sym
= xlogue
.get_stub_rtx (frame_pointer_needed
? XLOGUE_STUB_SAVE_HFP
8390 : XLOGUE_STUB_SAVE
);
8391 RTVEC_ELT (v
, vi
++) = gen_rtx_USE (VOIDmode
, sym
);
8393 for (i
= 0; i
< ncregs
; ++i
)
8395 const xlogue_layout::reginfo
&r
= xlogue
.get_reginfo (i
);
8396 rtx reg
= gen_rtx_REG ((SSE_REGNO_P (r
.regno
) ? V4SFmode
: word_mode
),
8398 RTVEC_ELT (v
, vi
++) = gen_frame_store (reg
, rax
, -r
.offset
);
8401 gcc_assert (vi
== (unsigned)GET_NUM_ELEM (v
));
8403 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, v
));
8404 RTX_FRAME_RELATED_P (insn
) = true;
/* Generate and return an insn body to AND X with Y.  */

static rtx_insn *
gen_and2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (and_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
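/* The prologue below uses this helper to realign the stack pointer in
   place; for instance emit_insn (gen_and2_insn (stack_pointer_rtx,
   GEN_INT (-16))) ends up as an "and $-16, %rsp" style instruction (the
   actual constant comes from crtl->stack_alignment_needed).  */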
8421 /* Expand the prologue into a bunch of separate insns. */
8424 ix86_expand_prologue (void)
8426 struct machine_function
*m
= cfun
->machine
;
8428 HOST_WIDE_INT allocate
;
8429 bool int_registers_saved
;
8430 bool sse_registers_saved
;
8431 bool save_stub_call_needed
;
8432 rtx static_chain
= NULL_RTX
;
8434 ix86_last_zero_store_uid
= 0;
8435 if (ix86_function_naked (current_function_decl
))
8437 if (flag_stack_usage_info
)
8438 current_function_static_stack_size
= 0;
8442 ix86_finalize_stack_frame_flags ();
8444 /* DRAP should not coexist with stack_realign_fp */
8445 gcc_assert (!(crtl
->drap_reg
&& stack_realign_fp
));
8447 memset (&m
->fs
, 0, sizeof (m
->fs
));
8449 /* Initialize CFA state for before the prologue. */
8450 m
->fs
.cfa_reg
= stack_pointer_rtx
;
8451 m
->fs
.cfa_offset
= INCOMING_FRAME_SP_OFFSET
;
8453 /* Track SP offset to the CFA. We continue tracking this after we've
8454 swapped the CFA register away from SP. In the case of re-alignment
8455 this is fudged; we're interested to offsets within the local frame. */
8456 m
->fs
.sp_offset
= INCOMING_FRAME_SP_OFFSET
;
8457 m
->fs
.sp_valid
= true;
8458 m
->fs
.sp_realigned
= false;
8460 const struct ix86_frame
&frame
= cfun
->machine
->frame
;
8462 if (!TARGET_64BIT
&& ix86_function_ms_hook_prologue (current_function_decl
))
8464 /* We should have already generated an error for any use of
8465 ms_hook on a nested function. */
8466 gcc_checking_assert (!ix86_static_chain_on_stack
);
8468 /* Check if profiling is active and we shall use profiling before
8469 prologue variant. If so sorry. */
8470 if (crtl
->profile
&& flag_fentry
!= 0)
8471 sorry ("%<ms_hook_prologue%> attribute is not compatible "
8472 "with %<-mfentry%> for 32-bit");
8474 /* In ix86_asm_output_function_label we emitted:
8475 8b ff movl.s %edi,%edi
8477 8b ec movl.s %esp,%ebp
8479 This matches the hookable function prologue in Win32 API
8480 functions in Microsoft Windows XP Service Pack 2 and newer.
8481 Wine uses this to enable Windows apps to hook the Win32 API
8482 functions provided by Wine.
8484 What that means is that we've already set up the frame pointer. */
8486 if (frame_pointer_needed
8487 && !(crtl
->drap_reg
&& crtl
->stack_realign_needed
))
8491 /* We've decided to use the frame pointer already set up.
8492 Describe this to the unwinder by pretending that both
8493 push and mov insns happen right here.
8495 Putting the unwind info here at the end of the ms_hook
8496 is done so that we can make absolutely certain we get
8497 the required byte sequence at the start of the function,
8498 rather than relying on an assembler that can produce
8499 the exact encoding required.
8501 However it does mean (in the unpatched case) that we have
8502 a 1 insn window where the asynchronous unwind info is
8503 incorrect. However, if we placed the unwind info at
8504 its correct location we would have incorrect unwind info
8505 in the patched case. Which is probably all moot since
8506 I don't expect Wine generates dwarf2 unwind info for the
8507 system libraries that use this feature. */
8509 insn
= emit_insn (gen_blockage ());
8511 push
= gen_push (hard_frame_pointer_rtx
);
8512 mov
= gen_rtx_SET (hard_frame_pointer_rtx
,
8514 RTX_FRAME_RELATED_P (push
) = 1;
8515 RTX_FRAME_RELATED_P (mov
) = 1;
8517 RTX_FRAME_RELATED_P (insn
) = 1;
8518 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8519 gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, push
, mov
)));
8521 /* Note that gen_push incremented m->fs.cfa_offset, even
8522 though we didn't emit the push insn here. */
8523 m
->fs
.cfa_reg
= hard_frame_pointer_rtx
;
8524 m
->fs
.fp_offset
= m
->fs
.cfa_offset
;
8525 m
->fs
.fp_valid
= true;
8529 /* The frame pointer is not needed so pop %ebp again.
8530 This leaves us with a pristine state. */
8531 emit_insn (gen_pop (hard_frame_pointer_rtx
));
8535 /* The first insn of a function that accepts its static chain on the
8536 stack is to push the register that would be filled in by a direct
8537 call. This insn will be skipped by the trampoline. */
8538 else if (ix86_static_chain_on_stack
)
8540 static_chain
= ix86_static_chain (cfun
->decl
, false);
8541 insn
= emit_insn (gen_push (static_chain
));
8542 emit_insn (gen_blockage ());
8544 /* We don't want to interpret this push insn as a register save,
8545 only as a stack adjustment. The real copy of the register as
8546 a save will be done later, if needed. */
8547 t
= plus_constant (Pmode
, stack_pointer_rtx
, -UNITS_PER_WORD
);
8548 t
= gen_rtx_SET (stack_pointer_rtx
, t
);
8549 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, t
);
8550 RTX_FRAME_RELATED_P (insn
) = 1;
8553 /* Emit prologue code to adjust stack alignment and setup DRAP, in case
8554 of DRAP is needed and stack realignment is really needed after reload */
8555 if (stack_realign_drap
)
8557 int align_bytes
= crtl
->stack_alignment_needed
/ BITS_PER_UNIT
;
8559 /* Can't use DRAP in interrupt function. */
8560 if (cfun
->machine
->func_type
!= TYPE_NORMAL
)
8561 sorry ("Dynamic Realign Argument Pointer (DRAP) not supported "
8562 "in interrupt service routine. This may be worked "
8563 "around by avoiding functions with aggregate return.");
8565 /* Only need to push parameter pointer reg if it is caller saved. */
8566 if (!call_used_or_fixed_reg_p (REGNO (crtl
->drap_reg
)))
8568 /* Push arg pointer reg */
8569 insn
= emit_insn (gen_push (crtl
->drap_reg
));
8570 RTX_FRAME_RELATED_P (insn
) = 1;
8573 /* Grab the argument pointer. */
8574 t
= plus_constant (Pmode
, stack_pointer_rtx
, m
->fs
.sp_offset
);
8575 insn
= emit_insn (gen_rtx_SET (crtl
->drap_reg
, t
));
8576 RTX_FRAME_RELATED_P (insn
) = 1;
8577 m
->fs
.cfa_reg
= crtl
->drap_reg
;
8578 m
->fs
.cfa_offset
= 0;
8580 /* Align the stack. */
8581 insn
= emit_insn (gen_and2_insn (stack_pointer_rtx
,
8582 GEN_INT (-align_bytes
)));
8583 RTX_FRAME_RELATED_P (insn
) = 1;
8585 /* Replicate the return address on the stack so that return
8586 address can be reached via (argp - 1) slot. This is needed
8587 to implement macro RETURN_ADDR_RTX and intrinsic function
8588 expand_builtin_return_addr etc. */
8589 t
= plus_constant (Pmode
, crtl
->drap_reg
, -UNITS_PER_WORD
);
8590 t
= gen_frame_mem (word_mode
, t
);
8591 insn
= emit_insn (gen_push (t
));
8592 RTX_FRAME_RELATED_P (insn
) = 1;
8594 /* For the purposes of frame and register save area addressing,
8595 we've started over with a new frame. */
8596 m
->fs
.sp_offset
= INCOMING_FRAME_SP_OFFSET
;
8597 m
->fs
.realigned
= true;
8601 /* Replicate static chain on the stack so that static chain
8602 can be reached via (argp - 2) slot. This is needed for
8603 nested function with stack realignment. */
8604 insn
= emit_insn (gen_push (static_chain
));
8605 RTX_FRAME_RELATED_P (insn
) = 1;
8609 int_registers_saved
= (frame
.nregs
== 0);
8610 sse_registers_saved
= (frame
.nsseregs
== 0);
8611 save_stub_call_needed
= (m
->call_ms2sysv
);
8612 gcc_assert (sse_registers_saved
|| !save_stub_call_needed
);
8614 if (frame_pointer_needed
&& !m
->fs
.fp_valid
)
8616 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8617 slower on all targets. Also sdb didn't like it. */
8618 insn
= emit_insn (gen_push (hard_frame_pointer_rtx
));
8619 RTX_FRAME_RELATED_P (insn
) = 1;
8621 if (m
->fs
.sp_offset
== frame
.hard_frame_pointer_offset
)
8623 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
8624 RTX_FRAME_RELATED_P (insn
) = 1;
8626 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
8627 m
->fs
.cfa_reg
= hard_frame_pointer_rtx
;
8628 m
->fs
.fp_offset
= m
->fs
.sp_offset
;
8629 m
->fs
.fp_valid
= true;
8633 if (!int_registers_saved
)
8635 /* If saving registers via PUSH, do so now. */
8636 if (!frame
.save_regs_using_mov
)
8638 ix86_emit_save_regs ();
8639 int_registers_saved
= true;
8640 gcc_assert (m
->fs
.sp_offset
== frame
.reg_save_offset
);
8643 /* When using red zone we may start register saving before allocating
8644 the stack frame saving one cycle of the prologue. However, avoid
8645 doing this if we have to probe the stack; at least on x86_64 the
8646 stack probe can turn into a call that clobbers a red zone location. */
8647 else if (ix86_using_red_zone ()
8648 && (! TARGET_STACK_PROBE
8649 || frame
.stack_pointer_offset
< CHECK_STACK_LIMIT
))
8651 ix86_emit_save_regs_using_mov (frame
.reg_save_offset
);
8652 cfun
->machine
->red_zone_used
= true;
8653 int_registers_saved
= true;
8657 if (frame
.red_zone_size
!= 0)
8658 cfun
->machine
->red_zone_used
= true;
8660 if (stack_realign_fp
)
8662 int align_bytes
= crtl
->stack_alignment_needed
/ BITS_PER_UNIT
;
8663 gcc_assert (align_bytes
> MIN_STACK_BOUNDARY
/ BITS_PER_UNIT
);
8665 /* Record last valid frame pointer offset. */
8666 m
->fs
.sp_realigned_fp_last
= frame
.reg_save_offset
;
8668 /* The computation of the size of the re-aligned stack frame means
8669 that we must allocate the size of the register save area before
8670 performing the actual alignment. Otherwise we cannot guarantee
8671 that there's enough storage above the realignment point. */
8672 allocate
= frame
.reg_save_offset
- m
->fs
.sp_offset
8673 + frame
.stack_realign_allocate
;
8675 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
8676 GEN_INT (-allocate
), -1, false);
8678 /* Align the stack. */
8679 emit_insn (gen_and2_insn (stack_pointer_rtx
, GEN_INT (-align_bytes
)));
8680 m
->fs
.sp_offset
= ROUND_UP (m
->fs
.sp_offset
, align_bytes
);
8681 m
->fs
.sp_realigned_offset
= m
->fs
.sp_offset
8682 - frame
.stack_realign_allocate
;
8683 /* The stack pointer may no longer be equal to CFA - m->fs.sp_offset.
8684 Beyond this point, stack access should be done via choose_baseaddr or
8685 by using sp_valid_at and fp_valid_at to determine the correct base
8686 register. Henceforth, any CFA offset should be thought of as logical
8687 and not physical. */
8688 gcc_assert (m
->fs
.sp_realigned_offset
>= m
->fs
.sp_realigned_fp_last
);
8689 gcc_assert (m
->fs
.sp_realigned_offset
== frame
.stack_realign_offset
);
8690 m
->fs
.sp_realigned
= true;
8692 /* SEH unwind emit doesn't currently support REG_CFA_EXPRESSION, which
8693 is needed to describe where a register is saved using a realigned
8694 stack pointer, so we need to invalidate the stack pointer for that
8697 m
->fs
.sp_valid
= false;
8699 /* If SP offset is non-immediate after allocation of the stack frame,
8700 then emit SSE saves or stub call prior to allocating the rest of the
8701 stack frame. This is less efficient for the out-of-line stub because
8702 we can't combine allocations across the call barrier, but it's better
8703 than using a scratch register. */
8704 else if (!x86_64_immediate_operand (GEN_INT (frame
.stack_pointer_offset
8705 - m
->fs
.sp_realigned_offset
),
8708 if (!sse_registers_saved
)
8710 ix86_emit_save_sse_regs_using_mov (frame
.sse_reg_save_offset
);
8711 sse_registers_saved
= true;
8713 else if (save_stub_call_needed
)
8715 ix86_emit_outlined_ms2sysv_save (frame
);
8716 save_stub_call_needed
= false;
8721 allocate
= frame
.stack_pointer_offset
- m
->fs
.sp_offset
;
8723 if (flag_stack_usage_info
)
8725 /* We start to count from ARG_POINTER. */
8726 HOST_WIDE_INT stack_size
= frame
.stack_pointer_offset
;
8728 /* If it was realigned, take into account the fake frame. */
8729 if (stack_realign_drap
)
8731 if (ix86_static_chain_on_stack
)
8732 stack_size
+= UNITS_PER_WORD
;
8734 if (!call_used_or_fixed_reg_p (REGNO (crtl
->drap_reg
)))
8735 stack_size
+= UNITS_PER_WORD
;
8737 /* This over-estimates by 1 minimal-stack-alignment-unit but
8738 mitigates that by counting in the new return address slot. */
8739 current_function_dynamic_stack_size
8740 += crtl
->stack_alignment_needed
/ BITS_PER_UNIT
;
8743 current_function_static_stack_size
= stack_size
;
8746 /* On SEH target with very large frame size, allocate an area to save
8747 SSE registers (as the very large allocation won't be described). */
8749 && frame
.stack_pointer_offset
> SEH_MAX_FRAME_SIZE
8750 && !sse_registers_saved
)
8752 HOST_WIDE_INT sse_size
8753 = frame
.sse_reg_save_offset
- frame
.reg_save_offset
;
8755 gcc_assert (int_registers_saved
);
8757 /* No need to do stack checking as the area will be immediately
8759 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
8760 GEN_INT (-sse_size
), -1,
8761 m
->fs
.cfa_reg
== stack_pointer_rtx
);
8762 allocate
-= sse_size
;
8763 ix86_emit_save_sse_regs_using_mov (frame
.sse_reg_save_offset
);
8764 sse_registers_saved
= true;
8767 /* If stack clash protection is requested, then probe the stack, unless it
8768 is already probed on the target. */
8770 && flag_stack_clash_protection
8771 && !ix86_target_stack_probe ())
8773 ix86_adjust_stack_and_probe (allocate
, int_registers_saved
, false);
8777 /* The stack has already been decremented by the instruction calling us
8778 so probe if the size is non-negative to preserve the protection area. */
8779 else if (allocate
>= 0 && flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
8781 const HOST_WIDE_INT probe_interval
= get_probe_interval ();
8783 if (STACK_CHECK_MOVING_SP
)
8786 && !cfun
->calls_alloca
8787 && allocate
<= probe_interval
)
8792 ix86_adjust_stack_and_probe (allocate
, int_registers_saved
, true);
8799 HOST_WIDE_INT size
= allocate
;
8801 if (TARGET_64BIT
&& size
>= HOST_WIDE_INT_C (0x80000000))
8802 size
= 0x80000000 - get_stack_check_protect () - 1;
8804 if (TARGET_STACK_PROBE
)
8806 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
8808 if (size
> probe_interval
)
8809 ix86_emit_probe_stack_range (0, size
, int_registers_saved
);
8812 ix86_emit_probe_stack_range (0,
8813 size
+ get_stack_check_protect (),
8814 int_registers_saved
);
8818 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
8820 if (size
> probe_interval
8821 && size
> get_stack_check_protect ())
8822 ix86_emit_probe_stack_range (get_stack_check_protect (),
8824 - get_stack_check_protect ()),
8825 int_registers_saved
);
8828 ix86_emit_probe_stack_range (get_stack_check_protect (), size
,
8829 int_registers_saved
);
8836 else if (!ix86_target_stack_probe ()
8837 || frame
.stack_pointer_offset
< CHECK_STACK_LIMIT
)
8839 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
8840 GEN_INT (-allocate
), -1,
8841 m
->fs
.cfa_reg
== stack_pointer_rtx
);
8845 rtx eax
= gen_rtx_REG (Pmode
, AX_REG
);
8847 const bool sp_is_cfa_reg
= (m
->fs
.cfa_reg
== stack_pointer_rtx
);
8848 bool eax_live
= ix86_eax_live_at_start_p ();
8849 bool r10_live
= false;
8852 r10_live
= (DECL_STATIC_CHAIN (current_function_decl
) != 0);
8856 insn
= emit_insn (gen_push (eax
));
8857 allocate
-= UNITS_PER_WORD
;
8858 /* Note that SEH directives need to continue tracking the stack
8859 pointer even after the frame pointer has been set up. */
8860 if (sp_is_cfa_reg
|| TARGET_SEH
)
8863 m
->fs
.cfa_offset
+= UNITS_PER_WORD
;
8864 RTX_FRAME_RELATED_P (insn
) = 1;
8865 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8866 gen_rtx_SET (stack_pointer_rtx
,
8867 plus_constant (Pmode
,
8875 r10
= gen_rtx_REG (Pmode
, R10_REG
);
8876 insn
= emit_insn (gen_push (r10
));
8877 allocate
-= UNITS_PER_WORD
;
8878 if (sp_is_cfa_reg
|| TARGET_SEH
)
8881 m
->fs
.cfa_offset
+= UNITS_PER_WORD
;
8882 RTX_FRAME_RELATED_P (insn
) = 1;
8883 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8884 gen_rtx_SET (stack_pointer_rtx
,
8885 plus_constant (Pmode
,
8891 emit_move_insn (eax
, GEN_INT (allocate
));
8892 emit_insn (gen_allocate_stack_worker_probe (Pmode
, eax
, eax
));
8894 /* Use the fact that AX still contains ALLOCATE. */
8895 insn
= emit_insn (gen_pro_epilogue_adjust_stack_sub
8896 (Pmode
, stack_pointer_rtx
, stack_pointer_rtx
, eax
));
8898 if (sp_is_cfa_reg
|| TARGET_SEH
)
8901 m
->fs
.cfa_offset
+= allocate
;
8902 RTX_FRAME_RELATED_P (insn
) = 1;
8903 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
8904 gen_rtx_SET (stack_pointer_rtx
,
8905 plus_constant (Pmode
, stack_pointer_rtx
,
8908 m
->fs
.sp_offset
+= allocate
;
8910 /* Use stack_pointer_rtx for relative addressing so that code works for
8911 realigned stack. But this means that we need a blockage to prevent
8912 stores based on the frame pointer from being scheduled before. */
8913 if (r10_live
&& eax_live
)
8915 t
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eax
);
8916 emit_move_insn (gen_rtx_REG (word_mode
, R10_REG
),
8917 gen_frame_mem (word_mode
, t
));
8918 t
= plus_constant (Pmode
, t
, UNITS_PER_WORD
);
8919 emit_move_insn (gen_rtx_REG (word_mode
, AX_REG
),
8920 gen_frame_mem (word_mode
, t
));
8921 emit_insn (gen_memory_blockage ());
8923 else if (eax_live
|| r10_live
)
8925 t
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eax
);
8926 emit_move_insn (gen_rtx_REG (word_mode
,
8927 (eax_live
? AX_REG
: R10_REG
)),
8928 gen_frame_mem (word_mode
, t
));
8929 emit_insn (gen_memory_blockage ());
8932 gcc_assert (m
->fs
.sp_offset
== frame
.stack_pointer_offset
);
/* If we haven't already set up the frame pointer, do so now.  */
8935 if (frame_pointer_needed
&& !m
->fs
.fp_valid
)
8937 insn
= gen_add3_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
,
8938 GEN_INT (frame
.stack_pointer_offset
8939 - frame
.hard_frame_pointer_offset
));
8940 insn
= emit_insn (insn
);
8941 RTX_FRAME_RELATED_P (insn
) = 1;
8942 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, NULL
);
8944 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
8945 m
->fs
.cfa_reg
= hard_frame_pointer_rtx
;
8946 m
->fs
.fp_offset
= frame
.hard_frame_pointer_offset
;
8947 m
->fs
.fp_valid
= true;
8950 if (!int_registers_saved
)
8951 ix86_emit_save_regs_using_mov (frame
.reg_save_offset
);
8952 if (!sse_registers_saved
)
8953 ix86_emit_save_sse_regs_using_mov (frame
.sse_reg_save_offset
);
8954 else if (save_stub_call_needed
)
8955 ix86_emit_outlined_ms2sysv_save (frame
);
8957 /* For the mcount profiling on 32 bit PIC mode we need to emit SET_GOT
8959 if (!TARGET_64BIT
&& pic_offset_table_rtx
&& crtl
->profile
&& !flag_fentry
)
8961 rtx pic
= gen_rtx_REG (Pmode
, REAL_PIC_OFFSET_TABLE_REGNUM
);
8962 insn
= emit_insn (gen_set_got (pic
));
8963 RTX_FRAME_RELATED_P (insn
) = 1;
8964 add_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL_RTX
);
8965 emit_insn (gen_prologue_use (pic
));
/* Delete the already emitted SET_GOT if it exists and is allocated to
   REAL_PIC_OFFSET_TABLE_REGNUM.  */
8968 ix86_elim_entry_set_got (pic
);
8971 if (crtl
->drap_reg
&& !crtl
->stack_realign_needed
)
8973 /* vDRAP is setup but after reload it turns out stack realign
8974 isn't necessary, here we will emit prologue to setup DRAP
8975 without stack realign adjustment */
8976 t
= choose_baseaddr (0, NULL
);
8977 emit_insn (gen_rtx_SET (crtl
->drap_reg
, t
));
8980 /* Prevent instructions from being scheduled into register save push
8981 sequence when access to the redzone area is done through frame pointer.
8982 The offset between the frame pointer and the stack pointer is calculated
8983 relative to the value of the stack pointer at the end of the function
8984 prologue, and moving instructions that access redzone area via frame
8985 pointer inside push sequence violates this assumption. */
8986 if (frame_pointer_needed
&& frame
.red_zone_size
)
8987 emit_insn (gen_memory_blockage ());
8989 /* SEH requires that the prologue end within 256 bytes of the start of
8990 the function. Prevent instruction schedules that would extend that.
8991 Further, prevent alloca modifications to the stack pointer from being
8992 combined with prologue modifications. */
8994 emit_insn (gen_prologue_use (stack_pointer_rtx
));
/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg)
{
  struct machine_function *m = cfun->machine;
  rtx_insn *insn = emit_insn (gen_pop (reg));

  ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
  m->fs.sp_offset -= UNITS_PER_WORD;

  if (m->fs.cfa_reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
	 like *(%ebp - 8).  We've just popped that value from
	 the stack, which means we need to reset the CFA to
	 the drap register.  This will remain until we restore
	 the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* This means that the DRAP register is valid for addressing too.  */
      m->fs.drap_valid = true;
      return;
    }

  if (m->fs.cfa_reg == stack_pointer_rtx)
    {
      rtx x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
      x = gen_rtx_SET (stack_pointer_rtx, x);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
      RTX_FRAME_RELATED_P (insn) = 1;

      m->fs.cfa_offset -= UNITS_PER_WORD;
    }

  /* When the frame pointer is the CFA, and we pop it, we are
     swapping back to the stack pointer as the CFA.  This happens
     for stack frames that don't allocate other data, so we assume
     the stack pointer is now pointing at the return address, i.e.
     the function entry state, which makes the offset be 1 word.  */
  if (reg == hard_frame_pointer_rtx)
    {
      m->fs.fp_valid = false;
      if (m->fs.cfa_reg == hard_frame_pointer_rtx)
	{
	  m->fs.cfa_reg = stack_pointer_rtx;
	  m->fs.cfa_offset -= UNITS_PER_WORD;

	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, stack_pointer_rtx,
				       m->fs.cfa_offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
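/* Note (added for clarity, not from the original source): the notes
   attached above drive the CFI output.  REG_CFA_ADJUST_CFA records how
   the pop moves the CFA when the stack pointer is the CFA register,
   while REG_CFA_DEF_CFA re-bases the CFA on a new register (the DRAP
   register or the stack pointer).  */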
/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (void)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, false, true))
      ix86_emit_restore_reg_using_pop (gen_rtx_REG (word_mode, regno));
}
/* Emit code and notes for the LEAVE instruction.  If insn is non-null,
   omits the emit and only attaches the notes.  */

static void
ix86_emit_leave (rtx_insn *insn)
{
  struct machine_function *m = cfun->machine;

  if (!insn)
    insn = emit_insn (gen_leave (word_mode));

  ix86_add_queued_cfa_restore_notes (insn);

  gcc_assert (m->fs.fp_valid);
  m->fs.sp_valid = true;
  m->fs.sp_realigned = false;
  m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
  m->fs.fp_valid = false;

  if (m->fs.cfa_reg == hard_frame_pointer_rtx)
    {
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = m->fs.sp_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
		    plus_constant (Pmode, stack_pointer_rtx,
				   m->fs.sp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
			     m->fs.fp_offset);
}
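/* Note on the state update above: "leave" is equivalent to moving the
   hard frame pointer into the stack pointer and then popping the saved
   frame pointer, which is why sp_offset becomes fp_offset minus one
   word once the insn retires.  */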
/* Emit code to restore saved registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
				  bool maybe_eh_return)
{
  struct machine_function *m = cfun->machine;
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return, true))
      {
	rtx reg = gen_rtx_REG (word_mode, regno);
	rtx mem;
	rtx_insn *insn;

	mem = choose_baseaddr (cfa_offset, NULL);
	mem = gen_frame_mem (word_mode, mem);
	insn = emit_move_insn (reg, mem);

	if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
	  {
	    /* Previously we'd represented the CFA as an expression
	       like *(%ebp - 8).  We've just popped that value from
	       the stack, which means we need to reset the CFA to
	       the drap register.  This will remain until we restore
	       the stack pointer.  */
	    add_reg_note (insn, REG_CFA_DEF_CFA, reg);
	    RTX_FRAME_RELATED_P (insn) = 1;

	    /* This means that the DRAP register is valid for addressing.  */
	    m->fs.drap_valid = true;
	  }
	else
	  ix86_add_cfa_restore_note (NULL, reg, cfa_offset);

	cfa_offset -= UNITS_PER_WORD;
      }
}
/* Emit code to restore saved SSE registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
				      bool maybe_eh_return)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return, true))
      {
	rtx reg = gen_rtx_REG (V4SFmode, regno);
	rtx mem;
	unsigned int align = GET_MODE_ALIGNMENT (V4SFmode);

	mem = choose_baseaddr (cfa_offset, &align);
	mem = gen_rtx_MEM (V4SFmode, mem);

	/* The location alignment depends upon the base register.  */
	align = MIN (GET_MODE_ALIGNMENT (V4SFmode), align);
	gcc_assert (! (cfa_offset & (align / BITS_PER_UNIT - 1)));
	set_mem_align (mem, align);
	emit_insn (gen_rtx_SET (reg, mem));

	ix86_add_cfa_restore_note (NULL, reg, cfa_offset);

	cfa_offset -= GET_MODE_SIZE (V4SFmode);
      }
}
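/* Note on the SSE restores above: V4SFmode is used no matter how each
   register's contents are actually typed; a full 16-byte vector move
   restores the exact bit pattern, and (presumably the reason for the
   choice) the SF-vector move has the most compact encoding.  */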
9171 ix86_emit_outlined_ms2sysv_restore (const struct ix86_frame
&frame
,
9172 bool use_call
, int style
)
9174 struct machine_function
*m
= cfun
->machine
;
9175 const unsigned ncregs
= NUM_X86_64_MS_CLOBBERED_REGS
9176 + m
->call_ms2sysv_extra_regs
;
9178 unsigned int elems_needed
, align
, i
, vi
= 0;
9181 rtx rsi
= gen_rtx_REG (word_mode
, SI_REG
);
9183 const class xlogue_layout
&xlogue
= xlogue_layout::get_instance ();
9184 HOST_WIDE_INT stub_ptr_offset
= xlogue
.get_stub_ptr_offset ();
9185 HOST_WIDE_INT rsi_offset
= frame
.stack_realign_offset
+ stub_ptr_offset
;
9186 rtx rsi_frame_load
= NULL_RTX
;
9187 HOST_WIDE_INT rsi_restore_offset
= (HOST_WIDE_INT
)-1;
9188 enum xlogue_stub stub
;
9190 gcc_assert (!m
->fs
.fp_valid
|| frame_pointer_needed
);
9192 /* If using a realigned stack, we should never start with padding. */
9193 gcc_assert (!stack_realign_fp
|| !xlogue
.get_stack_align_off_in ());
9195 /* Setup RSI as the stub's base pointer. */
9196 align
= GET_MODE_ALIGNMENT (V4SFmode
);
9197 tmp
= choose_baseaddr (rsi_offset
, &align
, SI_REG
);
9198 gcc_assert (align
>= GET_MODE_ALIGNMENT (V4SFmode
));
9200 emit_insn (gen_rtx_SET (rsi
, tmp
));
9202 /* Get a symbol for the stub. */
9203 if (frame_pointer_needed
)
9204 stub
= use_call
? XLOGUE_STUB_RESTORE_HFP
9205 : XLOGUE_STUB_RESTORE_HFP_TAIL
;
9207 stub
= use_call
? XLOGUE_STUB_RESTORE
9208 : XLOGUE_STUB_RESTORE_TAIL
;
9209 sym
= xlogue
.get_stub_rtx (stub
);
9211 elems_needed
= ncregs
;
9215 elems_needed
+= frame_pointer_needed
? 5 : 3;
9216 v
= rtvec_alloc (elems_needed
);
9218 /* We call the epilogue stub when we need to pop incoming args or we are
9219 doing a sibling call as the tail. Otherwise, we will emit a jmp to the
9220 epilogue stub and it is the tail-call. */
9222 RTVEC_ELT (v
, vi
++) = gen_rtx_USE (VOIDmode
, sym
);
9225 RTVEC_ELT (v
, vi
++) = ret_rtx
;
9226 RTVEC_ELT (v
, vi
++) = gen_rtx_USE (VOIDmode
, sym
);
9227 if (frame_pointer_needed
)
9229 rtx rbp
= gen_rtx_REG (DImode
, BP_REG
);
9230 gcc_assert (m
->fs
.fp_valid
);
9231 gcc_assert (m
->fs
.cfa_reg
== hard_frame_pointer_rtx
);
9233 tmp
= plus_constant (DImode
, rbp
, 8);
9234 RTVEC_ELT (v
, vi
++) = gen_rtx_SET (stack_pointer_rtx
, tmp
);
9235 RTVEC_ELT (v
, vi
++) = gen_rtx_SET (rbp
, gen_rtx_MEM (DImode
, rbp
));
9236 tmp
= gen_rtx_MEM (BLKmode
, gen_rtx_SCRATCH (VOIDmode
));
9237 RTVEC_ELT (v
, vi
++) = gen_rtx_CLOBBER (VOIDmode
, tmp
);
9241 /* If no hard frame pointer, we set R10 to the SP restore value. */
9242 gcc_assert (!m
->fs
.fp_valid
);
9243 gcc_assert (m
->fs
.cfa_reg
== stack_pointer_rtx
);
9244 gcc_assert (m
->fs
.sp_valid
);
9246 r10
= gen_rtx_REG (DImode
, R10_REG
);
9247 tmp
= plus_constant (Pmode
, rsi
, stub_ptr_offset
);
9248 emit_insn (gen_rtx_SET (r10
, tmp
));
9250 RTVEC_ELT (v
, vi
++) = gen_rtx_SET (stack_pointer_rtx
, r10
);
9254 /* Generate frame load insns and restore notes. */
9255 for (i
= 0; i
< ncregs
; ++i
)
9257 const xlogue_layout::reginfo
&r
= xlogue
.get_reginfo (i
);
9258 machine_mode mode
= SSE_REGNO_P (r
.regno
) ? V4SFmode
: word_mode
;
9259 rtx reg
, frame_load
;
9261 reg
= gen_rtx_REG (mode
, r
.regno
);
9262 frame_load
= gen_frame_load (reg
, rsi
, r
.offset
);
9264 /* Save RSI frame load insn & note to add last. */
9265 if (r
.regno
== SI_REG
)
9267 gcc_assert (!rsi_frame_load
);
9268 rsi_frame_load
= frame_load
;
9269 rsi_restore_offset
= r
.offset
;
9273 RTVEC_ELT (v
, vi
++) = frame_load
;
9274 ix86_add_cfa_restore_note (NULL
, reg
, r
.offset
);
9278 /* Add RSI frame load & restore note at the end. */
9279 gcc_assert (rsi_frame_load
);
9280 gcc_assert (rsi_restore_offset
!= (HOST_WIDE_INT
)-1);
9281 RTVEC_ELT (v
, vi
++) = rsi_frame_load
;
9282 ix86_add_cfa_restore_note (NULL
, gen_rtx_REG (DImode
, SI_REG
),
9283 rsi_restore_offset
);
9285 /* Finally, for tail-call w/o a hard frame pointer, set SP to R10. */
9286 if (!use_call
&& !frame_pointer_needed
)
9288 gcc_assert (m
->fs
.sp_valid
);
9289 gcc_assert (!m
->fs
.sp_realigned
);
9291 /* At this point, R10 should point to frame.stack_realign_offset. */
9292 if (m
->fs
.cfa_reg
== stack_pointer_rtx
)
9293 m
->fs
.cfa_offset
+= m
->fs
.sp_offset
- frame
.stack_realign_offset
;
9294 m
->fs
.sp_offset
= frame
.stack_realign_offset
;
9297 gcc_assert (vi
== (unsigned int)GET_NUM_ELEM (v
));
9298 tmp
= gen_rtx_PARALLEL (VOIDmode
, v
);
9300 insn
= emit_insn (tmp
);
9303 insn
= emit_jump_insn (tmp
);
9304 JUMP_LABEL (insn
) = ret_rtx
;
9306 if (frame_pointer_needed
)
9307 ix86_emit_leave (insn
);
9310 /* Need CFA adjust note. */
9311 tmp
= gen_rtx_SET (stack_pointer_rtx
, r10
);
9312 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, tmp
);
9316 RTX_FRAME_RELATED_P (insn
) = true;
9317 ix86_add_queued_cfa_restore_notes (insn
);
9319 /* If we're not doing a tail-call, we need to adjust the stack. */
9320 if (use_call
&& m
->fs
.sp_valid
)
9322 HOST_WIDE_INT dealloc
= m
->fs
.sp_offset
- frame
.stack_realign_offset
;
9323 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
9324 GEN_INT (dealloc
), style
,
9325 m
->fs
.cfa_reg
== stack_pointer_rtx
);
9329 /* Restore function stack, frame, and registers. */
9332 ix86_expand_epilogue (int style
)
9334 struct machine_function
*m
= cfun
->machine
;
9335 struct machine_frame_state frame_state_save
= m
->fs
;
9336 bool restore_regs_via_mov
;
9338 bool restore_stub_is_tail
= false;
9340 if (ix86_function_naked (current_function_decl
))
9342 /* The program should not reach this point. */
9343 emit_insn (gen_ud2 ());
9347 ix86_finalize_stack_frame_flags ();
9348 const struct ix86_frame
&frame
= cfun
->machine
->frame
;
9350 m
->fs
.sp_realigned
= stack_realign_fp
;
9351 m
->fs
.sp_valid
= stack_realign_fp
9352 || !frame_pointer_needed
9353 || crtl
->sp_is_unchanging
;
9354 gcc_assert (!m
->fs
.sp_valid
9355 || m
->fs
.sp_offset
== frame
.stack_pointer_offset
);
9357 /* The FP must be valid if the frame pointer is present. */
9358 gcc_assert (frame_pointer_needed
== m
->fs
.fp_valid
);
9359 gcc_assert (!m
->fs
.fp_valid
9360 || m
->fs
.fp_offset
== frame
.hard_frame_pointer_offset
);
9362 /* We must have *some* valid pointer to the stack frame. */
9363 gcc_assert (m
->fs
.sp_valid
|| m
->fs
.fp_valid
);
9365 /* The DRAP is never valid at this point. */
9366 gcc_assert (!m
->fs
.drap_valid
);
9368 /* See the comment about red zone and frame
9369 pointer usage in ix86_expand_prologue. */
9370 if (frame_pointer_needed
&& frame
.red_zone_size
)
9371 emit_insn (gen_memory_blockage ());
9373 using_drap
= crtl
->drap_reg
&& crtl
->stack_realign_needed
;
9374 gcc_assert (!using_drap
|| m
->fs
.cfa_reg
== crtl
->drap_reg
);
9376 /* Determine the CFA offset of the end of the red-zone. */
9377 m
->fs
.red_zone_offset
= 0;
9378 if (ix86_using_red_zone () && crtl
->args
.pops_args
< 65536)
9380 /* The red-zone begins below return address and error code in
9381 exception handler. */
9382 m
->fs
.red_zone_offset
= RED_ZONE_SIZE
+ INCOMING_FRAME_SP_OFFSET
;
9384 /* When the register save area is in the aligned portion of
9385 the stack, determine the maximum runtime displacement that
9386 matches up with the aligned frame. */
9387 if (stack_realign_drap
)
9388 m
->fs
.red_zone_offset
-= (crtl
->stack_alignment_needed
/ BITS_PER_UNIT
9392 HOST_WIDE_INT reg_save_offset
= frame
.reg_save_offset
;
9394 /* Special care must be taken for the normal return case of a function
9395 using eh_return: the eax and edx registers are marked as saved, but
9396 not restored along this path. Adjust the save location to match. */
9397 if (crtl
->calls_eh_return
&& style
!= 2)
9398 reg_save_offset
-= 2 * UNITS_PER_WORD
;
9400 /* EH_RETURN requires the use of moves to function properly. */
9401 if (crtl
->calls_eh_return
)
9402 restore_regs_via_mov
= true;
9403 /* SEH requires the use of pops to identify the epilogue. */
9404 else if (TARGET_SEH
)
9405 restore_regs_via_mov
= false;
/* If we're only restoring one register and sp cannot be used then
   use a move instruction to restore the register, since it's
   less work than reloading sp and popping the register.  */
9409 else if (!sp_valid_at (frame
.hfp_save_offset
) && frame
.nregs
<= 1)
9410 restore_regs_via_mov
= true;
9411 else if (TARGET_EPILOGUE_USING_MOVE
9412 && cfun
->machine
->use_fast_prologue_epilogue
9414 || m
->fs
.sp_offset
!= reg_save_offset
))
9415 restore_regs_via_mov
= true;
9416 else if (frame_pointer_needed
9418 && m
->fs
.sp_offset
!= reg_save_offset
)
9419 restore_regs_via_mov
= true;
9420 else if (frame_pointer_needed
9422 && cfun
->machine
->use_fast_prologue_epilogue
9423 && frame
.nregs
== 1)
9424 restore_regs_via_mov
= true;
9426 restore_regs_via_mov
= false;
9428 if (restore_regs_via_mov
|| frame
.nsseregs
)
9430 /* Ensure that the entire register save area is addressable via
9431 the stack pointer, if we will restore SSE regs via sp. */
9433 && m
->fs
.sp_offset
> 0x7fffffff
9434 && sp_valid_at (frame
.stack_realign_offset
+ 1)
9435 && (frame
.nsseregs
+ frame
.nregs
) != 0)
9437 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
9438 GEN_INT (m
->fs
.sp_offset
9439 - frame
.sse_reg_save_offset
),
9441 m
->fs
.cfa_reg
== stack_pointer_rtx
);
9445 /* If there are any SSE registers to restore, then we have to do it
9446 via moves, since there's obviously no pop for SSE regs. */
9448 ix86_emit_restore_sse_regs_using_mov (frame
.sse_reg_save_offset
,
9451 if (m
->call_ms2sysv
)
9453 int pop_incoming_args
= crtl
->args
.pops_args
&& crtl
->args
.size
;
9455 /* We cannot use a tail-call for the stub if:
9456 1. We have to pop incoming args,
9457 2. We have additional int regs to restore, or
9458 3. A sibling call will be the tail-call, or
9459 4. We are emitting an eh_return_internal epilogue.
TODO: Item 4 has not yet been tested!
9463 If any of the above are true, we will call the stub rather than
9465 restore_stub_is_tail
= !(pop_incoming_args
|| frame
.nregs
|| style
!= 1);
9466 ix86_emit_outlined_ms2sysv_restore (frame
, !restore_stub_is_tail
, style
);
9469 /* If using out-of-line stub that is a tail-call, then...*/
9470 if (m
->call_ms2sysv
&& restore_stub_is_tail
)
/* TODO: paranoid tests. (remove eventually) */
9473 gcc_assert (m
->fs
.sp_valid
);
9474 gcc_assert (!m
->fs
.sp_realigned
);
9475 gcc_assert (!m
->fs
.fp_valid
);
9476 gcc_assert (!m
->fs
.realigned
);
9477 gcc_assert (m
->fs
.sp_offset
== UNITS_PER_WORD
);
9478 gcc_assert (!crtl
->drap_reg
);
9479 gcc_assert (!frame
.nregs
);
9481 else if (restore_regs_via_mov
)
9486 ix86_emit_restore_regs_using_mov (reg_save_offset
, style
== 2);
9488 /* eh_return epilogues need %ecx added to the stack pointer. */
9491 rtx sa
= EH_RETURN_STACKADJ_RTX
;
9494 /* Stack realignment doesn't work with eh_return. */
9495 if (crtl
->stack_realign_needed
)
9496 sorry ("Stack realignment not supported with "
9497 "%<__builtin_eh_return%>");
9499 /* regparm nested functions don't work with eh_return. */
9500 if (ix86_static_chain_on_stack
)
9501 sorry ("regparm nested function not supported with "
9502 "%<__builtin_eh_return%>");
9504 if (frame_pointer_needed
)
9506 t
= gen_rtx_PLUS (Pmode
, hard_frame_pointer_rtx
, sa
);
9507 t
= plus_constant (Pmode
, t
, m
->fs
.fp_offset
- UNITS_PER_WORD
);
9508 emit_insn (gen_rtx_SET (sa
, t
));
9510 /* NB: eh_return epilogues must restore the frame pointer
9511 in word_mode since the upper 32 bits of RBP register
9512 can have any values. */
9513 t
= gen_frame_mem (word_mode
, hard_frame_pointer_rtx
);
9514 rtx frame_reg
= gen_rtx_REG (word_mode
,
9515 HARD_FRAME_POINTER_REGNUM
);
9516 insn
= emit_move_insn (frame_reg
, t
);
9518 /* Note that we use SA as a temporary CFA, as the return
9519 address is at the proper place relative to it. We
9520 pretend this happens at the FP restore insn because
9521 prior to this insn the FP would be stored at the wrong
9522 offset relative to SA, and after this insn we have no
9523 other reasonable register to use for the CFA. We don't
9524 bother resetting the CFA to the SP for the duration of
9525 the return insn, unless the control flow instrumentation
9526 is done. In this case the SP is used later and we have
9527 to reset CFA to SP. */
9528 add_reg_note (insn
, REG_CFA_DEF_CFA
,
9529 plus_constant (Pmode
, sa
, UNITS_PER_WORD
));
9530 ix86_add_queued_cfa_restore_notes (insn
);
9531 add_reg_note (insn
, REG_CFA_RESTORE
, frame_reg
);
9532 RTX_FRAME_RELATED_P (insn
) = 1;
9535 m
->fs
.cfa_offset
= UNITS_PER_WORD
;
9536 m
->fs
.fp_valid
= false;
9538 pro_epilogue_adjust_stack (stack_pointer_rtx
, sa
,
9540 flag_cf_protection
);
9544 t
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, sa
);
9545 t
= plus_constant (Pmode
, t
, m
->fs
.sp_offset
- UNITS_PER_WORD
);
9546 insn
= emit_insn (gen_rtx_SET (stack_pointer_rtx
, t
));
9547 ix86_add_queued_cfa_restore_notes (insn
);
9549 gcc_assert (m
->fs
.cfa_reg
== stack_pointer_rtx
);
9550 if (m
->fs
.cfa_offset
!= UNITS_PER_WORD
)
9552 m
->fs
.cfa_offset
= UNITS_PER_WORD
;
9553 add_reg_note (insn
, REG_CFA_DEF_CFA
,
9554 plus_constant (Pmode
, stack_pointer_rtx
,
9556 RTX_FRAME_RELATED_P (insn
) = 1;
9559 m
->fs
.sp_offset
= UNITS_PER_WORD
;
9560 m
->fs
.sp_valid
= true;
9561 m
->fs
.sp_realigned
= false;
9566 /* SEH requires that the function end with (1) a stack adjustment
9567 if necessary, (2) a sequence of pops, and (3) a return or
9568 jump instruction. Prevent insns from the function body from
9569 being scheduled into this sequence. */
9572 /* Prevent a catch region from being adjacent to the standard
9573 epilogue sequence. Unfortunately neither crtl->uses_eh_lsda
9574 nor several other flags that would be interesting to test are
9576 if (flag_non_call_exceptions
)
9577 emit_insn (gen_nops (const1_rtx
));
9579 emit_insn (gen_blockage ());
9582 /* First step is to deallocate the stack frame so that we can
9583 pop the registers. If the stack pointer was realigned, it needs
9584 to be restored now. Also do it on SEH target for very large
9585 frame as the emitted instructions aren't allowed by the ABI
9587 if (!m
->fs
.sp_valid
|| m
->fs
.sp_realigned
9589 && (m
->fs
.sp_offset
- reg_save_offset
9590 >= SEH_MAX_FRAME_SIZE
)))
9592 pro_epilogue_adjust_stack (stack_pointer_rtx
, hard_frame_pointer_rtx
,
9593 GEN_INT (m
->fs
.fp_offset
9597 else if (m
->fs
.sp_offset
!= reg_save_offset
)
9599 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
9600 GEN_INT (m
->fs
.sp_offset
9603 m
->fs
.cfa_reg
== stack_pointer_rtx
);
9606 ix86_emit_restore_regs_using_pop ();
9609 /* If we used a stack pointer and haven't already got rid of it,
9613 /* If the stack pointer is valid and pointing at the frame
9614 pointer store address, then we only need a pop. */
9615 if (sp_valid_at (frame
.hfp_save_offset
)
9616 && m
->fs
.sp_offset
== frame
.hfp_save_offset
)
9617 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx
);
9618 /* Leave results in shorter dependency chains on CPUs that are
9619 able to grok it fast. */
9620 else if (TARGET_USE_LEAVE
9621 || optimize_bb_for_size_p (EXIT_BLOCK_PTR_FOR_FN (cfun
))
9622 || !cfun
->machine
->use_fast_prologue_epilogue
)
9623 ix86_emit_leave (NULL
);
9626 pro_epilogue_adjust_stack (stack_pointer_rtx
,
9627 hard_frame_pointer_rtx
,
9628 const0_rtx
, style
, !using_drap
);
9629 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx
);
9635 int param_ptr_offset
= UNITS_PER_WORD
;
9638 gcc_assert (stack_realign_drap
);
9640 if (ix86_static_chain_on_stack
)
9641 param_ptr_offset
+= UNITS_PER_WORD
;
9642 if (!call_used_or_fixed_reg_p (REGNO (crtl
->drap_reg
)))
9643 param_ptr_offset
+= UNITS_PER_WORD
;
9645 insn
= emit_insn (gen_rtx_SET
9647 plus_constant (Pmode
, crtl
->drap_reg
,
9648 -param_ptr_offset
)));
9649 m
->fs
.cfa_reg
= stack_pointer_rtx
;
9650 m
->fs
.cfa_offset
= param_ptr_offset
;
9651 m
->fs
.sp_offset
= param_ptr_offset
;
9652 m
->fs
.realigned
= false;
9654 add_reg_note (insn
, REG_CFA_DEF_CFA
,
9655 plus_constant (Pmode
, stack_pointer_rtx
,
9657 RTX_FRAME_RELATED_P (insn
) = 1;
9659 if (!call_used_or_fixed_reg_p (REGNO (crtl
->drap_reg
)))
9660 ix86_emit_restore_reg_using_pop (crtl
->drap_reg
);
9663 /* At this point the stack pointer must be valid, and we must have
9664 restored all of the registers. We may not have deallocated the
9665 entire stack frame. We've delayed this until now because it may
9666 be possible to merge the local stack deallocation with the
9667 deallocation forced by ix86_static_chain_on_stack. */
9668 gcc_assert (m
->fs
.sp_valid
);
9669 gcc_assert (!m
->fs
.sp_realigned
);
9670 gcc_assert (!m
->fs
.fp_valid
);
9671 gcc_assert (!m
->fs
.realigned
);
9672 if (m
->fs
.sp_offset
!= UNITS_PER_WORD
)
9674 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
9675 GEN_INT (m
->fs
.sp_offset
- UNITS_PER_WORD
),
9679 ix86_add_queued_cfa_restore_notes (get_last_insn ());
9681 /* Sibcall epilogues don't want a return instruction. */
9684 m
->fs
= frame_state_save
;
9688 if (cfun
->machine
->func_type
!= TYPE_NORMAL
)
9689 emit_jump_insn (gen_interrupt_return ());
9690 else if (crtl
->args
.pops_args
&& crtl
->args
.size
)
9692 rtx popc
= GEN_INT (crtl
->args
.pops_args
);
9694 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9695 address, do explicit add, and jump indirectly to the caller. */
9697 if (crtl
->args
.pops_args
>= 65536)
9699 rtx ecx
= gen_rtx_REG (SImode
, CX_REG
);
9702 /* There is no "pascal" calling convention in any 64bit ABI. */
9703 gcc_assert (!TARGET_64BIT
);
9705 insn
= emit_insn (gen_pop (ecx
));
9706 m
->fs
.cfa_offset
-= UNITS_PER_WORD
;
9707 m
->fs
.sp_offset
-= UNITS_PER_WORD
;
9709 rtx x
= plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
);
9710 x
= gen_rtx_SET (stack_pointer_rtx
, x
);
9711 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, x
);
9712 add_reg_note (insn
, REG_CFA_REGISTER
, gen_rtx_SET (ecx
, pc_rtx
));
9713 RTX_FRAME_RELATED_P (insn
) = 1;
9715 pro_epilogue_adjust_stack (stack_pointer_rtx
, stack_pointer_rtx
,
9717 emit_jump_insn (gen_simple_return_indirect_internal (ecx
));
9720 emit_jump_insn (gen_simple_return_pop_internal (popc
));
9722 else if (!m
->call_ms2sysv
|| !restore_stub_is_tail
)
9724 /* In case of return from EH a simple return cannot be used
9725 as a return address will be compared with a shadow stack
9726 return address. Use indirect jump instead. */
9727 if (style
== 2 && flag_cf_protection
)
9729 /* Register used in indirect jump must be in word_mode. But
9730 Pmode may not be the same as word_mode for x32. */
9731 rtx ecx
= gen_rtx_REG (word_mode
, CX_REG
);
9734 insn
= emit_insn (gen_pop (ecx
));
9735 m
->fs
.cfa_offset
-= UNITS_PER_WORD
;
9736 m
->fs
.sp_offset
-= UNITS_PER_WORD
;
9738 rtx x
= plus_constant (Pmode
, stack_pointer_rtx
, UNITS_PER_WORD
);
9739 x
= gen_rtx_SET (stack_pointer_rtx
, x
);
9740 add_reg_note (insn
, REG_CFA_ADJUST_CFA
, x
);
9741 add_reg_note (insn
, REG_CFA_REGISTER
, gen_rtx_SET (ecx
, pc_rtx
));
9742 RTX_FRAME_RELATED_P (insn
) = 1;
9744 emit_jump_insn (gen_simple_return_indirect_internal (ecx
));
9747 emit_jump_insn (gen_simple_return_internal ());
9750 /* Restore the state back to the state from the prologue,
9751 so that it's correct for the next epilogue. */
9752 m
->fs
= frame_state_save
;
9755 /* Reset from the function's potential modifications. */
9758 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED
)
9760 if (pic_offset_table_rtx
9761 && !ix86_use_pseudo_pic_reg ())
9762 SET_REGNO (pic_offset_table_rtx
, REAL_PIC_OFFSET_TABLE_REGNUM
);
9766 rtx_insn
*insn
= get_last_insn ();
9767 rtx_insn
*deleted_debug_label
= NULL
;
9769 /* Mach-O doesn't support labels at the end of objects, so if
9770 it looks like we might want one, take special action.
9771 First, collect any sequence of deleted debug labels. */
9774 && NOTE_KIND (insn
) != NOTE_INSN_DELETED_LABEL
)
9776 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
9777 notes only, instead set their CODE_LABEL_NUMBER to -1,
9778 otherwise there would be code generation differences
9779 in between -g and -g0. */
9780 if (NOTE_P (insn
) && NOTE_KIND (insn
)
9781 == NOTE_INSN_DELETED_DEBUG_LABEL
)
9782 deleted_debug_label
= insn
;
9783 insn
= PREV_INSN (insn
);
9789 then this needs to be detected, so skip past the barrier. */
9791 if (insn
&& BARRIER_P (insn
))
9792 insn
= PREV_INSN (insn
);
9794 /* Up to now we've only seen notes or barriers. */
9799 && NOTE_KIND (insn
) == NOTE_INSN_DELETED_LABEL
))
9800 /* Trailing label. */
9801 fputs ("\tnop\n", file
);
9802 else if (cfun
&& ! cfun
->is_thunk
)
9804 /* See if we have a completely empty function body, skipping
9805 the special case of the picbase thunk emitted as asm. */
9806 while (insn
&& ! INSN_P (insn
))
9807 insn
= PREV_INSN (insn
);
9808 /* If we don't find any insns, we've got an empty function body;
9809 I.e. completely empty - without a return or branch. This is
9810 taken as the case where a function body has been removed
9811 because it contains an inline __builtin_unreachable(). GCC
9812 declares that reaching __builtin_unreachable() means UB so
9813 we're not obliged to do anything special; however, we want
9814 non-zero-sized function bodies. To meet this, and help the
9815 user out, let's trap the case. */
9817 fputs ("\tud2\n", file
);
9820 else if (deleted_debug_label
)
9821 for (insn
= deleted_debug_label
; insn
; insn
= NEXT_INSN (insn
))
9822 if (NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
9823 CODE_LABEL_NUMBER (insn
) = -1;
9827 /* Implement TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY. */
9830 ix86_print_patchable_function_entry (FILE *file
,
9831 unsigned HOST_WIDE_INT patch_area_size
,
9834 if (cfun
->machine
->function_label_emitted
)
9836 /* NB: When ix86_print_patchable_function_entry is called after
9837 function table has been emitted, we have inserted or queued
9838 a pseudo UNSPECV_PATCHABLE_AREA instruction at the proper
9839 place. There is nothing to do here. */
9843 default_print_patchable_function_entry (file
, patch_area_size
,
9847 /* Output patchable area. NB: default_print_patchable_function_entry
9848 isn't available in i386.md. */
9851 ix86_output_patchable_area (unsigned int patch_area_size
,
9854 default_print_patchable_function_entry (asm_out_file
,
9859 /* Return a scratch register to use in the split stack prologue. The
9860 split stack prologue is used for -fsplit-stack. It is the first
9861 instructions in the function, even before the regular prologue.
9862 The scratch register can be any caller-saved register which is not
9863 used for parameters or for the static chain. */
9866 split_stack_prologue_scratch_regno (void)
9872 bool is_fastcall
, is_thiscall
;
9875 is_fastcall
= (lookup_attribute ("fastcall",
9876 TYPE_ATTRIBUTES (TREE_TYPE (cfun
->decl
)))
9878 is_thiscall
= (lookup_attribute ("thiscall",
9879 TYPE_ATTRIBUTES (TREE_TYPE (cfun
->decl
)))
9881 regparm
= ix86_function_regparm (TREE_TYPE (cfun
->decl
), cfun
->decl
);
9885 if (DECL_STATIC_CHAIN (cfun
->decl
))
9887 sorry ("%<-fsplit-stack%> does not support fastcall with "
9889 return INVALID_REGNUM
;
9893 else if (is_thiscall
)
9895 if (!DECL_STATIC_CHAIN (cfun
->decl
))
9899 else if (regparm
< 3)
9901 if (!DECL_STATIC_CHAIN (cfun
->decl
))
9907 sorry ("%<-fsplit-stack%> does not support 2 register "
9908 "parameters for a nested function");
9909 return INVALID_REGNUM
;
9916 /* FIXME: We could make this work by pushing a register
9917 around the addition and comparison. */
9918 sorry ("%<-fsplit-stack%> does not support 3 register parameters");
9919 return INVALID_REGNUM
;
9924 /* A SYMBOL_REF for the function which allocates new stackspace for
9927 static GTY(()) rtx split_stack_fn
;
9929 /* A SYMBOL_REF for the more stack function when using the large
9932 static GTY(()) rtx split_stack_fn_large
;
/* Return location of the stack guard value in the TLS block.  */

rtx
ix86_split_stack_guard (void)
{
  int offset;
  addr_space_t as = DEFAULT_TLS_SEG_REG;
  rtx r;

  gcc_assert (flag_split_stack);

#ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
  offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
#else
  gcc_unreachable ();
#endif

  r = GEN_INT (offset);
  r = gen_const_mem (Pmode, r);
  set_mem_addr_space (r, as);

  return r;
}
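/* In other words, the guard is read straight out of the thread control
   block through the TLS segment register (%fs in 64-bit mode, %gs in
   32-bit mode), so no extra register is needed to address it.  */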
9958 /* Handle -fsplit-stack. These are the first instructions in the
9959 function, even before the regular prologue. */
9962 ix86_expand_split_stack_prologue (void)
9964 HOST_WIDE_INT allocate
;
9965 unsigned HOST_WIDE_INT args_size
;
9966 rtx_code_label
*label
;
9967 rtx limit
, current
, allocate_rtx
, call_fusage
;
9968 rtx_insn
*call_insn
;
9969 rtx scratch_reg
= NULL_RTX
;
9970 rtx_code_label
*varargs_label
= NULL
;
9973 gcc_assert (flag_split_stack
&& reload_completed
);
9975 ix86_finalize_stack_frame_flags ();
9976 struct ix86_frame
&frame
= cfun
->machine
->frame
;
9977 allocate
= frame
.stack_pointer_offset
- INCOMING_FRAME_SP_OFFSET
;
9979 /* This is the label we will branch to if we have enough stack
9980 space. We expect the basic block reordering pass to reverse this
9981 branch if optimizing, so that we branch in the unlikely case. */
9982 label
= gen_label_rtx ();
9984 /* We need to compare the stack pointer minus the frame size with
9985 the stack boundary in the TCB. The stack boundary always gives
9986 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
9987 can compare directly. Otherwise we need to do an addition. */
9989 limit
= ix86_split_stack_guard ();
9991 if (allocate
< SPLIT_STACK_AVAILABLE
)
9992 current
= stack_pointer_rtx
;
9995 unsigned int scratch_regno
;
9998 /* We need a scratch register to hold the stack pointer minus
9999 the required frame size. Since this is the very start of the
10000 function, the scratch register can be any caller-saved
10001 register which is not used for parameters. */
10002 offset
= GEN_INT (- allocate
);
10003 scratch_regno
= split_stack_prologue_scratch_regno ();
10004 if (scratch_regno
== INVALID_REGNUM
)
10006 scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
10007 if (!TARGET_64BIT
|| x86_64_immediate_operand (offset
, Pmode
))
10009 /* We don't use gen_add in this case because it will
10010 want to split to lea, but when not optimizing the insn
10011 will not be split after this point. */
10012 emit_insn (gen_rtx_SET (scratch_reg
,
10013 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
10018 emit_move_insn (scratch_reg
, offset
);
10019 emit_insn (gen_add2_insn (scratch_reg
, stack_pointer_rtx
));
10021 current
= scratch_reg
;
10024 ix86_expand_branch (GEU
, current
, limit
, label
);
10025 rtx_insn
*jump_insn
= get_last_insn ();
10026 JUMP_LABEL (jump_insn
) = label
;
10028 /* Mark the jump as very likely to be taken. */
10029 add_reg_br_prob_note (jump_insn
, profile_probability::very_likely ());
10031 if (split_stack_fn
== NULL_RTX
)
10033 split_stack_fn
= gen_rtx_SYMBOL_REF (Pmode
, "__morestack");
10034 SYMBOL_REF_FLAGS (split_stack_fn
) |= SYMBOL_FLAG_LOCAL
;
10036 fn
= split_stack_fn
;
10038 /* Get more stack space. We pass in the desired stack space and the
10039 size of the arguments to copy to the new stack. In 32-bit mode
10040 we push the parameters; __morestack will return on a new stack
10041 anyhow. In 64-bit mode we pass the parameters in r10 and
10043 allocate_rtx
= GEN_INT (allocate
);
10044 args_size
= crtl
->args
.size
>= 0 ? (HOST_WIDE_INT
) crtl
->args
.size
: 0;
10045 call_fusage
= NULL_RTX
;
10046 rtx pop
= NULL_RTX
;
10051 reg10
= gen_rtx_REG (Pmode
, R10_REG
);
10052 reg11
= gen_rtx_REG (Pmode
, R11_REG
);
10054 /* If this function uses a static chain, it will be in %r10.
10055 Preserve it across the call to __morestack. */
10056 if (DECL_STATIC_CHAIN (cfun
->decl
))
10060 rax
= gen_rtx_REG (word_mode
, AX_REG
);
10061 emit_move_insn (rax
, gen_rtx_REG (word_mode
, R10_REG
));
10062 use_reg (&call_fusage
, rax
);
10065 if ((ix86_cmodel
== CM_LARGE
|| ix86_cmodel
== CM_LARGE_PIC
)
10068 HOST_WIDE_INT argval
;
10070 gcc_assert (Pmode
== DImode
);
10071 /* When using the large model we need to load the address
10072 into a register, and we've run out of registers. So we
10073 switch to a different calling convention, and we call a
10074 different function: __morestack_large. We pass the
10075 argument size in the upper 32 bits of r10 and pass the
10076 frame size in the lower 32 bits. */
10077 gcc_assert ((allocate
& HOST_WIDE_INT_C (0xffffffff)) == allocate
);
10078 gcc_assert ((args_size
& 0xffffffff) == args_size
);
10080 if (split_stack_fn_large
== NULL_RTX
)
10082 split_stack_fn_large
10083 = gen_rtx_SYMBOL_REF (Pmode
, "__morestack_large_model");
10084 SYMBOL_REF_FLAGS (split_stack_fn_large
) |= SYMBOL_FLAG_LOCAL
;
10086 if (ix86_cmodel
== CM_LARGE_PIC
)
10088 rtx_code_label
*label
;
10091 label
= gen_label_rtx ();
10092 emit_label (label
);
10093 LABEL_PRESERVE_P (label
) = 1;
10094 emit_insn (gen_set_rip_rex64 (reg10
, label
));
10095 emit_insn (gen_set_got_offset_rex64 (reg11
, label
));
10096 emit_insn (gen_add2_insn (reg10
, reg11
));
10097 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, split_stack_fn_large
),
10099 x
= gen_rtx_CONST (Pmode
, x
);
10100 emit_move_insn (reg11
, x
);
10101 x
= gen_rtx_PLUS (Pmode
, reg10
, reg11
);
10102 x
= gen_const_mem (Pmode
, x
);
10103 emit_move_insn (reg11
, x
);
10106 emit_move_insn (reg11
, split_stack_fn_large
);
10110 argval
= ((args_size
<< 16) << 16) + allocate
;
10111 emit_move_insn (reg10
, GEN_INT (argval
));
10115 emit_move_insn (reg10
, allocate_rtx
);
10116 emit_move_insn (reg11
, GEN_INT (args_size
));
10117 use_reg (&call_fusage
, reg11
);
10120 use_reg (&call_fusage
, reg10
);
10124 rtx_insn
*insn
= emit_insn (gen_push (GEN_INT (args_size
)));
10125 add_reg_note (insn
, REG_ARGS_SIZE
, GEN_INT (UNITS_PER_WORD
));
10126 insn
= emit_insn (gen_push (allocate_rtx
));
10127 add_reg_note (insn
, REG_ARGS_SIZE
, GEN_INT (2 * UNITS_PER_WORD
));
10128 pop
= GEN_INT (2 * UNITS_PER_WORD
);
10130 call_insn
= ix86_expand_call (NULL_RTX
, gen_rtx_MEM (QImode
, fn
),
10131 GEN_INT (UNITS_PER_WORD
), constm1_rtx
,
10133 add_function_usage_to (call_insn
, call_fusage
);
10135 add_reg_note (call_insn
, REG_ARGS_SIZE
, GEN_INT (0));
10136 /* Indicate that this function can't jump to non-local gotos. */
10137 make_reg_eh_region_note_nothrow_nononlocal (call_insn
);
10139 /* In order to make call/return prediction work right, we now need
10140 to execute a return instruction. See
10141 libgcc/config/i386/morestack.S for the details on how this works.
10143 For flow purposes gcc must not see this as a return
10144 instruction--we need control flow to continue at the subsequent
10145 label. Therefore, we use an unspec. */
10146 gcc_assert (crtl
->args
.pops_args
< 65536);
10148 = emit_insn (gen_split_stack_return (GEN_INT (crtl
->args
.pops_args
)));
10150 if ((flag_cf_protection
& CF_BRANCH
))
10152 /* Insert ENDBR since __morestack will jump back here via indirect
10154 rtx cet_eb
= gen_nop_endbr ();
10155 emit_insn_after (cet_eb
, ret_insn
);
10158 /* If we are in 64-bit mode and this function uses a static chain,
10159 we saved %r10 in %rax before calling _morestack. */
10160 if (TARGET_64BIT
&& DECL_STATIC_CHAIN (cfun
->decl
))
10161 emit_move_insn (gen_rtx_REG (word_mode
, R10_REG
),
10162 gen_rtx_REG (word_mode
, AX_REG
));
10164 /* If this function calls va_start, we need to store a pointer to
10165 the arguments on the old stack, because they may not have been
10166 all copied to the new stack. At this point the old stack can be
10167 found at the frame pointer value used by __morestack, because
10168 __morestack has set that up before calling back to us. Here we
10169 store that pointer in a scratch register, and in
10170 ix86_expand_prologue we store the scratch register in a stack
10172 if (cfun
->machine
->split_stack_varargs_pointer
!= NULL_RTX
)
10174 unsigned int scratch_regno
;
10178 scratch_regno
= split_stack_prologue_scratch_regno ();
10179 scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
10180 frame_reg
= gen_rtx_REG (Pmode
, BP_REG
);
10184 return address within this function
10185 return address of caller of this function
10187 So we add three words to get to the stack arguments.
10191 return address within this function
10192 first argument to __morestack
10193 second argument to __morestack
10194 return address of caller of this function
10196 So we add five words to get to the stack arguments.
10198 words
= TARGET_64BIT
? 3 : 5;
10199 emit_insn (gen_rtx_SET (scratch_reg
,
10200 plus_constant (Pmode
, frame_reg
,
10201 words
* UNITS_PER_WORD
)));
10203 varargs_label
= gen_label_rtx ();
10204 emit_jump_insn (gen_jump (varargs_label
));
10205 JUMP_LABEL (get_last_insn ()) = varargs_label
;
10210 emit_label (label
);
10211 LABEL_NUSES (label
) = 1;
10213 /* If this function calls va_start, we now have to set the scratch
10214 register for the case where we do not call __morestack. In this
10215 case we need to set it based on the stack pointer. */
10216 if (cfun
->machine
->split_stack_varargs_pointer
!= NULL_RTX
)
10218 emit_insn (gen_rtx_SET (scratch_reg
,
10219 plus_constant (Pmode
, stack_pointer_rtx
,
10222 emit_label (varargs_label
);
10223 LABEL_NUSES (varargs_label
) = 1;
10227 /* We may have to tell the dataflow pass that the split stack prologue
10228 is initializing a scratch register. */
10231 ix86_live_on_entry (bitmap regs
)
10233 if (cfun
->machine
->split_stack_varargs_pointer
!= NULL_RTX
)
10235 gcc_assert (flag_split_stack
);
10236 bitmap_set_bit (regs
, split_stack_prologue_scratch_regno ());
10240 /* Extract the parts of an RTL expression that is a valid memory address
10241 for an instruction. Return false if the structure of the address is
10245 ix86_decompose_address (rtx addr
, struct ix86_address
*out
)
10247 rtx base
= NULL_RTX
, index
= NULL_RTX
, disp
= NULL_RTX
;
10248 rtx base_reg
, index_reg
;
10249 HOST_WIDE_INT scale
= 1;
10250 rtx scale_rtx
= NULL_RTX
;
10252 addr_space_t seg
= ADDR_SPACE_GENERIC
;
10254 /* Allow zero-extended SImode addresses,
10255 they will be emitted with addr32 prefix. */
10256 if (TARGET_64BIT
&& GET_MODE (addr
) == DImode
)
10258 if (GET_CODE (addr
) == ZERO_EXTEND
10259 && GET_MODE (XEXP (addr
, 0)) == SImode
)
10261 addr
= XEXP (addr
, 0);
10262 if (CONST_INT_P (addr
))
10265 else if (GET_CODE (addr
) == AND
10266 && const_32bit_mask (XEXP (addr
, 1), DImode
))
10268 addr
= lowpart_subreg (SImode
, XEXP (addr
, 0), DImode
);
10269 if (addr
== NULL_RTX
)
10272 if (CONST_INT_P (addr
))
10275 else if (GET_CODE (addr
) == AND
)
10277 /* For ASHIFT inside AND, combine will not generate
10278 canonical zero-extend. Merge mask for AND and shift_count
10279 to check if it is canonical zero-extend. */
10280 tmp
= XEXP (addr
, 0);
10281 rtx mask
= XEXP (addr
, 1);
10282 if (tmp
&& GET_CODE(tmp
) == ASHIFT
)
10284 rtx shift_val
= XEXP (tmp
, 1);
10285 if (CONST_INT_P (mask
) && CONST_INT_P (shift_val
)
10286 && (((unsigned HOST_WIDE_INT
) INTVAL(mask
)
10287 | ((HOST_WIDE_INT_1U
<< INTVAL(shift_val
)) - 1))
10290 addr
= lowpart_subreg (SImode
, XEXP (addr
, 0),
10298 /* Allow SImode subregs of DImode addresses,
10299 they will be emitted with addr32 prefix. */
10300 if (TARGET_64BIT
&& GET_MODE (addr
) == SImode
)
10302 if (SUBREG_P (addr
)
10303 && GET_MODE (SUBREG_REG (addr
)) == DImode
)
10305 addr
= SUBREG_REG (addr
);
10306 if (CONST_INT_P (addr
))
10313 else if (SUBREG_P (addr
))
10315 if (REG_P (SUBREG_REG (addr
)))
10320 else if (GET_CODE (addr
) == PLUS
)
10322 rtx addends
[4], op
;
10330 addends
[n
++] = XEXP (op
, 1);
10333 while (GET_CODE (op
) == PLUS
);
10338 for (i
= n
; i
>= 0; --i
)
10341 switch (GET_CODE (op
))
10346 index
= XEXP (op
, 0);
10347 scale_rtx
= XEXP (op
, 1);
10353 index
= XEXP (op
, 0);
10354 tmp
= XEXP (op
, 1);
10355 if (!CONST_INT_P (tmp
))
10357 scale
= INTVAL (tmp
);
10358 if ((unsigned HOST_WIDE_INT
) scale
> 3)
10360 scale
= 1 << scale
;
10365 if (GET_CODE (op
) != UNSPEC
)
10370 if (XINT (op
, 1) == UNSPEC_TP
10371 && TARGET_TLS_DIRECT_SEG_REFS
10372 && seg
== ADDR_SPACE_GENERIC
)
10373 seg
= DEFAULT_TLS_SEG_REG
;
10379 if (!REG_P (SUBREG_REG (op
)))
10406 else if (GET_CODE (addr
) == MULT
)
10408 index
= XEXP (addr
, 0); /* index*scale */
10409 scale_rtx
= XEXP (addr
, 1);
10411 else if (GET_CODE (addr
) == ASHIFT
)
10413 /* We're called for lea too, which implements ashift on occasion. */
10414 index
= XEXP (addr
, 0);
10415 tmp
= XEXP (addr
, 1);
10416 if (!CONST_INT_P (tmp
))
10418 scale
= INTVAL (tmp
);
10419 if ((unsigned HOST_WIDE_INT
) scale
> 3)
10421 scale
= 1 << scale
;
10424 disp
= addr
; /* displacement */
10430 else if (SUBREG_P (index
)
10431 && REG_P (SUBREG_REG (index
)))
10437 /* Extract the integral value of scale. */
10440 if (!CONST_INT_P (scale_rtx
))
10442 scale
= INTVAL (scale_rtx
);
10445 base_reg
= base
&& SUBREG_P (base
) ? SUBREG_REG (base
) : base
;
10446 index_reg
= index
&& SUBREG_P (index
) ? SUBREG_REG (index
) : index
;
10448 /* Avoid useless 0 displacement. */
10449 if (disp
== const0_rtx
&& (base
|| index
))
/* Allow arg pointer and stack pointer as index if there is no scaling.  */
10453 if (base_reg
&& index_reg
&& scale
== 1
10454 && (REGNO (index_reg
) == ARG_POINTER_REGNUM
10455 || REGNO (index_reg
) == FRAME_POINTER_REGNUM
10456 || REGNO (index_reg
) == SP_REG
))
10458 std::swap (base
, index
);
10459 std::swap (base_reg
, index_reg
);
10462 /* Special case: %ebp cannot be encoded as a base without a displacement.
10464 if (!disp
&& base_reg
10465 && (REGNO (base_reg
) == ARG_POINTER_REGNUM
10466 || REGNO (base_reg
) == FRAME_POINTER_REGNUM
10467 || REGNO (base_reg
) == BP_REG
10468 || REGNO (base_reg
) == R13_REG
))
10471 /* Special case: on K6, [%esi] makes the instruction vector decoded.
10472 Avoid this by transforming to [%esi+0].
10473 Reload calls address legitimization without cfun defined, so we need
10474 to test cfun for being non-NULL. */
10475 if (TARGET_CPU_P (K6
) && cfun
&& optimize_function_for_speed_p (cfun
)
10476 && base_reg
&& !index_reg
&& !disp
10477 && REGNO (base_reg
) == SI_REG
)
10480 /* Special case: encode reg+reg instead of reg*2. */
10481 if (!base
&& index
&& scale
== 2)
10482 base
= index
, base_reg
= index_reg
, scale
= 1;
10484 /* Special case: scaling cannot be encoded without base or displacement. */
10485 if (!base
&& !disp
&& index
&& scale
!= 1)
10489 out
->index
= index
;
10491 out
->scale
= scale
;
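/* Illustration (added, not part of the original code): an address such as
   (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 16))
   decomposes into base = B, index = A, scale = 4 and disp = 16,
   i.e. the AT&T-syntax operand 16(B,A,4).  */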
10497 /* Return cost of the memory address x.
10498 For i386, it is better to use a complex address than let gcc copy
10499 the address into a reg and make a new pseudo. But not if the address
requires two regs - that would mean more pseudos with longer
10503 ix86_address_cost (rtx x
, machine_mode
, addr_space_t
, bool)
10505 struct ix86_address parts
;
10507 int ok
= ix86_decompose_address (x
, &parts
);
10511 if (parts
.base
&& SUBREG_P (parts
.base
))
10512 parts
.base
= SUBREG_REG (parts
.base
);
10513 if (parts
.index
&& SUBREG_P (parts
.index
))
10514 parts
.index
= SUBREG_REG (parts
.index
);
10516 /* Attempt to minimize number of registers in the address by increasing
10517 address cost for each used register. We don't increase address cost
10518 for "pic_offset_table_rtx". When a memopt with "pic_offset_table_rtx"
10519 is not invariant itself it most likely means that base or index is not
10520 invariant. Therefore only "pic_offset_table_rtx" could be hoisted out,
10521 which is not profitable for x86. */
10523 && (!REG_P (parts
.base
) || REGNO (parts
.base
) >= FIRST_PSEUDO_REGISTER
)
10524 && (current_pass
->type
== GIMPLE_PASS
10525 || !pic_offset_table_rtx
10526 || !REG_P (parts
.base
)
10527 || REGNO (pic_offset_table_rtx
) != REGNO (parts
.base
)))
10531 && (!REG_P (parts
.index
) || REGNO (parts
.index
) >= FIRST_PSEUDO_REGISTER
)
10532 && (current_pass
->type
== GIMPLE_PASS
10533 || !pic_offset_table_rtx
10534 || !REG_P (parts
.index
)
10535 || REGNO (pic_offset_table_rtx
) != REGNO (parts
.index
)))
/* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
   since its predecode logic can't detect the length of instructions
   and it degenerates to vector decoded.  Increase cost of such
10541 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
10542 to split such addresses or even refuse such addresses at all.
10544 Following addressing modes are affected:
10549 The first and last case may be avoidable by explicitly coding the zero in
10550 memory address, but I don't have AMD-K6 machine handy to check this
10553 if (TARGET_CPU_P (K6
)
10554 && ((!parts
.disp
&& parts
.base
&& parts
.index
&& parts
.scale
!= 1)
10555 || (parts
.disp
&& !parts
.base
&& parts
.index
&& parts
.scale
!= 1)
10556 || (!parts
.disp
&& parts
.base
&& parts
.index
&& parts
.scale
== 1)))
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
	  && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}
10573 /* True if the function symbol operand X should be loaded from GOT.
10574 If CALL_P is true, X is a call operand.
10576 NB: -mno-direct-extern-access doesn't force load from GOT for
10579 NB: In 32-bit mode, only non-PIC is allowed in inline assembly
10580 statements, since a PIC register could not be available at the
10584 ix86_force_load_from_GOT_p (rtx x
, bool call_p
)
10586 return ((TARGET_64BIT
|| (!flag_pic
&& HAVE_AS_IX86_GOT32X
))
10587 && !TARGET_PECOFF
&& !TARGET_MACHO
10588 && (!flag_pic
|| this_is_asm_operands
)
10589 && ix86_cmodel
!= CM_LARGE
10590 && ix86_cmodel
!= CM_LARGE_PIC
10591 && GET_CODE (x
) == SYMBOL_REF
10593 && (!ix86_direct_extern_access
10594 || (SYMBOL_REF_DECL (x
)
10595 && lookup_attribute ("nodirect_extern_access",
10596 DECL_ATTRIBUTES (SYMBOL_REF_DECL (x
))))))
10597 || (SYMBOL_REF_FUNCTION_P (x
)
10599 || (SYMBOL_REF_DECL (x
)
10600 && lookup_attribute ("noplt",
10601 DECL_ATTRIBUTES (SYMBOL_REF_DECL (x
)))))))
10602 && !SYMBOL_REF_LOCAL_P (x
));
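/* For illustration (an assumption about typical output, not taken from
   this file): when such a symbol is forced through the GOT, a call like
   "call foo" is emitted as "call *foo@GOTPCREL(%rip)" on x86-64 instead
   of going through the PLT.  */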
10605 /* Determine if a given RTX is a valid constant. We already know this
10606 satisfies CONSTANT_P. */
10609 ix86_legitimate_constant_p (machine_mode mode
, rtx x
)
10611 switch (GET_CODE (x
))
10616 if (GET_CODE (x
) == PLUS
)
10618 if (!CONST_INT_P (XEXP (x
, 1)))
10623 if (TARGET_MACHO
&& darwin_local_data_pic (x
))
10626 /* Only some unspecs are valid as "constants". */
10627 if (GET_CODE (x
) == UNSPEC
)
10628 switch (XINT (x
, 1))
10631 case UNSPEC_GOTOFF
:
10632 case UNSPEC_PLTOFF
:
10633 return TARGET_64BIT
;
10635 case UNSPEC_NTPOFF
:
10636 x
= XVECEXP (x
, 0, 0);
10637 return (GET_CODE (x
) == SYMBOL_REF
10638 && SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_EXEC
);
10639 case UNSPEC_DTPOFF
:
10640 x
= XVECEXP (x
, 0, 0);
10641 return (GET_CODE (x
) == SYMBOL_REF
10642 && SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
);
10647 /* We must have drilled down to a symbol. */
10648 if (GET_CODE (x
) == LABEL_REF
)
10650 if (GET_CODE (x
) != SYMBOL_REF
)
10655 /* TLS symbols are never valid. */
10656 if (SYMBOL_REF_TLS_MODEL (x
))
10659 /* DLLIMPORT symbols are never valid. */
10660 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10661 && SYMBOL_REF_DLLIMPORT_P (x
))
10665 /* mdynamic-no-pic */
10666 if (MACHO_DYNAMIC_NO_PIC_P
)
10667 return machopic_symbol_defined_p (x
);
10670 /* External function address should be loaded
10671 via the GOT slot to avoid PLT. */
10672 if (ix86_force_load_from_GOT_p (x
))
10677 CASE_CONST_SCALAR_INT
:
10678 if (ix86_endbr_immediate_operand (x
, VOIDmode
))
10689 if (!standard_sse_constant_p (x
, mode
)
10690 && GET_MODE_SIZE (TARGET_AVX512F
10695 ? TImode
: DImode
))) < GET_MODE_SIZE (mode
))
10703 if (!standard_sse_constant_p (x
, mode
))
10708 if (mode
== E_BFmode
)
10715 /* Otherwise we handle everything else in the move patterns. */
10719 /* Determine if it's legal to put X into the constant pool. This
10720 is not possible for the address of thread-local symbols, which
10721 is checked above. */
10724 ix86_cannot_force_const_mem (machine_mode mode
, rtx x
)
10726 /* We can put any immediate constant in memory. */
10727 switch (GET_CODE (x
))
10736 return !ix86_legitimate_constant_p (mode
, x
);
/* Nonzero if the symbol is marked as dllimport, or as stub-variable,
   otherwise zero.  */

static bool
is_imported_p (rtx x)
{
  if (!TARGET_DLLIMPORT_DECL_ATTRIBUTES
      || GET_CODE (x) != SYMBOL_REF)
    return false;

  return SYMBOL_REF_DLLIMPORT_P (x) || SYMBOL_REF_STUBVAR_P (x);
}
10753 /* Nonzero if the constant value X is a legitimate general operand
10754 when generating PIC code. It is given that flag_pic is on and
10755 that X satisfies CONSTANT_P. */
10758 legitimate_pic_operand_p (rtx x
)
10762 switch (GET_CODE (x
))
10765 inner
= XEXP (x
, 0);
10766 if (GET_CODE (inner
) == PLUS
10767 && CONST_INT_P (XEXP (inner
, 1)))
10768 inner
= XEXP (inner
, 0);
10770 /* Only some unspecs are valid as "constants". */
10771 if (GET_CODE (inner
) == UNSPEC
)
10772 switch (XINT (inner
, 1))
10775 case UNSPEC_GOTOFF
:
10776 case UNSPEC_PLTOFF
:
10777 return TARGET_64BIT
;
10779 x
= XVECEXP (inner
, 0, 0);
10780 return (GET_CODE (x
) == SYMBOL_REF
10781 && SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_EXEC
);
10782 case UNSPEC_MACHOPIC_OFFSET
:
10783 return legitimate_pic_address_disp_p (x
);
10791 return legitimate_pic_address_disp_p (x
);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */
10802 legitimate_pic_address_disp_p (rtx disp
)
10806 /* In 64bit mode we can allow direct addresses of symbols and labels
10807 when they are not dynamic symbols. */
10810 rtx op0
= disp
, op1
;
10812 switch (GET_CODE (disp
))
10818 if (GET_CODE (XEXP (disp
, 0)) != PLUS
)
10820 op0
= XEXP (XEXP (disp
, 0), 0);
10821 op1
= XEXP (XEXP (disp
, 0), 1);
10822 if (!CONST_INT_P (op1
))
10824 if (GET_CODE (op0
) == UNSPEC
10825 && (XINT (op0
, 1) == UNSPEC_DTPOFF
10826 || XINT (op0
, 1) == UNSPEC_NTPOFF
)
10827 && trunc_int_for_mode (INTVAL (op1
), SImode
) == INTVAL (op1
))
10829 if (INTVAL (op1
) >= 16*1024*1024
10830 || INTVAL (op1
) < -16*1024*1024)
10832 if (GET_CODE (op0
) == LABEL_REF
)
10834 if (GET_CODE (op0
) == CONST
10835 && GET_CODE (XEXP (op0
, 0)) == UNSPEC
10836 && XINT (XEXP (op0
, 0), 1) == UNSPEC_PCREL
)
10838 if (GET_CODE (op0
) == UNSPEC
10839 && XINT (op0
, 1) == UNSPEC_PCREL
)
10841 if (GET_CODE (op0
) != SYMBOL_REF
)
      /* TLS references should always be enclosed in UNSPEC.
	 The dllimported symbol always needs to be resolved.  */
10848 if (SYMBOL_REF_TLS_MODEL (op0
)
10849 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES
&& SYMBOL_REF_DLLIMPORT_P (op0
)))
10854 if (is_imported_p (op0
))
10857 if (SYMBOL_REF_FAR_ADDR_P (op0
) || !SYMBOL_REF_LOCAL_P (op0
))
10860 /* Non-external-weak function symbols need to be resolved only
10861 for the large model. Non-external symbols don't need to be
10862 resolved for large and medium models. For the small model,
10863 we don't need to resolve anything here. */
10864 if ((ix86_cmodel
!= CM_LARGE_PIC
10865 && SYMBOL_REF_FUNCTION_P (op0
)
10866 && !(SYMBOL_REF_EXTERNAL_P (op0
) && SYMBOL_REF_WEAK (op0
)))
10867 || !SYMBOL_REF_EXTERNAL_P (op0
)
10868 || ix86_cmodel
== CM_SMALL_PIC
)
10871 else if (!SYMBOL_REF_FAR_ADDR_P (op0
)
10872 && (SYMBOL_REF_LOCAL_P (op0
)
10873 || ((ix86_direct_extern_access
10874 && !(SYMBOL_REF_DECL (op0
)
10875 && lookup_attribute ("nodirect_extern_access",
10876 DECL_ATTRIBUTES (SYMBOL_REF_DECL (op0
)))))
10877 && HAVE_LD_PIE_COPYRELOC
10879 && !SYMBOL_REF_WEAK (op0
)
10880 && !SYMBOL_REF_FUNCTION_P (op0
)))
10881 && ix86_cmodel
!= CM_LARGE_PIC
)
10889 if (GET_CODE (disp
) != CONST
)
10891 disp
= XEXP (disp
, 0);
  /* It is unsafe to allow PLUS expressions here; that would limit the
     allowed distance of GOT table references.  We should not need these
     anyway.  */
10897 if (GET_CODE (disp
) != UNSPEC
10898 || (XINT (disp
, 1) != UNSPEC_GOTPCREL
10899 && XINT (disp
, 1) != UNSPEC_GOTOFF
10900 && XINT (disp
, 1) != UNSPEC_PCREL
10901 && XINT (disp
, 1) != UNSPEC_PLTOFF
))
10904 if (GET_CODE (XVECEXP (disp
, 0, 0)) != SYMBOL_REF
10905 && GET_CODE (XVECEXP (disp
, 0, 0)) != LABEL_REF
)
10911 if (GET_CODE (disp
) == PLUS
)
10913 if (!CONST_INT_P (XEXP (disp
, 1)))
10915 disp
= XEXP (disp
, 0);
10919 if (TARGET_MACHO
&& darwin_local_data_pic (disp
))
10922 if (GET_CODE (disp
) != UNSPEC
)
10925 switch (XINT (disp
, 1))
10930 /* We need to check for both symbols and labels because VxWorks loads
10931 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
10933 return (GET_CODE (XVECEXP (disp
, 0, 0)) == SYMBOL_REF
10934 || GET_CODE (XVECEXP (disp
, 0, 0)) == LABEL_REF
);
10935 case UNSPEC_GOTOFF
:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32bit relocation, we don't produce
	 it in the small PIC model at all.  */
10939 if ((GET_CODE (XVECEXP (disp
, 0, 0)) == SYMBOL_REF
10940 || GET_CODE (XVECEXP (disp
, 0, 0)) == LABEL_REF
)
10942 return !TARGET_PECOFF
&& gotoff_operand (XVECEXP (disp
, 0, 0), Pmode
);
10944 case UNSPEC_GOTTPOFF
:
10945 case UNSPEC_GOTNTPOFF
:
10946 case UNSPEC_INDNTPOFF
:
10949 disp
= XVECEXP (disp
, 0, 0);
10950 return (GET_CODE (disp
) == SYMBOL_REF
10951 && SYMBOL_REF_TLS_MODEL (disp
) == TLS_MODEL_INITIAL_EXEC
);
10952 case UNSPEC_NTPOFF
:
10953 disp
= XVECEXP (disp
, 0, 0);
10954 return (GET_CODE (disp
) == SYMBOL_REF
10955 && SYMBOL_REF_TLS_MODEL (disp
) == TLS_MODEL_LOCAL_EXEC
);
10956 case UNSPEC_DTPOFF
:
10957 disp
= XVECEXP (disp
, 0, 0);
10958 return (GET_CODE (disp
) == SYMBOL_REF
10959 && SYMBOL_REF_TLS_MODEL (disp
) == TLS_MODEL_LOCAL_DYNAMIC
);
/* Determine if OP is a suitable RTX for an address register.
   Return the naked register if a register or a register subreg is
   found, otherwise return NULL_RTX.  */
10970 ix86_validate_address_register (rtx op
)
10972 machine_mode mode
= GET_MODE (op
);
10974 /* Only SImode or DImode registers can form the address. */
10975 if (mode
!= SImode
&& mode
!= DImode
)
10980 else if (SUBREG_P (op
))
10982 rtx reg
= SUBREG_REG (op
);
10987 mode
= GET_MODE (reg
);
10989 /* Don't allow SUBREGs that span more than a word. It can
10990 lead to spill failures when the register is one word out
10991 of a two word structure. */
10992 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
10995 /* Allow only SUBREGs of non-eliminable hard registers. */
10996 if (register_no_elim_operand (reg
, mode
))
11000 /* Op is not a register. */
11004 /* Recognizes RTL expressions that are valid memory addresses for an
11005 instruction. The MODE argument is the machine mode for the MEM
11006 expression that wants to use this address.
   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */
11013 ix86_legitimate_address_p (machine_mode
, rtx addr
, bool strict
)
11015 struct ix86_address parts
;
11016 rtx base
, index
, disp
;
11017 HOST_WIDE_INT scale
;
11020 if (ix86_decompose_address (addr
, &parts
) == 0)
11021 /* Decomposition failed. */
11025 index
= parts
.index
;
11027 scale
= parts
.scale
;
11030 /* Validate base register. */
11033 rtx reg
= ix86_validate_address_register (base
);
11035 if (reg
== NULL_RTX
)
11038 unsigned int regno
= REGNO (reg
);
11039 if ((strict
&& !REGNO_OK_FOR_BASE_P (regno
))
11040 || (!strict
&& !REGNO_OK_FOR_BASE_NONSTRICT_P (regno
)))
11041 /* Base is not valid. */
11045 /* Validate index register. */
11048 rtx reg
= ix86_validate_address_register (index
);
11050 if (reg
== NULL_RTX
)
11053 unsigned int regno
= REGNO (reg
);
11054 if ((strict
&& !REGNO_OK_FOR_INDEX_P (regno
))
11055 || (!strict
&& !REGNO_OK_FOR_INDEX_NONSTRICT_P (regno
)))
11056 /* Index is not valid. */
11060 /* Index and base should have the same mode. */
11062 && GET_MODE (base
) != GET_MODE (index
))
11065 /* Address override works only on the (%reg) part of %fs:(%reg). */
11066 if (seg
!= ADDR_SPACE_GENERIC
11067 && ((base
&& GET_MODE (base
) != word_mode
)
11068 || (index
&& GET_MODE (index
) != word_mode
)))
11071 /* Validate scale factor. */
11075 /* Scale without index. */
11078 if (scale
!= 2 && scale
!= 4 && scale
!= 8)
11079 /* Scale is not a valid multiplier. */
11083 /* Validate displacement. */
11086 if (ix86_endbr_immediate_operand (disp
, VOIDmode
))
11089 if (GET_CODE (disp
) == CONST
11090 && GET_CODE (XEXP (disp
, 0)) == UNSPEC
11091 && XINT (XEXP (disp
, 0), 1) != UNSPEC_MACHOPIC_OFFSET
)
11092 switch (XINT (XEXP (disp
, 0), 1))
	  /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit
	     when used.  While the ABI also specifies 32bit relocations, we
	     don't produce them at all and use IP relative addressing instead.
	     Allow GOT in 32bit mode for both PIC and non-PIC if the symbol
	     should be loaded via the GOT.  */
11101 && ix86_force_load_from_GOT_p (XVECEXP (XEXP (disp
, 0), 0, 0)))
11102 goto is_legitimate_pic
;
11104 case UNSPEC_GOTOFF
:
11105 gcc_assert (flag_pic
);
11107 goto is_legitimate_pic
;
11109 /* 64bit address unspec. */
11112 case UNSPEC_GOTPCREL
:
11113 if (ix86_force_load_from_GOT_p (XVECEXP (XEXP (disp
, 0), 0, 0)))
11114 goto is_legitimate_pic
;
11117 gcc_assert (flag_pic
);
11118 goto is_legitimate_pic
;
11120 case UNSPEC_GOTTPOFF
:
11121 case UNSPEC_GOTNTPOFF
:
11122 case UNSPEC_INDNTPOFF
:
11123 case UNSPEC_NTPOFF
:
11124 case UNSPEC_DTPOFF
:
11128 /* Invalid address unspec. */
11132 else if (SYMBOLIC_CONST (disp
)
11135 || (MACHOPIC_INDIRECT
11136 && !machopic_operand_p (disp
))
11142 if (TARGET_64BIT
&& (index
|| base
))
11144 /* foo@dtpoff(%rX) is ok. */
11145 if (GET_CODE (disp
) != CONST
11146 || GET_CODE (XEXP (disp
, 0)) != PLUS
11147 || GET_CODE (XEXP (XEXP (disp
, 0), 0)) != UNSPEC
11148 || !CONST_INT_P (XEXP (XEXP (disp
, 0), 1))
11149 || (XINT (XEXP (XEXP (disp
, 0), 0), 1) != UNSPEC_DTPOFF
11150 && XINT (XEXP (XEXP (disp
, 0), 0), 1) != UNSPEC_NTPOFF
))
11151 /* Non-constant pic memory reference. */
11154 else if ((!TARGET_MACHO
|| flag_pic
)
11155 && ! legitimate_pic_address_disp_p (disp
))
11156 /* Displacement is an invalid pic construct. */
11159 else if (MACHO_DYNAMIC_NO_PIC_P
11160 && !ix86_legitimate_constant_p (Pmode
, disp
))
	/* Displacement must be referenced via non_lazy_pointer.  */
11165 /* This code used to verify that a symbolic pic displacement
11166 includes the pic_offset_table_rtx register.
	 While this is a good idea, unfortunately these constructs may
	 be created by the "adds using lea" optimization for incorrect
	 code (e.g. adding an offset to the address of a global and
	 dereferencing the result).

	 This code is nonsensical, but results in addressing the
	 GOT table with a pic_offset_table_rtx base.  We can't
	 just refuse it easily, since it gets matched by the
	 "addsi3" pattern, which later gets split to lea when the
	 output register differs from the input.  While this
	 could be handled by a separate addsi pattern for this case
	 that never results in lea, disabling this test seems to be the
	 easier and correct fix for the crash.  */
11187 else if (GET_CODE (disp
) != LABEL_REF
11188 && !CONST_INT_P (disp
)
11189 && (GET_CODE (disp
) != CONST
11190 || !ix86_legitimate_constant_p (Pmode
, disp
))
11191 && (GET_CODE (disp
) != SYMBOL_REF
11192 || !ix86_legitimate_constant_p (Pmode
, disp
)))
11193 /* Displacement is not constant. */
11195 else if (TARGET_64BIT
11196 && !x86_64_immediate_operand (disp
, VOIDmode
))
11197 /* Displacement is out of range. */
11199 /* In x32 mode, constant addresses are sign extended to 64bit, so
11200 we have to prevent addresses from 0x80000000 to 0xffffffff. */
11201 else if (TARGET_X32
&& !(index
|| base
)
11202 && CONST_INT_P (disp
)
11203 && val_signbit_known_set_p (SImode
, INTVAL (disp
)))
11207 /* Everything looks valid. */
11211 /* Determine if a given RTX is a valid constant address. */
bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}
11219 /* Return a unique alias set for the GOT. */
alias_set_type
ix86_GOT_alias_set (void)
{
  static alias_set_type set = -1;
  if (set == -1)
    set = new_alias_set ();
  return set;
}
11230 /* Return a legitimate reference for ORIG (an address) using the
11231 register REG. If REG is 0, a new pseudo is generated.
11233 There are two types of references that must be handled:
11235 1. Global data references must load the address from the GOT, via
11236 the PIC reg. An insn is emitted to do this load, and the reg is
11239 2. Static data references, constant pool addresses, and code labels
11240 compute the address as an offset from the GOT, whose base is in
11241 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
11242 differentiate them from global data objects. The returned
11243 address is the PIC reg + an unspec constant.
11245 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
11246 reg also appears in the address. */
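/* Illustrative note (added, not from the original sources): with 32-bit
   -fpic and the GOT base in %ebx, case 1 typically becomes
	movl	foo@GOT(%ebx), %eax	(load address of global FOO from the GOT)
   while case 2 becomes
	leal	bar@GOTOFF(%ebx), %eax	(PIC reg plus constant offset)
   for a local/static BAR.  */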
11249 legitimize_pic_address (rtx orig
, rtx reg
)
11252 rtx new_rtx
= orig
;
11255 if (TARGET_MACHO
&& !TARGET_64BIT
)
11258 reg
= gen_reg_rtx (Pmode
);
11259 /* Use the generic Mach-O PIC machinery. */
11260 return machopic_legitimize_pic_address (orig
, GET_MODE (orig
), reg
);
11264 if (TARGET_64BIT
&& TARGET_DLLIMPORT_DECL_ATTRIBUTES
)
11266 rtx tmp
= legitimize_pe_coff_symbol (addr
, true);
11271 if (TARGET_64BIT
&& legitimate_pic_address_disp_p (addr
))
11273 else if ((!TARGET_64BIT
11274 || /* TARGET_64BIT && */ ix86_cmodel
!= CM_SMALL_PIC
)
11276 && gotoff_operand (addr
, Pmode
))
11278 /* This symbol may be referenced via a displacement
11279 from the PIC base address (@GOTOFF). */
11280 if (GET_CODE (addr
) == CONST
)
11281 addr
= XEXP (addr
, 0);
11283 if (GET_CODE (addr
) == PLUS
)
11285 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, XEXP (addr
, 0)),
11287 new_rtx
= gen_rtx_PLUS (Pmode
, new_rtx
, XEXP (addr
, 1));
11290 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOTOFF
);
11292 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
11295 new_rtx
= copy_to_suggested_reg (new_rtx
, reg
, Pmode
);
11299 gcc_assert (REG_P (reg
));
11300 new_rtx
= expand_simple_binop (Pmode
, PLUS
, pic_offset_table_rtx
,
11301 new_rtx
, reg
, 1, OPTAB_DIRECT
);
11304 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, new_rtx
);
11306 else if ((GET_CODE (addr
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (addr
) == 0)
11307 /* We can't always use @GOTOFF for text labels
11308 on VxWorks, see gotoff_operand. */
11309 || (TARGET_VXWORKS_RTP
&& GET_CODE (addr
) == LABEL_REF
))
11311 rtx tmp
= legitimize_pe_coff_symbol (addr
, true);
11315 /* For x64 PE-COFF there is no GOT table,
11316 so we use address directly. */
11317 if (TARGET_64BIT
&& TARGET_PECOFF
)
11319 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_PCREL
);
11320 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
11322 else if (TARGET_64BIT
&& ix86_cmodel
!= CM_LARGE_PIC
)
11324 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
),
11326 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
11327 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
11328 set_mem_alias_set (new_rtx
, ix86_GOT_alias_set ());
11332 /* This symbol must be referenced via a load
11333 from the Global Offset Table (@GOT). */
11334 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), UNSPEC_GOT
);
11335 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
11338 new_rtx
= copy_to_suggested_reg (new_rtx
, reg
, Pmode
);
11342 gcc_assert (REG_P (reg
));
11343 new_rtx
= expand_simple_binop (Pmode
, PLUS
, pic_offset_table_rtx
,
11344 new_rtx
, reg
, 1, OPTAB_DIRECT
);
11347 new_rtx
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, new_rtx
);
11349 new_rtx
= gen_const_mem (Pmode
, new_rtx
);
11350 set_mem_alias_set (new_rtx
, ix86_GOT_alias_set ());
11353 new_rtx
= copy_to_suggested_reg (new_rtx
, reg
, Pmode
);
11357 if (CONST_INT_P (addr
)
11358 && !x86_64_immediate_operand (addr
, VOIDmode
))
11359 new_rtx
= copy_to_suggested_reg (addr
, reg
, Pmode
);
11360 else if (GET_CODE (addr
) == CONST
)
11362 addr
= XEXP (addr
, 0);
11364 /* We must match stuff we generate before. Assume the only
11365 unspecs that can get here are ours. Not that we could do
11366 anything with them anyway.... */
11367 if (GET_CODE (addr
) == UNSPEC
11368 || (GET_CODE (addr
) == PLUS
11369 && GET_CODE (XEXP (addr
, 0)) == UNSPEC
))
11371 gcc_assert (GET_CODE (addr
) == PLUS
);
11374 if (GET_CODE (addr
) == PLUS
)
11376 rtx op0
= XEXP (addr
, 0), op1
= XEXP (addr
, 1);
11378 /* Check first to see if this is a constant
11379 offset from a @GOTOFF symbol reference. */
11381 && gotoff_operand (op0
, Pmode
)
11382 && CONST_INT_P (op1
))
11386 new_rtx
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, op0
),
11388 new_rtx
= gen_rtx_PLUS (Pmode
, new_rtx
, op1
);
11389 new_rtx
= gen_rtx_CONST (Pmode
, new_rtx
);
11393 gcc_assert (REG_P (reg
));
11394 new_rtx
= expand_simple_binop (Pmode
, PLUS
,
11395 pic_offset_table_rtx
,
11401 = gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, new_rtx
);
11405 if (INTVAL (op1
) < -16*1024*1024
11406 || INTVAL (op1
) >= 16*1024*1024)
11408 if (!x86_64_immediate_operand (op1
, Pmode
))
11409 op1
= force_reg (Pmode
, op1
);
11412 = gen_rtx_PLUS (Pmode
, force_reg (Pmode
, op0
), op1
);
11418 rtx base
= legitimize_pic_address (op0
, reg
);
11419 machine_mode mode
= GET_MODE (base
);
11421 = legitimize_pic_address (op1
, base
== reg
? NULL_RTX
: reg
);
11423 if (CONST_INT_P (new_rtx
))
11425 if (INTVAL (new_rtx
) < -16*1024*1024
11426 || INTVAL (new_rtx
) >= 16*1024*1024)
11428 if (!x86_64_immediate_operand (new_rtx
, mode
))
11429 new_rtx
= force_reg (mode
, new_rtx
);
11432 = gen_rtx_PLUS (mode
, force_reg (mode
, base
), new_rtx
);
11435 new_rtx
= plus_constant (mode
, base
, INTVAL (new_rtx
));
	      /* For %rip addressing, we have to use
		 just disp32, with neither base nor index.  */
11442 && (GET_CODE (base
) == SYMBOL_REF
11443 || GET_CODE (base
) == LABEL_REF
))
11444 base
= force_reg (mode
, base
);
11445 if (GET_CODE (new_rtx
) == PLUS
11446 && CONSTANT_P (XEXP (new_rtx
, 1)))
11448 base
= gen_rtx_PLUS (mode
, base
, XEXP (new_rtx
, 0));
11449 new_rtx
= XEXP (new_rtx
, 1);
11451 new_rtx
= gen_rtx_PLUS (mode
, base
, new_rtx
);
11459 /* Load the thread pointer. If TO_REG is true, force it into a register. */
static rtx
get_thread_pointer (machine_mode tp_mode, bool to_reg)
{
  rtx tp = gen_rtx_UNSPEC (ptr_mode, gen_rtvec (1, const0_rtx), UNSPEC_TP);

  if (GET_MODE (tp) != tp_mode)
    {
      gcc_assert (GET_MODE (tp) == SImode);
      gcc_assert (tp_mode == DImode);

      tp = gen_rtx_ZERO_EXTEND (tp_mode, tp);
    }

  if (to_reg)
    tp = copy_to_mode_reg (tp_mode, tp);

  return tp;
}
11480 /* Construct the SYMBOL_REF for the tls_get_addr function. */
static GTY(()) rtx ix86_tls_symbol;

static rtx
ix86_tls_get_addr (void)
{
  if (!ix86_tls_symbol)
    {
      const char *sym
	= ((TARGET_ANY_GNU_TLS && !TARGET_64BIT)
	   ? "___tls_get_addr" : "__tls_get_addr");

      ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, sym);
    }

  if (ix86_cmodel == CM_LARGE_PIC && !TARGET_PECOFF)
    {
      rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, ix86_tls_symbol),
				   UNSPEC_PLTOFF);
      return gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
			   gen_rtx_CONST (Pmode, unspec));
    }

  return ix86_tls_symbol;
}
11507 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
static GTY(()) rtx ix86_tls_module_base_symbol;

static rtx
ix86_tls_module_base (void)
{
  if (!ix86_tls_module_base_symbol)
    {
      ix86_tls_module_base_symbol
	= gen_rtx_SYMBOL_REF (ptr_mode, "_TLS_MODULE_BASE_");

      SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
	|= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
    }

  return ix86_tls_module_base_symbol;
}
11526 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
11527 false if we expect this to be used for a memory address and true if
11528 we expect to load the address into a register. */
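/* Illustrative note (added, not from the original sources): the TLS models
   handled below roughly correspond to these x86-64 access sequences
   (register choices are only examples):
     global dynamic:  leaq x@tlsgd(%rip), %rdi; call __tls_get_addr
     local dynamic:   leaq x@tlsld(%rip), %rdi; call __tls_get_addr;
		      then address x via x@dtpoff from the returned base
     initial exec:    movq x@gottpoff(%rip), %rax; movl %fs:(%rax), %eax
     local exec:      movl %fs:x@tpoff, %eax
   32-bit targets use %gs instead of %fs.  */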
11531 legitimize_tls_address (rtx x
, enum tls_model model
, bool for_mov
)
11533 rtx dest
, base
, off
;
11534 rtx pic
= NULL_RTX
, tp
= NULL_RTX
;
11535 machine_mode tp_mode
= Pmode
;
11538 /* Fall back to global dynamic model if tool chain cannot support local
11540 if (TARGET_SUN_TLS
&& !TARGET_64BIT
11541 && !HAVE_AS_IX86_TLSLDMPLT
&& !HAVE_AS_IX86_TLSLDM
11542 && model
== TLS_MODEL_LOCAL_DYNAMIC
)
11543 model
= TLS_MODEL_GLOBAL_DYNAMIC
;
11547 case TLS_MODEL_GLOBAL_DYNAMIC
:
11550 if (flag_pic
&& !TARGET_PECOFF
)
11551 pic
= pic_offset_table_rtx
;
11554 pic
= gen_reg_rtx (Pmode
);
11555 emit_insn (gen_set_got (pic
));
11559 if (TARGET_GNU2_TLS
)
11561 dest
= gen_reg_rtx (ptr_mode
);
11563 emit_insn (gen_tls_dynamic_gnu2_64 (ptr_mode
, dest
, x
));
11565 emit_insn (gen_tls_dynamic_gnu2_32 (dest
, x
, pic
));
11567 tp
= get_thread_pointer (ptr_mode
, true);
11568 dest
= gen_rtx_PLUS (ptr_mode
, tp
, dest
);
11569 if (GET_MODE (dest
) != Pmode
)
11570 dest
= gen_rtx_ZERO_EXTEND (Pmode
, dest
);
11571 dest
= force_reg (Pmode
, dest
);
11573 if (GET_MODE (x
) != Pmode
)
11574 x
= gen_rtx_ZERO_EXTEND (Pmode
, x
);
11576 set_unique_reg_note (get_last_insn (), REG_EQUAL
, x
);
11580 rtx caddr
= ix86_tls_get_addr ();
11582 dest
= gen_reg_rtx (Pmode
);
11585 rtx rax
= gen_rtx_REG (Pmode
, AX_REG
);
11590 (gen_tls_global_dynamic_64 (Pmode
, rax
, x
, caddr
));
11591 insns
= get_insns ();
11594 if (GET_MODE (x
) != Pmode
)
11595 x
= gen_rtx_ZERO_EXTEND (Pmode
, x
);
11597 RTL_CONST_CALL_P (insns
) = 1;
11598 emit_libcall_block (insns
, dest
, rax
, x
);
11601 emit_insn (gen_tls_global_dynamic_32 (dest
, x
, pic
, caddr
));
11605 case TLS_MODEL_LOCAL_DYNAMIC
:
11609 pic
= pic_offset_table_rtx
;
11612 pic
= gen_reg_rtx (Pmode
);
11613 emit_insn (gen_set_got (pic
));
11617 if (TARGET_GNU2_TLS
)
11619 rtx tmp
= ix86_tls_module_base ();
11621 base
= gen_reg_rtx (ptr_mode
);
11623 emit_insn (gen_tls_dynamic_gnu2_64 (ptr_mode
, base
, tmp
));
11625 emit_insn (gen_tls_dynamic_gnu2_32 (base
, tmp
, pic
));
11627 tp
= get_thread_pointer (ptr_mode
, true);
11628 if (GET_MODE (base
) != Pmode
)
11629 base
= gen_rtx_ZERO_EXTEND (Pmode
, base
);
11630 base
= force_reg (Pmode
, base
);
11634 rtx caddr
= ix86_tls_get_addr ();
11636 base
= gen_reg_rtx (Pmode
);
11639 rtx rax
= gen_rtx_REG (Pmode
, AX_REG
);
11645 (gen_tls_local_dynamic_base_64 (Pmode
, rax
, caddr
));
11646 insns
= get_insns ();
11649 /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
11650 share the LD_BASE result with other LD model accesses. */
11651 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
11652 UNSPEC_TLS_LD_BASE
);
11654 RTL_CONST_CALL_P (insns
) = 1;
11655 emit_libcall_block (insns
, base
, rax
, eqv
);
11658 emit_insn (gen_tls_local_dynamic_base_32 (base
, pic
, caddr
));
11661 off
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_DTPOFF
);
11662 off
= gen_rtx_CONST (Pmode
, off
);
11664 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, base
, off
));
11666 if (TARGET_GNU2_TLS
)
11668 if (GET_MODE (tp
) != Pmode
)
11670 dest
= lowpart_subreg (ptr_mode
, dest
, Pmode
);
11671 dest
= gen_rtx_PLUS (ptr_mode
, tp
, dest
);
11672 dest
= gen_rtx_ZERO_EXTEND (Pmode
, dest
);
11675 dest
= gen_rtx_PLUS (Pmode
, tp
, dest
);
11676 dest
= force_reg (Pmode
, dest
);
11678 if (GET_MODE (x
) != Pmode
)
11679 x
= gen_rtx_ZERO_EXTEND (Pmode
, x
);
11681 set_unique_reg_note (get_last_insn (), REG_EQUAL
, x
);
11685 case TLS_MODEL_INITIAL_EXEC
:
11688 if (TARGET_SUN_TLS
&& !TARGET_X32
)
11690 /* The Sun linker took the AMD64 TLS spec literally
11691 and can only handle %rax as destination of the
11692 initial executable code sequence. */
11694 dest
= gen_reg_rtx (DImode
);
11695 emit_insn (gen_tls_initial_exec_64_sun (dest
, x
));
11699 /* Generate DImode references to avoid %fs:(%reg32)
11700 problems and linker IE->LE relaxation bug. */
11703 type
= UNSPEC_GOTNTPOFF
;
11707 pic
= pic_offset_table_rtx
;
11708 type
= TARGET_ANY_GNU_TLS
? UNSPEC_GOTNTPOFF
: UNSPEC_GOTTPOFF
;
11710 else if (!TARGET_ANY_GNU_TLS
)
11712 pic
= gen_reg_rtx (Pmode
);
11713 emit_insn (gen_set_got (pic
));
11714 type
= UNSPEC_GOTTPOFF
;
11719 type
= UNSPEC_INDNTPOFF
;
11722 off
= gen_rtx_UNSPEC (tp_mode
, gen_rtvec (1, x
), type
);
11723 off
= gen_rtx_CONST (tp_mode
, off
);
11725 off
= gen_rtx_PLUS (tp_mode
, pic
, off
);
11726 off
= gen_const_mem (tp_mode
, off
);
11727 set_mem_alias_set (off
, ix86_GOT_alias_set ());
11729 if (TARGET_64BIT
|| TARGET_ANY_GNU_TLS
)
11731 base
= get_thread_pointer (tp_mode
,
11732 for_mov
|| !TARGET_TLS_DIRECT_SEG_REFS
);
11733 off
= force_reg (tp_mode
, off
);
11734 dest
= gen_rtx_PLUS (tp_mode
, base
, off
);
11735 if (tp_mode
!= Pmode
)
11736 dest
= convert_to_mode (Pmode
, dest
, 1);
11740 base
= get_thread_pointer (Pmode
, true);
11741 dest
= gen_reg_rtx (Pmode
);
11742 emit_insn (gen_sub3_insn (dest
, base
, off
));
11746 case TLS_MODEL_LOCAL_EXEC
:
11747 off
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
),
11748 (TARGET_64BIT
|| TARGET_ANY_GNU_TLS
)
11749 ? UNSPEC_NTPOFF
: UNSPEC_TPOFF
);
11750 off
= gen_rtx_CONST (Pmode
, off
);
11752 if (TARGET_64BIT
|| TARGET_ANY_GNU_TLS
)
11754 base
= get_thread_pointer (Pmode
,
11755 for_mov
|| !TARGET_TLS_DIRECT_SEG_REFS
);
11756 return gen_rtx_PLUS (Pmode
, base
, off
);
11760 base
= get_thread_pointer (Pmode
, true);
11761 dest
= gen_reg_rtx (Pmode
);
11762 emit_insn (gen_sub3_insn (dest
, base
, off
));
11767 gcc_unreachable ();
/* Return true if the TLS address requires an insn using integer registers.
   It's used to prevent KMOV/VMOV in TLS code sequences, which require integer
   MOV instructions; see PR103275.  */
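/* Illustrative note (added, not from the original sources): an initial-exec
   access such as
	movq	x@gottpoff(%rip), %rax
	movl	%fs:(%rax), %eax
   needs the intermediate offset in a general-purpose register, so moves
   feeding such addresses must stay integer MOVs rather than KMOV/VMOV.  */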
11777 ix86_gpr_tls_address_pattern_p (rtx mem
)
11779 gcc_assert (MEM_P (mem
));
11781 rtx addr
= XEXP (mem
, 0);
11782 subrtx_var_iterator::array_type array
;
11783 FOR_EACH_SUBRTX_VAR (iter
, array
, addr
, ALL
)
11786 if (GET_CODE (op
) == UNSPEC
)
11787 switch (XINT (op
, 1))
11789 case UNSPEC_GOTNTPOFF
:
11803 /* Return true if OP refers to a TLS address. */
11805 ix86_tls_address_pattern_p (rtx op
)
11807 subrtx_var_iterator::array_type array
;
11808 FOR_EACH_SUBRTX_VAR (iter
, array
, op
, ALL
)
11813 rtx
*x
= &XEXP (op
, 0);
11814 while (GET_CODE (*x
) == PLUS
)
11817 for (i
= 0; i
< 2; i
++)
11819 rtx u
= XEXP (*x
, i
);
11820 if (GET_CODE (u
) == ZERO_EXTEND
)
11822 if (GET_CODE (u
) == UNSPEC
11823 && XINT (u
, 1) == UNSPEC_TP
)
11829 iter
.skip_subrtxes ();
11836 /* Rewrite *LOC so that it refers to a default TLS address space. */
11838 ix86_rewrite_tls_address_1 (rtx
*loc
)
11840 subrtx_ptr_iterator::array_type array
;
11841 FOR_EACH_SUBRTX_PTR (iter
, array
, loc
, ALL
)
11846 rtx addr
= XEXP (*loc
, 0);
11848 while (GET_CODE (*x
) == PLUS
)
11851 for (i
= 0; i
< 2; i
++)
11853 rtx u
= XEXP (*x
, i
);
11854 if (GET_CODE (u
) == ZERO_EXTEND
)
11856 if (GET_CODE (u
) == UNSPEC
11857 && XINT (u
, 1) == UNSPEC_TP
)
11859 addr_space_t as
= DEFAULT_TLS_SEG_REG
;
11861 *x
= XEXP (*x
, 1 - i
);
11863 *loc
= replace_equiv_address_nv (*loc
, addr
, true);
11864 set_mem_addr_space (*loc
, as
);
11871 iter
.skip_subrtxes ();
/* Rewrite an instruction pattern involving a TLS address
   so that it refers to the default TLS address space.  */

rtx
ix86_rewrite_tls_address (rtx pattern)
{
  pattern = copy_insn (pattern);
  ix86_rewrite_tls_address_1 (&pattern);
  return pattern;
}
11886 /* Create or return the unique __imp_DECL dllimport symbol corresponding
11887 to symbol DECL if BEIMPORT is true. Otherwise create or return the
11888 unique refptr-DECL symbol corresponding to symbol DECL. */
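/* Illustrative note (added, not from the original sources): for a symbol FOO
   marked dllimport, references are redirected through the import pointer,
   i.e. through a symbol named "__imp__foo" or "__imp_foo" depending on the
   user label prefix, matching the prefix selection further below.  */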
11890 struct dllimport_hasher
: ggc_cache_ptr_hash
<tree_map
>
11892 static inline hashval_t
hash (tree_map
*m
) { return m
->hash
; }
11894 equal (tree_map
*a
, tree_map
*b
)
11896 return a
->base
.from
== b
->base
.from
;
11900 keep_cache_entry (tree_map
*&m
)
11902 return ggc_marked_p (m
->base
.from
);
11906 static GTY((cache
)) hash_table
<dllimport_hasher
> *dllimport_map
;
11909 get_dllimport_decl (tree decl
, bool beimport
)
11911 struct tree_map
*h
, in
;
11913 const char *prefix
;
11914 size_t namelen
, prefixlen
;
11919 if (!dllimport_map
)
11920 dllimport_map
= hash_table
<dllimport_hasher
>::create_ggc (512);
11922 in
.hash
= htab_hash_pointer (decl
);
11923 in
.base
.from
= decl
;
11924 tree_map
**loc
= dllimport_map
->find_slot_with_hash (&in
, in
.hash
, INSERT
);
11929 *loc
= h
= ggc_alloc
<tree_map
> ();
11931 h
->base
.from
= decl
;
11932 h
->to
= to
= build_decl (DECL_SOURCE_LOCATION (decl
),
11933 VAR_DECL
, NULL
, ptr_type_node
);
11934 DECL_ARTIFICIAL (to
) = 1;
11935 DECL_IGNORED_P (to
) = 1;
11936 DECL_EXTERNAL (to
) = 1;
11937 TREE_READONLY (to
) = 1;
11939 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
11940 name
= targetm
.strip_name_encoding (name
);
11942 prefix
= name
[0] == FASTCALL_PREFIX
|| user_label_prefix
[0] == 0
11943 ? "*__imp_" : "*__imp__";
11945 prefix
= user_label_prefix
[0] == 0 ? "*.refptr." : "*refptr.";
11946 namelen
= strlen (name
);
11947 prefixlen
= strlen (prefix
);
11948 imp_name
= (char *) alloca (namelen
+ prefixlen
+ 1);
11949 memcpy (imp_name
, prefix
, prefixlen
);
11950 memcpy (imp_name
+ prefixlen
, name
, namelen
+ 1);
11952 name
= ggc_alloc_string (imp_name
, namelen
+ prefixlen
);
11953 rtl
= gen_rtx_SYMBOL_REF (Pmode
, name
);
11954 SET_SYMBOL_REF_DECL (rtl
, to
);
11955 SYMBOL_REF_FLAGS (rtl
) = SYMBOL_FLAG_LOCAL
| SYMBOL_FLAG_STUBVAR
;
11958 SYMBOL_REF_FLAGS (rtl
) |= SYMBOL_FLAG_EXTERNAL
;
11959 #ifdef SUB_TARGET_RECORD_STUB
11960 SUB_TARGET_RECORD_STUB (name
);
11964 rtl
= gen_const_mem (Pmode
, rtl
);
11965 set_mem_alias_set (rtl
, ix86_GOT_alias_set ());
11967 SET_DECL_RTL (to
, rtl
);
11968 SET_DECL_ASSEMBLER_NAME (to
, get_identifier (name
));
11973 /* Expand SYMBOL into its corresponding far-address symbol.
11974 WANT_REG is true if we require the result be a register. */
11977 legitimize_pe_coff_extern_decl (rtx symbol
, bool want_reg
)
11982 gcc_assert (SYMBOL_REF_DECL (symbol
));
11983 imp_decl
= get_dllimport_decl (SYMBOL_REF_DECL (symbol
), false);
11985 x
= DECL_RTL (imp_decl
);
11987 x
= force_reg (Pmode
, x
);
11991 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
11992 true if we require the result be a register. */
11995 legitimize_dllimport_symbol (rtx symbol
, bool want_reg
)
12000 gcc_assert (SYMBOL_REF_DECL (symbol
));
12001 imp_decl
= get_dllimport_decl (SYMBOL_REF_DECL (symbol
), true);
12003 x
= DECL_RTL (imp_decl
);
12005 x
= force_reg (Pmode
, x
);
12009 /* Expand SYMBOL into its corresponding dllimport or refptr symbol. WANT_REG
12010 is true if we require the result be a register. */
12013 legitimize_pe_coff_symbol (rtx addr
, bool inreg
)
12015 if (!TARGET_PECOFF
)
12018 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
)
12020 if (GET_CODE (addr
) == SYMBOL_REF
&& SYMBOL_REF_DLLIMPORT_P (addr
))
12021 return legitimize_dllimport_symbol (addr
, inreg
);
12022 if (GET_CODE (addr
) == CONST
12023 && GET_CODE (XEXP (addr
, 0)) == PLUS
12024 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == SYMBOL_REF
12025 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr
, 0), 0)))
12027 rtx t
= legitimize_dllimport_symbol (XEXP (XEXP (addr
, 0), 0), inreg
);
12028 return gen_rtx_PLUS (Pmode
, t
, XEXP (XEXP (addr
, 0), 1));
12032 if (ix86_cmodel
!= CM_LARGE_PIC
&& ix86_cmodel
!= CM_MEDIUM_PIC
)
12034 if (GET_CODE (addr
) == SYMBOL_REF
12035 && !is_imported_p (addr
)
12036 && SYMBOL_REF_EXTERNAL_P (addr
)
12037 && SYMBOL_REF_DECL (addr
))
12038 return legitimize_pe_coff_extern_decl (addr
, inreg
);
12040 if (GET_CODE (addr
) == CONST
12041 && GET_CODE (XEXP (addr
, 0)) == PLUS
12042 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == SYMBOL_REF
12043 && !is_imported_p (XEXP (XEXP (addr
, 0), 0))
12044 && SYMBOL_REF_EXTERNAL_P (XEXP (XEXP (addr
, 0), 0))
12045 && SYMBOL_REF_DECL (XEXP (XEXP (addr
, 0), 0)))
12047 rtx t
= legitimize_pe_coff_extern_decl (XEXP (XEXP (addr
, 0), 0), inreg
);
12048 return gen_rtx_PLUS (Pmode
, t
, XEXP (XEXP (addr
, 0), 1));
12053 /* Try machine-dependent ways of modifying an illegitimate address
12054 to be legitimate. If we find one, return the new, valid address.
12055 This macro is used in only one place: `memory_address' in explow.cc.
12057 OLDX is the address as it was before break_out_memory_refs was called.
12058 In some cases it is useful to look at this to decide what needs to be done.
12060 It is always safe for this macro to do nothing. It exists to recognize
12061 opportunities to optimize the output.
12063 For the 80386, we handle X+REG by loading X into a register R and
12064 using R+REG. R will go in a general reg and indexing will be used.
12065 However, if REG is a broken-out memory address or multiplication,
12066 nothing needs to be done because REG can certainly go in a general reg.
12068 When -fpic is used, special handling is needed for symbolic references.
12069 See comments by legitimize_pic_address in i386.cc for details. */
12072 ix86_legitimize_address (rtx x
, rtx
, machine_mode mode
)
12074 bool changed
= false;
12077 log
= GET_CODE (x
) == SYMBOL_REF
? SYMBOL_REF_TLS_MODEL (x
) : 0;
12079 return legitimize_tls_address (x
, (enum tls_model
) log
, false);
12080 if (GET_CODE (x
) == CONST
12081 && GET_CODE (XEXP (x
, 0)) == PLUS
12082 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
12083 && (log
= SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x
, 0), 0))))
12085 rtx t
= legitimize_tls_address (XEXP (XEXP (x
, 0), 0),
12086 (enum tls_model
) log
, false);
12087 return gen_rtx_PLUS (Pmode
, t
, XEXP (XEXP (x
, 0), 1));
12090 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
)
12092 rtx tmp
= legitimize_pe_coff_symbol (x
, true);
12097 if (flag_pic
&& SYMBOLIC_CONST (x
))
12098 return legitimize_pic_address (x
, 0);
12101 if (MACHO_DYNAMIC_NO_PIC_P
&& SYMBOLIC_CONST (x
))
12102 return machopic_indirect_data_reference (x
, 0);
12105 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
12106 if (GET_CODE (x
) == ASHIFT
12107 && CONST_INT_P (XEXP (x
, 1))
12108 && (unsigned HOST_WIDE_INT
) INTVAL (XEXP (x
, 1)) < 4)
12111 log
= INTVAL (XEXP (x
, 1));
12112 x
= gen_rtx_MULT (Pmode
, force_reg (Pmode
, XEXP (x
, 0)),
12113 GEN_INT (1 << log
));
12116 if (GET_CODE (x
) == PLUS
)
12118 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12120 if (GET_CODE (XEXP (x
, 0)) == ASHIFT
12121 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
12122 && (unsigned HOST_WIDE_INT
) INTVAL (XEXP (XEXP (x
, 0), 1)) < 4)
12125 log
= INTVAL (XEXP (XEXP (x
, 0), 1));
12126 XEXP (x
, 0) = gen_rtx_MULT (Pmode
,
12127 force_reg (Pmode
, XEXP (XEXP (x
, 0), 0)),
12128 GEN_INT (1 << log
));
12131 if (GET_CODE (XEXP (x
, 1)) == ASHIFT
12132 && CONST_INT_P (XEXP (XEXP (x
, 1), 1))
12133 && (unsigned HOST_WIDE_INT
) INTVAL (XEXP (XEXP (x
, 1), 1)) < 4)
12136 log
= INTVAL (XEXP (XEXP (x
, 1), 1));
12137 XEXP (x
, 1) = gen_rtx_MULT (Pmode
,
12138 force_reg (Pmode
, XEXP (XEXP (x
, 1), 0)),
12139 GEN_INT (1 << log
));
12142 /* Put multiply first if it isn't already. */
12143 if (GET_CODE (XEXP (x
, 1)) == MULT
)
12145 std::swap (XEXP (x
, 0), XEXP (x
, 1));
12149 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
12150 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
12151 created by virtual register instantiation, register elimination, and
12152 similar optimizations. */
12153 if (GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == PLUS
)
12156 x
= gen_rtx_PLUS (Pmode
,
12157 gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
12158 XEXP (XEXP (x
, 1), 0)),
12159 XEXP (XEXP (x
, 1), 1));
12163 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
12164 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
12165 else if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 0)) == PLUS
12166 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
12167 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == PLUS
12168 && CONSTANT_P (XEXP (x
, 1)))
12171 rtx other
= NULL_RTX
;
12173 if (CONST_INT_P (XEXP (x
, 1)))
12175 constant
= XEXP (x
, 1);
12176 other
= XEXP (XEXP (XEXP (x
, 0), 1), 1);
12178 else if (CONST_INT_P (XEXP (XEXP (XEXP (x
, 0), 1), 1)))
12180 constant
= XEXP (XEXP (XEXP (x
, 0), 1), 1);
12181 other
= XEXP (x
, 1);
12189 x
= gen_rtx_PLUS (Pmode
,
12190 gen_rtx_PLUS (Pmode
, XEXP (XEXP (x
, 0), 0),
12191 XEXP (XEXP (XEXP (x
, 0), 1), 0)),
12192 plus_constant (Pmode
, other
,
12193 INTVAL (constant
)));
12197 if (changed
&& ix86_legitimate_address_p (mode
, x
, false))
12200 if (GET_CODE (XEXP (x
, 0)) == MULT
)
12203 XEXP (x
, 0) = copy_addr_to_reg (XEXP (x
, 0));
12206 if (GET_CODE (XEXP (x
, 1)) == MULT
)
12209 XEXP (x
, 1) = copy_addr_to_reg (XEXP (x
, 1));
12213 && REG_P (XEXP (x
, 1))
12214 && REG_P (XEXP (x
, 0)))
12217 if (flag_pic
&& SYMBOLIC_CONST (XEXP (x
, 1)))
12220 x
= legitimize_pic_address (x
, 0);
12223 if (changed
&& ix86_legitimate_address_p (mode
, x
, false))
12226 if (REG_P (XEXP (x
, 0)))
12228 rtx temp
= gen_reg_rtx (Pmode
);
12229 rtx val
= force_operand (XEXP (x
, 1), temp
);
12232 val
= convert_to_mode (Pmode
, val
, 1);
12233 emit_move_insn (temp
, val
);
12236 XEXP (x
, 1) = temp
;
12240 else if (REG_P (XEXP (x
, 1)))
12242 rtx temp
= gen_reg_rtx (Pmode
);
12243 rtx val
= force_operand (XEXP (x
, 0), temp
);
12246 val
= convert_to_mode (Pmode
, val
, 1);
12247 emit_move_insn (temp
, val
);
12250 XEXP (x
, 0) = temp
;
12258 /* Print an integer constant expression in assembler syntax. Addition
12259 and subtraction are the only arithmetic that may appear in these
12260 expressions. FILE is the stdio stream to write to, X is the rtx, and
12261 CODE is the operand print code from the output string. */
12264 output_pic_addr_const (FILE *file
, rtx x
, int code
)
12268 switch (GET_CODE (x
))
12271 gcc_assert (flag_pic
);
12276 if (TARGET_64BIT
|| ! TARGET_MACHO_SYMBOL_STUBS
)
12277 output_addr_const (file
, x
);
12280 const char *name
= XSTR (x
, 0);
12282 /* Mark the decl as referenced so that cgraph will
12283 output the function. */
12284 if (SYMBOL_REF_DECL (x
))
12285 mark_decl_referenced (SYMBOL_REF_DECL (x
));
12288 if (MACHOPIC_INDIRECT
12289 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
12290 name
= machopic_indirection_name (x
, /*stub_p=*/true);
12292 assemble_name (file
, name
);
12294 if (!TARGET_MACHO
&& !(TARGET_64BIT
&& TARGET_PECOFF
)
12295 && code
== 'P' && ix86_call_use_plt_p (x
))
12296 fputs ("@PLT", file
);
12303 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (x
));
12304 assemble_name (asm_out_file
, buf
);
12308 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
));
12312 /* This used to output parentheses around the expression,
12313 but that does not work on the 386 (either ATT or BSD assembler). */
12314 output_pic_addr_const (file
, XEXP (x
, 0), code
);
12318 /* We can't handle floating point constants;
12319 TARGET_PRINT_OPERAND must handle them. */
12320 output_operand_lossage ("floating constant misused");
12324 /* Some assemblers need integer constants to appear first. */
12325 if (CONST_INT_P (XEXP (x
, 0)))
12327 output_pic_addr_const (file
, XEXP (x
, 0), code
);
12329 output_pic_addr_const (file
, XEXP (x
, 1), code
);
12333 gcc_assert (CONST_INT_P (XEXP (x
, 1)));
12334 output_pic_addr_const (file
, XEXP (x
, 1), code
);
12336 output_pic_addr_const (file
, XEXP (x
, 0), code
);
12342 putc (ASSEMBLER_DIALECT
== ASM_INTEL
? '(' : '[', file
);
12343 output_pic_addr_const (file
, XEXP (x
, 0), code
);
12345 output_pic_addr_const (file
, XEXP (x
, 1), code
);
12347 putc (ASSEMBLER_DIALECT
== ASM_INTEL
? ')' : ']', file
);
12351 gcc_assert (XVECLEN (x
, 0) == 1);
12352 output_pic_addr_const (file
, XVECEXP (x
, 0, 0), code
);
12353 switch (XINT (x
, 1))
12356 fputs ("@GOT", file
);
12358 case UNSPEC_GOTOFF
:
12359 fputs ("@GOTOFF", file
);
12361 case UNSPEC_PLTOFF
:
12362 fputs ("@PLTOFF", file
);
12365 fputs (ASSEMBLER_DIALECT
== ASM_ATT
?
12366 "(%rip)" : "[rip]", file
);
12368 case UNSPEC_GOTPCREL
:
12369 fputs (ASSEMBLER_DIALECT
== ASM_ATT
?
12370 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file
);
12372 case UNSPEC_GOTTPOFF
:
12373 /* FIXME: This might be @TPOFF in Sun ld too. */
12374 fputs ("@gottpoff", file
);
12377 fputs ("@tpoff", file
);
12379 case UNSPEC_NTPOFF
:
12381 fputs ("@tpoff", file
);
12383 fputs ("@ntpoff", file
);
12385 case UNSPEC_DTPOFF
:
12386 fputs ("@dtpoff", file
);
12388 case UNSPEC_GOTNTPOFF
:
12390 fputs (ASSEMBLER_DIALECT
== ASM_ATT
?
12391 "@gottpoff(%rip)": "@gottpoff[rip]", file
);
12393 fputs ("@gotntpoff", file
);
12395 case UNSPEC_INDNTPOFF
:
12396 fputs ("@indntpoff", file
);
12399 case UNSPEC_MACHOPIC_OFFSET
:
12401 machopic_output_function_base_name (file
);
12405 output_operand_lossage ("invalid UNSPEC as operand");
12411 output_operand_lossage ("invalid expression as operand");
12415 /* This is called from dwarf2out.cc via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12416 We need to emit DTP-relative relocations. */
static void ATTRIBUTE_UNUSED
i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  fputs (ASM_LONG, file);
  output_addr_const (file, x);
  fputs ("@dtpoff", file);
  switch (size)
    {
    case 4:
      break;
    case 8:
      fputs (", 0", file);
      break;
    default:
      gcc_unreachable ();
   }
}
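/* Illustrative note (added, not from the original sources): for SIZE == 8
   the code above emits e.g. ".long foo@dtpoff, 0", i.e. a 32-bit
   DTP-relative value padded with a zero upper half.  */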
12436 /* Return true if X is a representation of the PIC register. This copes
12437 with calls from ix86_find_base_term, where the register might have
12438 been replaced by a cselib value. */
12441 ix86_pic_register_p (rtx x
)
12443 if (GET_CODE (x
) == VALUE
&& CSELIB_VAL_PTR (x
))
12444 return (pic_offset_table_rtx
12445 && rtx_equal_for_cselib_p (x
, pic_offset_table_rtx
));
12446 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_SET_GOT
)
12448 else if (!REG_P (x
))
12450 else if (pic_offset_table_rtx
)
12452 if (REGNO (x
) == REGNO (pic_offset_table_rtx
))
12454 if (HARD_REGISTER_P (x
)
12455 && !HARD_REGISTER_P (pic_offset_table_rtx
)
12456 && ORIGINAL_REGNO (x
) == REGNO (pic_offset_table_rtx
))
12461 return REGNO (x
) == PIC_OFFSET_TABLE_REGNUM
;
12464 /* Helper function for ix86_delegitimize_address.
12465 Attempt to delegitimize TLS local-exec accesses. */
12468 ix86_delegitimize_tls_address (rtx orig_x
)
12470 rtx x
= orig_x
, unspec
;
12471 struct ix86_address addr
;
12473 if (!TARGET_TLS_DIRECT_SEG_REFS
)
12477 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
12479 if (ix86_decompose_address (x
, &addr
) == 0
12480 || addr
.seg
!= DEFAULT_TLS_SEG_REG
12481 || addr
.disp
== NULL_RTX
12482 || GET_CODE (addr
.disp
) != CONST
)
12484 unspec
= XEXP (addr
.disp
, 0);
12485 if (GET_CODE (unspec
) == PLUS
&& CONST_INT_P (XEXP (unspec
, 1)))
12486 unspec
= XEXP (unspec
, 0);
12487 if (GET_CODE (unspec
) != UNSPEC
|| XINT (unspec
, 1) != UNSPEC_NTPOFF
)
12489 x
= XVECEXP (unspec
, 0, 0);
12490 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
12491 if (unspec
!= XEXP (addr
.disp
, 0))
12492 x
= gen_rtx_PLUS (Pmode
, x
, XEXP (XEXP (addr
.disp
, 0), 1));
12495 rtx idx
= addr
.index
;
12496 if (addr
.scale
!= 1)
12497 idx
= gen_rtx_MULT (Pmode
, idx
, GEN_INT (addr
.scale
));
12498 x
= gen_rtx_PLUS (Pmode
, idx
, x
);
12501 x
= gen_rtx_PLUS (Pmode
, addr
.base
, x
);
12502 if (MEM_P (orig_x
))
12503 x
= replace_equiv_address_nv (orig_x
, x
);
12507 /* In the name of slightly smaller debug output, and to cater to
12508 general assembler lossage, recognize PIC+GOTOFF and turn it back
12509 into a direct symbol reference.
12511 On Darwin, this is necessary to avoid a crash, because Darwin
12512 has a different PIC label for each routine but the DWARF debugging
12513 information is not associated with any particular routine, so it's
12514 necessary to remove references to the PIC label from RTL stored by
12515 the DWARF output code.
12517 This helper is used in the normal ix86_delegitimize_address
12518 entrypoint (e.g. used in the target delegitimization hook) and
12519 in ix86_find_base_term. As compile time memory optimization, we
12520 avoid allocating rtxes that will not change anything on the outcome
12521 of the callers (find_base_value and find_base_term). */
12524 ix86_delegitimize_address_1 (rtx x
, bool base_term_p
)
12526 rtx orig_x
= delegitimize_mem_from_attrs (x
);
12527 /* addend is NULL or some rtx if x is something+GOTOFF where
12528 something doesn't include the PIC register. */
12529 rtx addend
= NULL_RTX
;
12530 /* reg_addend is NULL or a multiple of some register. */
12531 rtx reg_addend
= NULL_RTX
;
12532 /* const_addend is NULL or a const_int. */
12533 rtx const_addend
= NULL_RTX
;
12534 /* This is the result, or NULL. */
12535 rtx result
= NULL_RTX
;
12544 if (GET_CODE (x
) == CONST
12545 && GET_CODE (XEXP (x
, 0)) == PLUS
12546 && GET_MODE (XEXP (x
, 0)) == Pmode
12547 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
12548 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == UNSPEC
12549 && XINT (XEXP (XEXP (x
, 0), 0), 1) == UNSPEC_PCREL
)
12551 /* find_base_{value,term} only care about MEMs with arg_pointer_rtx
12552 base. A CONST can't be arg_pointer_rtx based. */
12553 if (base_term_p
&& MEM_P (orig_x
))
12555 rtx x2
= XVECEXP (XEXP (XEXP (x
, 0), 0), 0, 0);
12556 x
= gen_rtx_PLUS (Pmode
, XEXP (XEXP (x
, 0), 1), x2
);
12557 if (MEM_P (orig_x
))
12558 x
= replace_equiv_address_nv (orig_x
, x
);
12562 if (GET_CODE (x
) == CONST
12563 && GET_CODE (XEXP (x
, 0)) == UNSPEC
12564 && (XINT (XEXP (x
, 0), 1) == UNSPEC_GOTPCREL
12565 || XINT (XEXP (x
, 0), 1) == UNSPEC_PCREL
)
12566 && (MEM_P (orig_x
) || XINT (XEXP (x
, 0), 1) == UNSPEC_PCREL
))
12568 x
= XVECEXP (XEXP (x
, 0), 0, 0);
12569 if (GET_MODE (orig_x
) != GET_MODE (x
) && MEM_P (orig_x
))
12571 x
= lowpart_subreg (GET_MODE (orig_x
), x
, GET_MODE (x
));
12578 if (ix86_cmodel
!= CM_MEDIUM_PIC
&& ix86_cmodel
!= CM_LARGE_PIC
)
12579 return ix86_delegitimize_tls_address (orig_x
);
12581 /* Fall thru into the code shared with -m32 for -mcmodel=large -fpic
12582 and -mcmodel=medium -fpic. */
12585 if (GET_CODE (x
) != PLUS
12586 || GET_CODE (XEXP (x
, 1)) != CONST
)
12587 return ix86_delegitimize_tls_address (orig_x
);
12589 if (ix86_pic_register_p (XEXP (x
, 0)))
12590 /* %ebx + GOT/GOTOFF */
12592 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
12594 /* %ebx + %reg * scale + GOT/GOTOFF */
12595 reg_addend
= XEXP (x
, 0);
12596 if (ix86_pic_register_p (XEXP (reg_addend
, 0)))
12597 reg_addend
= XEXP (reg_addend
, 1);
12598 else if (ix86_pic_register_p (XEXP (reg_addend
, 1)))
12599 reg_addend
= XEXP (reg_addend
, 0);
12602 reg_addend
= NULL_RTX
;
12603 addend
= XEXP (x
, 0);
12607 addend
= XEXP (x
, 0);
12609 x
= XEXP (XEXP (x
, 1), 0);
12610 if (GET_CODE (x
) == PLUS
12611 && CONST_INT_P (XEXP (x
, 1)))
12613 const_addend
= XEXP (x
, 1);
12617 if (GET_CODE (x
) == UNSPEC
12618 && ((XINT (x
, 1) == UNSPEC_GOT
&& MEM_P (orig_x
) && !addend
)
12619 || (XINT (x
, 1) == UNSPEC_GOTOFF
&& !MEM_P (orig_x
))
12620 || (XINT (x
, 1) == UNSPEC_PLTOFF
&& ix86_cmodel
== CM_LARGE_PIC
12621 && !MEM_P (orig_x
) && !addend
)))
12622 result
= XVECEXP (x
, 0, 0);
12624 if (!TARGET_64BIT
&& TARGET_MACHO
&& darwin_local_data_pic (x
)
12625 && !MEM_P (orig_x
))
12626 result
= XVECEXP (x
, 0, 0);
12629 return ix86_delegitimize_tls_address (orig_x
);
12631 /* For (PLUS something CONST_INT) both find_base_{value,term} just
12632 recurse on the first operand. */
12633 if (const_addend
&& !base_term_p
)
12634 result
= gen_rtx_CONST (Pmode
, gen_rtx_PLUS (Pmode
, result
, const_addend
));
12636 result
= gen_rtx_PLUS (Pmode
, reg_addend
, result
);
12639 /* If the rest of original X doesn't involve the PIC register, add
12640 addend and subtract pic_offset_table_rtx. This can happen e.g.
12642 leal (%ebx, %ecx, 4), %ecx
12644 movl foo@GOTOFF(%ecx), %edx
12645 in which case we return (%ecx - %ebx) + foo
12646 or (%ecx - _GLOBAL_OFFSET_TABLE_) + foo if pseudo_pic_reg
12647 and reload has completed. Don't do the latter for debug,
12648 as _GLOBAL_OFFSET_TABLE_ can't be expressed in the assembly. */
12649 if (pic_offset_table_rtx
12650 && (!reload_completed
|| !ix86_use_pseudo_pic_reg ()))
12651 result
= gen_rtx_PLUS (Pmode
, gen_rtx_MINUS (Pmode
, copy_rtx (addend
),
12652 pic_offset_table_rtx
),
12654 else if (base_term_p
12655 && pic_offset_table_rtx
12657 && !TARGET_VXWORKS_RTP
)
12659 rtx tmp
= gen_rtx_SYMBOL_REF (Pmode
, GOT_SYMBOL_NAME
);
12660 tmp
= gen_rtx_MINUS (Pmode
, copy_rtx (addend
), tmp
);
12661 result
= gen_rtx_PLUS (Pmode
, tmp
, result
);
12666 if (GET_MODE (orig_x
) != Pmode
&& MEM_P (orig_x
))
12668 result
= lowpart_subreg (GET_MODE (orig_x
), result
, Pmode
);
12669 if (result
== NULL_RTX
)
12675 /* The normal instantiation of the above template. */
static rtx
ix86_delegitimize_address (rtx x)
{
  return ix86_delegitimize_address_1 (x, false);
}
12683 /* If X is a machine specific address (i.e. a symbol or label being
12684 referenced as a displacement from the GOT implemented using an
12685 UNSPEC), then return the base term. Otherwise return X. */
12688 ix86_find_base_term (rtx x
)
12694 if (GET_CODE (x
) != CONST
)
12696 term
= XEXP (x
, 0);
12697 if (GET_CODE (term
) == PLUS
12698 && CONST_INT_P (XEXP (term
, 1)))
12699 term
= XEXP (term
, 0);
12700 if (GET_CODE (term
) != UNSPEC
12701 || (XINT (term
, 1) != UNSPEC_GOTPCREL
12702 && XINT (term
, 1) != UNSPEC_PCREL
))
12705 return XVECEXP (term
, 0, 0);
12708 return ix86_delegitimize_address_1 (x
, true);
/* Return true if X shouldn't be emitted into the debug info.
   Disallow UNSPECs other than @gotoff - we can't emit the
   _GLOBAL_OFFSET_TABLE_ symbol easily into the .debug_info section, so we
   don't delegitimize, but instead assemble as @gotoff.
   Disallow _GLOBAL_OFFSET_TABLE_ SYMBOL_REF - the assembler magically
   assembles that as a _GLOBAL_OFFSET_TABLE_-. expression.  */
static bool
ix86_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) != UNSPEC_GOTOFF)
    return true;

  if (SYMBOL_REF_P (x) && strcmp (XSTR (x, 0), GOT_SYMBOL_NAME) == 0)
    return true;

  return false;
}
12731 put_condition_code (enum rtx_code code
, machine_mode mode
, bool reverse
,
12732 bool fp
, FILE *file
)
12734 const char *suffix
;
12736 if (mode
== CCFPmode
)
12738 code
= ix86_fp_compare_code_to_integer (code
);
12742 code
= reverse_condition (code
);
12747 gcc_assert (mode
!= CCGZmode
);
12771 gcc_assert (mode
!= CCGZmode
);
12795 gcc_assert (mode
== CCmode
|| mode
== CCNOmode
|| mode
== CCGCmode
);
12799 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
12800 Those same assemblers have the same but opposite lossage on cmov. */
12801 if (mode
== CCmode
)
12802 suffix
= fp
? "nbe" : "a";
12804 gcc_unreachable ();
12821 gcc_unreachable ();
12825 if (mode
== CCmode
|| mode
== CCGZmode
)
12827 else if (mode
== CCCmode
)
12828 suffix
= fp
? "b" : "c";
12830 gcc_unreachable ();
12847 gcc_unreachable ();
12851 if (mode
== CCmode
|| mode
== CCGZmode
)
12853 else if (mode
== CCCmode
)
12854 suffix
= fp
? "nb" : "nc";
12856 gcc_unreachable ();
12859 gcc_assert (mode
== CCmode
|| mode
== CCGCmode
|| mode
== CCNOmode
);
12863 if (mode
== CCmode
)
12866 gcc_unreachable ();
12869 suffix
= fp
? "u" : "p";
12872 suffix
= fp
? "nu" : "np";
12875 gcc_unreachable ();
12877 fputs (suffix
, file
);
12880 /* Print the name of register X to FILE based on its machine mode and number.
12881 If CODE is 'w', pretend the mode is HImode.
12882 If CODE is 'b', pretend the mode is QImode.
12883 If CODE is 'k', pretend the mode is SImode.
12884 If CODE is 'q', pretend the mode is DImode.
12885 If CODE is 'x', pretend the mode is V4SFmode.
12886 If CODE is 't', pretend the mode is V8SFmode.
12887 If CODE is 'g', pretend the mode is V16SFmode.
12888 If CODE is 'h', pretend the reg is the 'high' byte register.
12889 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
12890 If CODE is 'd', duplicate the operand for AVX instruction.
12891 If CODE is 'V', print naked full integer register name without %.
12895 print_reg (rtx x
, int code
, FILE *file
)
12899 unsigned int regno
;
12902 if (ASSEMBLER_DIALECT
== ASM_ATT
&& code
!= 'V')
12907 gcc_assert (TARGET_64BIT
);
12908 fputs ("rip", file
);
12912 if (code
== 'y' && STACK_TOP_P (x
))
12914 fputs ("st(0)", file
);
12920 else if (code
== 'b')
12922 else if (code
== 'k')
12924 else if (code
== 'q')
12926 else if (code
== 'h')
12928 else if (code
== 'x')
12930 else if (code
== 't')
12932 else if (code
== 'g')
12935 msize
= GET_MODE_SIZE (GET_MODE (x
));
12939 if (regno
== ARG_POINTER_REGNUM
12940 || regno
== FRAME_POINTER_REGNUM
12941 || regno
== FPSR_REG
)
12943 output_operand_lossage
12944 ("invalid use of register '%s'", reg_names
[regno
]);
12947 else if (regno
== FLAGS_REG
)
12949 output_operand_lossage ("invalid use of asm flag output");
12955 if (GENERAL_REGNO_P (regno
))
12956 msize
= GET_MODE_SIZE (word_mode
);
12958 error ("%<V%> modifier on non-integer register");
12961 duplicated
= code
== 'd' && TARGET_AVX
;
12968 if (GENERAL_REGNO_P (regno
) && msize
> GET_MODE_SIZE (word_mode
))
12969 warning (0, "unsupported size for integer register");
12972 if (LEGACY_INT_REGNO_P (regno
))
12973 putc (msize
> 4 && TARGET_64BIT
? 'r' : 'e', file
);
12977 reg
= hi_reg_name
[regno
];
12980 if (regno
>= ARRAY_SIZE (qi_reg_name
))
12982 if (!ANY_QI_REGNO_P (regno
))
12983 error ("unsupported size for integer register");
12984 reg
= qi_reg_name
[regno
];
12987 if (regno
>= ARRAY_SIZE (qi_high_reg_name
))
12989 reg
= qi_high_reg_name
[regno
];
12993 if (SSE_REGNO_P (regno
))
12995 gcc_assert (!duplicated
);
12996 putc (msize
== 32 ? 'y' : 'z', file
);
12997 reg
= hi_reg_name
[regno
] + 1;
13002 gcc_unreachable ();
      /* Irritatingly, AMD extended registers use a
	 different naming convention: "r%d[bwd]".  */
13009 if (REX_INT_REGNO_P (regno
))
13011 gcc_assert (TARGET_64BIT
);
13015 error ("extended registers have no high halves");
13030 error ("unsupported operand size for extended register");
13038 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13039 fprintf (file
, ", %%%s", reg
);
13041 fprintf (file
, ", %s", reg
);
13045 /* Meaning of CODE:
13046 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13047 C -- print opcode suffix for set/cmov insn.
13048 c -- like C, but print reversed condition
13049 F,f -- likewise, but for floating-point.
13050 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13052 R -- print embedded rounding and sae.
13053 r -- print only sae.
13054 z -- print the opcode suffix for the size of the current operand.
13055 Z -- likewise, with special suffixes for x87 instructions.
13056 * -- print a star (in certain assembler syntax)
13057 A -- print an absolute memory reference.
13058 E -- print address with DImode register names if TARGET_64BIT.
13059 w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
	delimiter.
13062 b -- print the QImode name of the register for the indicated operand.
13063 %b0 would print %al if operands[0] is reg 0.
13064 w -- likewise, print the HImode name of the register.
13065 k -- likewise, print the SImode name of the register.
13066 q -- likewise, print the DImode name of the register.
13067 x -- likewise, print the V4SFmode name of the register.
13068 t -- likewise, print the V8SFmode name of the register.
13069 g -- likewise, print the V16SFmode name of the register.
13070 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13071 y -- print "st(0)" instead of "st" as a register.
13072 d -- print duplicated register operand for AVX instruction.
13073 D -- print condition for SSE cmp instruction.
   P -- if PIC, print an @PLT suffix.  For -fno-plt, load the function
	address from GOT.
13076 p -- print raw symbol name.
13077 X -- don't print any sort of PIC '@' suffix for a symbol.
13078 & -- print some in-use local-dynamic symbol name.
13079 H -- print a memory address offset by 8; used for sse high-parts
13080 Y -- print condition for XOP pcom* instruction.
13081 V -- print naked full integer register name without %.
13082 + -- print a branch hint as 'cs' or 'ds' prefix
13083 ; -- print a semicolon (after prefixes due to bug in older gas).
13084 ~ -- print "i" if TARGET_AVX2, "f" otherwise.
13085 ^ -- print addr32 prefix if TARGET_64BIT and Pmode != word_mode
13086 M -- print addr32 prefix for TARGET_X32 with VSIB address.
13087 ! -- print NOTRACK prefix for jxx/call/ret instructions if required.
13088 N -- print maskz if it's constant 0 operand.
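   Illustrative examples (added, not from the original sources): if
   operands[0] is integer register 0, then
	%b0 prints %al,  %w0 prints %ax,  %k0 prints %eax,  %q0 prints %rax,
   and %h0 prints the high byte register %ah.  */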
13092 ix86_print_operand (FILE *file
, rtx x
, int code
)
13099 switch (ASSEMBLER_DIALECT
)
	  /* Intel syntax.  For absolute addresses, registers should not
	     be surrounded by braces.  */
	  ix86_print_operand (file, x, 0);
13118 gcc_unreachable ();
13121 ix86_print_operand (file
, x
, 0);
	  /* Wrap address in an UNSPEC to declare special handling.  */
	  x = gen_rtx_UNSPEC (DImode, gen_rtvec (1, x), UNSPEC_LEA_ADDR);

	  output_address (VOIDmode, x);
13133 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13138 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13143 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13148 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13153 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13158 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13163 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13164 if (ASSEMBLER_DIALECT
!= ASM_ATT
)
13167 switch (GET_MODE_SIZE (GET_MODE (x
)))
13182 output_operand_lossage ("invalid operand size for operand "
13192 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
)
13194 /* Opcodes don't get size suffixes if using Intel opcodes. */
13195 if (ASSEMBLER_DIALECT
== ASM_INTEL
)
13198 switch (GET_MODE_SIZE (GET_MODE (x
)))
13217 output_operand_lossage ("invalid operand size for operand "
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  if (this_is_asm_operands)
	    warning_for_asm (this_is_asm_operands,
			     "non-integer operand used with operand code %<z%>");
	  else
	    warning (0, "non-integer operand used with operand code %<z%>");
	}
13234 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
13235 if (ASSEMBLER_DIALECT
== ASM_INTEL
)
13238 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
)
13240 switch (GET_MODE_SIZE (GET_MODE (x
)))
13243 #ifdef HAVE_AS_IX86_FILDS
13253 #ifdef HAVE_AS_IX86_FILDQ
13256 fputs ("ll", file
);
13264 else if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
	  /* 387 opcodes don't get size suffixes
	     if the operands are registers.  */
	  if (STACK_REG_P (x))
13271 switch (GET_MODE_SIZE (GET_MODE (x
)))
13292 output_operand_lossage ("invalid operand type used with "
13293 "operand code '%c'", code
);
13297 output_operand_lossage ("invalid operand size for operand code '%c'",
13318 if (CONST_INT_P (x
) || ! SHIFT_DOUBLE_OMITS_COUNT
)
13320 ix86_print_operand (file
, x
, 0);
13321 fputs (", ", file
);
13326 switch (GET_CODE (x
))
13329 fputs ("neq", file
);
13332 fputs ("eq", file
);
13336 fputs (INTEGRAL_MODE_P (GET_MODE (x
)) ? "ge" : "unlt", file
);
13340 fputs (INTEGRAL_MODE_P (GET_MODE (x
)) ? "gt" : "unle", file
);
13344 fputs ("le", file
);
13348 fputs ("lt", file
);
13351 fputs ("unord", file
);
13354 fputs ("ord", file
);
13357 fputs ("ueq", file
);
13360 fputs ("nlt", file
);
13363 fputs ("nle", file
);
13366 fputs ("ule", file
);
13369 fputs ("ult", file
);
13372 fputs ("une", file
);
13375 output_operand_lossage ("operand is not a condition code, "
13376 "invalid operand code 'Y'");
      /* Little bit of braindamage here.  The SSE compare instructions
	 use completely different names for the comparisons than the
	 fp conditional moves do.  */
      switch (GET_CODE (x))
13390 fputs ("eq_us", file
);
13395 fputs ("eq", file
);
13400 fputs ("nge", file
);
13405 fputs ("lt", file
);
13410 fputs ("ngt", file
);
13415 fputs ("le", file
);
13418 fputs ("unord", file
);
13423 fputs ("neq_oq", file
);
13428 fputs ("neq", file
);
13433 fputs ("ge", file
);
13438 fputs ("nlt", file
);
13443 fputs ("gt", file
);
13448 fputs ("nle", file
);
13451 fputs ("ord", file
);
13454 output_operand_lossage ("operand is not a condition code, "
13455 "invalid operand code 'D'");
13462 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13463 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13465 gcc_fallthrough ();
      if (!COMPARISON_P (x))
	{
	  output_operand_lossage ("operand is not a condition code, "
				  "invalid operand code '%c'", code);
	  return;
	}
      put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)),
			  code == 'c' || code == 'f',
			  code == 'F' || code == 'f',
			  file);

      if (!offsettable_memref_p (x))
	{
	  output_operand_lossage ("operand is not an offsettable memory "
				  "reference, invalid operand code 'H'");
	  return;
	}
      /* It doesn't actually matter what mode we use here, as we're
	 only going to use this for printing.  */
      x = adjust_address_nv (x, DImode, 8);
      /* Output 'qword ptr' for intel assembler dialect.  */
      if (ASSEMBLER_DIALECT == ASM_INTEL)
      if (!CONST_INT_P (x))
	{
	  output_operand_lossage ("operand is not an integer, invalid "
				  "operand code 'K'");
	  return;
	}

      if (INTVAL (x) & IX86_HLE_ACQUIRE)
#ifdef HAVE_AS_IX86_HLE
	fputs ("xacquire ", file);
#else
	fputs ("\n" ASM_BYTE "0xf2\n\t", file);
#endif
      else if (INTVAL (x) & IX86_HLE_RELEASE)
#ifdef HAVE_AS_IX86_HLE
	fputs ("xrelease ", file);
#else
	fputs ("\n" ASM_BYTE "0xf3\n\t", file);
#endif

      /* We do not want to print value of the operand.  */
      if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
	fputs ("{z}", file);
      if (!CONST_INT_P (x) || INTVAL (x) != ROUND_SAE)
	{
	  output_operand_lossage ("operand is not a specific integer, "
				  "invalid operand code 'r'");
	  return;
	}

      if (ASSEMBLER_DIALECT == ASM_INTEL)
	fputs (", ", file);

      fputs ("{sae}", file);

      if (ASSEMBLER_DIALECT == ASM_ATT)
	fputs (", ", file);
      if (!CONST_INT_P (x))
	{
	  output_operand_lossage ("operand is not an integer, invalid "
				  "operand code 'R'");
	  return;
	}

      if (ASSEMBLER_DIALECT == ASM_INTEL)
	fputs (", ", file);

      switch (INTVAL (x))
	{
	case ROUND_NEAREST_INT | ROUND_SAE:
	  fputs ("{rn-sae}", file);
	  break;
	case ROUND_NEG_INF | ROUND_SAE:
	  fputs ("{rd-sae}", file);
	  break;
	case ROUND_POS_INF | ROUND_SAE:
	  fputs ("{ru-sae}", file);
	  break;
	case ROUND_ZERO | ROUND_SAE:
	  fputs ("{rz-sae}", file);
	  break;
	default:
	  output_operand_lossage ("operand is not a specific integer, "
				  "invalid operand code 'R'");
	}

      if (ASSEMBLER_DIALECT == ASM_ATT)
	fputs (", ", file);
13579 if (ASSEMBLER_DIALECT
== ASM_ATT
)
	const char *name = get_some_local_dynamic_name ();
	if (name == NULL)
	  output_operand_lossage ("'%%&' used without any "
				  "local dynamic TLS references");
	else
	  assemble_name (file, name);
	if (!optimize
	    || optimize_function_for_size_p (cfun)
	    || !TARGET_BRANCH_PREDICTION_HINTS)
	  return;

	x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
	if (x)
	  {
	    int pred_val = profile_probability::from_reg_br_prob_note
			     (XINT (x, 0)).to_reg_br_prob_base ();

	    if (pred_val < REG_BR_PROB_BASE * 45 / 100
		|| pred_val > REG_BR_PROB_BASE * 55 / 100)
	      {
		bool taken = pred_val > REG_BR_PROB_BASE / 2;
		bool cputaken
		  = final_forward_branch_p (current_output_insn) == 0;

		/* Emit hints only in the case default branch prediction
		   heuristics would fail.  */
		if (taken != cputaken)
		  {
		    /* We use 3e (DS) prefix for taken branches and
		       2e (CS) prefix for not taken branches.  */
		    if (taken)
		      fputs ("ds ; ", file);
		    else
		      fputs ("cs ; ", file);
		  }
	      }
	  }
13633 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
13639 putc (TARGET_AVX2
? 'i' : 'f', file
);
	    /* NB: 32-bit indices in VSIB address are sign-extended
	       to 64 bits.  In x32, if 32-bit address 0xf7fa3010 is
	       sign-extended to 0xfffffffff7fa3010 which is invalid
	       address.  Add addr32 prefix if there is no base
	       register nor symbol.  */
	    struct ix86_address parts;
	    ok = ix86_decompose_address (x, &parts);
	    gcc_assert (ok && parts.index == NULL_RTX);
	    if (parts.base == NULL_RTX
		&& (parts.disp == NULL_RTX
		    || !symbolic_operand (parts.disp,
					  GET_MODE (parts.disp))))
	      fputs ("addr32 ", file);
	  if (TARGET_64BIT && Pmode != word_mode)
	    fputs ("addr32 ", file);
	  if (ix86_notrack_prefixed_insn_p (current_output_insn))
	    fputs ("notrack ", file);
13673 output_operand_lossage ("invalid operand code '%c'", code
);
13678 print_reg (x
, code
, file
);
13680 else if (MEM_P (x
))
13682 rtx addr
= XEXP (x
, 0);
      /* No `byte ptr' prefix for call instructions ... */
      if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
	{
	  machine_mode mode = GET_MODE (x);
13690 /* Check for explicit size override codes. */
13693 else if (code
== 'w')
13695 else if (code
== 'k')
13697 else if (code
== 'q')
13699 else if (code
== 'x')
13701 else if (code
== 't')
13703 else if (code
== 'g')
13705 else if (mode
== BLKmode
)
13706 /* ... or BLKmode operands, when not overridden. */
	    switch (GET_MODE_SIZE (mode))
	      {
	      case 1: size = "BYTE"; break;
	      case 2: size = "WORD"; break;
	      case 4: size = "DWORD"; break;
	      case 8: size = "QWORD"; break;
	      case 12: size = "TBYTE"; break;
	      case 16:
		if (mode == XFmode)
		  size = "TBYTE";
		else
		  size = "XMMWORD";
		break;
	      case 32: size = "YMMWORD"; break;
	      case 64: size = "ZMMWORD"; break;
	      default:
		gcc_unreachable ();
	      }

	  fputs (size, file);
	  fputs (" PTR ", file);
13734 if (this_is_asm_operands
&& ! address_operand (addr
, VOIDmode
))
13735 output_operand_lossage ("invalid constraints for operand");
13737 ix86_print_operand_address_as
13738 (file
, addr
, MEM_ADDR_SPACE (x
), code
== 'p' || code
== 'P');
13741 else if (CONST_DOUBLE_P (x
) && GET_MODE (x
) == HFmode
)
13743 long l
= real_to_target (NULL
, CONST_DOUBLE_REAL_VALUE (x
),
13744 REAL_MODE_FORMAT (HFmode
));
13745 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13747 fprintf (file
, "0x%04x", (unsigned int) l
);
13750 else if (CONST_DOUBLE_P (x
) && GET_MODE (x
) == SFmode
)
13754 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
13756 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13758 /* Sign extend 32bit SFmode immediate to 8 bytes. */
13760 fprintf (file
, "0x%08" HOST_LONG_LONG_FORMAT
"x",
13761 (unsigned long long) (int) l
);
13763 fprintf (file
, "0x%08x", (unsigned int) l
);
13766 else if (CONST_DOUBLE_P (x
) && GET_MODE (x
) == DFmode
)
13770 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
13772 if (ASSEMBLER_DIALECT
== ASM_ATT
)
13774 fprintf (file
, "0x%lx%08lx", l
[1] & 0xffffffff, l
[0] & 0xffffffff);
  /* These float cases don't actually occur as immediate operands.  */
  else if (CONST_DOUBLE_P (x) && GET_MODE (x) == XFmode)
    {
      char dstr[30];

      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fputs (dstr, file);
    }
  /* Print bcst_mem_operand.  */
  else if (GET_CODE (x) == VEC_DUPLICATE)
    {
      machine_mode vmode = GET_MODE (x);
      /* Must be bcst_memory_operand.  */
      gcc_assert (bcst_mem_operand (x, vmode));

      rtx mem = XEXP (x, 0);
      ix86_print_operand (file, mem, 0);
13800 fputs ("{1to2}", file
);
13806 fputs ("{1to4}", file
);
13813 fputs ("{1to8}", file
);
13818 fputs ("{1to16}", file
);
13821 fputs ("{1to32}", file
);
13824 gcc_unreachable ();
      /* We have patterns that allow zero sets of memory, for instance.
	 In 64-bit mode, we should probably support all 8-byte vectors,
	 since we can in fact encode that into an immediate.  */
      if (GET_CODE (x) == CONST_VECTOR)
	{
	  if (x != CONST0_RTX (GET_MODE (x)))
	    output_operand_lossage ("invalid vector immediate");
      if (ix86_force_load_from_GOT_p (x, true))
	{
	  /* For inline assembly statement, load function address
	     from GOT with 'P' operand modifier to avoid PLT.  */
	  x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
			      TARGET_64BIT ? UNSPEC_GOTPCREL : UNSPEC_GOT);
	  x = gen_rtx_CONST (Pmode, x);
	  x = gen_const_mem (Pmode, x);
	  ix86_print_operand (file, x, 'A');
	}
      else if (code != 'p')
	{
	  if (CONST_INT_P (x))
	    {
	      if (ASSEMBLER_DIALECT == ASM_ATT)
		putc ('$', file);
	    }
	  else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
		   || GET_CODE (x) == LABEL_REF)
	    {
	      if (ASSEMBLER_DIALECT == ASM_ATT)
		putc ('$', file);
	      else
		fputs ("OFFSET FLAT:", file);
	    }
	}

      if (CONST_INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      else if (flag_pic || MACHOPIC_INDIRECT)
	output_pic_addr_const (file, x, code);
      else
	output_addr_const (file, x);
13882 ix86_print_operand_punct_valid_p (unsigned char code
)
13884 return (code
== '*' || code
== '+' || code
== '&' || code
== ';'
13885 || code
== '~' || code
== '^' || code
== '!');
/* Print a memory operand whose address is ADDR.  */

static void
ix86_print_operand_address_as (FILE *file, rtx addr,
			       addr_space_t as, bool raw)
{
  struct ix86_address parts;
  rtx base, index, disp;
  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_VSIBADDR)
    {
      ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
      gcc_assert (parts.index == NULL_RTX);
      parts.index = XVECEXP (addr, 0, 1);
      parts.scale = INTVAL (XVECEXP (addr, 0, 2));
      addr = XVECEXP (addr, 0, 0);
    }
  else if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_LEA_ADDR)
    {
      gcc_assert (TARGET_64BIT);
      ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
    }
  else
    ok = ix86_decompose_address (addr, &parts);
  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  if (ADDR_SPACE_GENERIC_P (as))
    as = parts.seg;
  else
    gcc_assert (ADDR_SPACE_GENERIC_P (parts.seg));

  if (!ADDR_SPACE_GENERIC_P (as) && !raw)
    {
      if (ASSEMBLER_DIALECT == ASM_ATT)
	putc ('%', file);

      switch (as)
	{
	case ADDR_SPACE_SEG_FS:
	  fputs ("fs:", file);
	  break;
	case ADDR_SPACE_SEG_GS:
	  fputs ("gs:", file);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  /* Use one byte shorter RIP relative addressing for 64bit mode.  */
  if (TARGET_64BIT && !base && !index && !raw)
    {
      rtx symbol = disp;

      if (GET_CODE (disp) == CONST
	  && GET_CODE (XEXP (disp, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
	symbol = XEXP (XEXP (disp, 0), 0);

      if (GET_CODE (symbol) == LABEL_REF
	  || (GET_CODE (symbol) == SYMBOL_REF
	      && SYMBOL_REF_TLS_MODEL (symbol) == 0))
	base = pc_rtx;
    }
  if (!base && !index)
    {
      /* Displacement only requires special attention.  */
      if (CONST_INT_P (disp))
	{
	  if (ASSEMBLER_DIALECT == ASM_INTEL && ADDR_SPACE_GENERIC_P (as))
	    fputs ("ds:", file);
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
	}
      /* Load the external function address via the GOT slot to avoid PLT.  */
      else if (GET_CODE (disp) == CONST
	       && GET_CODE (XEXP (disp, 0)) == UNSPEC
	       && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOTPCREL
		   || XINT (XEXP (disp, 0), 1) == UNSPEC_GOT)
	       && ix86_force_load_from_GOT_p (XVECEXP (XEXP (disp, 0), 0, 0)))
	output_pic_addr_const (file, disp, 0);
      else if (flag_pic)
	output_pic_addr_const (file, disp, 0);
      else
	output_addr_const (file, disp);
    }
      /* Print SImode register names to force addr32 prefix.  */
      if (SImode_address_operand (addr, VOIDmode))
	{
	  gcc_assert (TARGET_64BIT);
	  switch (GET_CODE (addr))
	    {
	    case SUBREG:
	      gcc_assert (GET_MODE (addr) == SImode);
	      gcc_assert (GET_MODE (SUBREG_REG (addr)) == DImode);
	      break;
	    case ZERO_EXTEND:
	    case SIGN_EXTEND:
	      gcc_assert (GET_MODE (addr) == DImode);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  gcc_assert (!code);
	}
14014 && CONST_INT_P (disp
)
14015 && INTVAL (disp
) < -16*1024*1024)
	/* X32 runs in 64-bit mode, where displacement, DISP, in
	   address DISP(%r64), is encoded as 32-bit immediate sign-
	   extended from 32-bit to 64-bit.  For -0x40000300(%r64),
	   address is %r64 + 0xffffffffbffffd00.  When %r64 <
	   0x40000300, like 0x37ffe064, address is 0xfffffffff7ffdd64,
	   which is invalid for x32.  The correct address is %r64
	   - 0x40000300 == 0xf7ffdd64.  To properly encode
	   -0x40000300(%r64) for x32, we zero-extend negative
	   displacement by forcing addr32 prefix which truncates
	   0xfffffffff7ffdd64 to 0xf7ffdd64.  In theory, we should
	   zero-extend all negative displacements, including -1(%rsp).
	   However, for small negative displacements, sign-extension
	   won't cause overflow.  We only zero-extend negative
	   displacements if they < -16*1024*1024, which is also used
	   to check legitimate address displacements for PIC.  */
      /* Since the upper 32 bits of RSP are always zero for x32,
	 we can encode %esp as %rsp to avoid 0x67 prefix if
	 there is no index register.  */
      if (TARGET_X32 && Pmode == SImode
	  && !index && base && REG_P (base) && REGNO (base) == SP_REG)
14042 if (ASSEMBLER_DIALECT
== ASM_ATT
)
14047 output_pic_addr_const (file
, disp
, 0);
14048 else if (GET_CODE (disp
) == LABEL_REF
)
14049 output_asm_label (disp
);
14051 output_addr_const (file
, disp
);
14056 print_reg (base
, code
, file
);
14060 print_reg (index
, vsib
? 0 : code
, file
);
14061 if (scale
!= 1 || vsib
)
14062 fprintf (file
, ",%d", scale
);
14068 rtx offset
= NULL_RTX
;
14072 /* Pull out the offset of a symbol; print any symbol itself. */
14073 if (GET_CODE (disp
) == CONST
14074 && GET_CODE (XEXP (disp
, 0)) == PLUS
14075 && CONST_INT_P (XEXP (XEXP (disp
, 0), 1)))
14077 offset
= XEXP (XEXP (disp
, 0), 1);
14078 disp
= gen_rtx_CONST (VOIDmode
,
14079 XEXP (XEXP (disp
, 0), 0));
14083 output_pic_addr_const (file
, disp
, 0);
14084 else if (GET_CODE (disp
) == LABEL_REF
)
14085 output_asm_label (disp
);
14086 else if (CONST_INT_P (disp
))
14089 output_addr_const (file
, disp
);
14095 print_reg (base
, code
, file
);
14098 if (INTVAL (offset
) >= 0)
14100 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (offset
));
14104 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (offset
));
14111 print_reg (index
, vsib
? 0 : code
, file
);
14112 if (scale
!= 1 || vsib
)
14113 fprintf (file
, "*%d", scale
);
static void
ix86_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
{
  if (this_is_asm_operands && ! address_operand (addr, VOIDmode))
    output_operand_lossage ("invalid constraints for operand");
  else
    ix86_print_operand_address_as (file, addr, ADDR_SPACE_GENERIC, false);
}
/* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
i386_asm_output_addr_const_extra (FILE *file, rtx x)
{
  rtx op;

  if (GET_CODE (x) != UNSPEC)
    return false;

  op = XVECEXP (x, 0, 0);
  switch (XINT (x, 1))
    {
    case UNSPEC_GOTOFF:
      output_addr_const (file, op);
      fputs ("@gotoff", file);
      break;
    case UNSPEC_GOTTPOFF:
      output_addr_const (file, op);
      /* FIXME: This might be @TPOFF in Sun ld.  */
      fputs ("@gottpoff", file);
      break;
    case UNSPEC_TPOFF:
      output_addr_const (file, op);
      fputs ("@tpoff", file);
      break;
    case UNSPEC_NTPOFF:
      output_addr_const (file, op);
      if (TARGET_64BIT)
	fputs ("@tpoff", file);
      else
	fputs ("@ntpoff", file);
      break;
    case UNSPEC_DTPOFF:
      output_addr_const (file, op);
      fputs ("@dtpoff", file);
      break;
    case UNSPEC_GOTNTPOFF:
      output_addr_const (file, op);
      if (TARGET_64BIT)
	fputs (ASSEMBLER_DIALECT == ASM_ATT ?
	       "@gottpoff(%rip)" : "@gottpoff[rip]", file);
      else
	fputs ("@gotntpoff", file);
      break;
    case UNSPEC_INDNTPOFF:
      output_addr_const (file, op);
      fputs ("@indntpoff", file);
      break;
#if TARGET_MACHO
    case UNSPEC_MACHOPIC_OFFSET:
      output_addr_const (file, op);
      putc ('-', file);
      machopic_output_function_base_name (file);
      break;
#endif
/* Output code to perform a 387 binary operation in INSN, one of PLUS,
   MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
   is the expression of the binary operation.  The output may either be
   emitted here, or returned to the caller, like all output_* functions.

   There is no guarantee that the operands are the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */

#ifndef SYSV386_COMPAT
/* Set to 1 for compatibility with brain-damaged assemblers.  No-one
   wants to fix the assemblers because that causes incompatibility
   with gcc.  No-one wants to fix gcc because that causes
   incompatibility with assemblers...  You can use the option of
   -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way.  */
#define SYSV386_COMPAT 1
#endif
14212 output_387_binary_op (rtx_insn
*insn
, rtx
*operands
)
14214 static char buf
[40];
14217 = (SSE_REG_P (operands
[0])
14218 || SSE_REG_P (operands
[1]) || SSE_REG_P (operands
[2]));
14222 else if (GET_MODE_CLASS (GET_MODE (operands
[1])) == MODE_INT
14223 || GET_MODE_CLASS (GET_MODE (operands
[2])) == MODE_INT
)
14230 switch (GET_CODE (operands
[3]))
14241 gcc_unreachable ();
14248 p
= GET_MODE (operands
[0]) == SFmode
? "ss" : "sd";
14252 p
= "\t{%2, %1, %0|%0, %1, %2}";
14254 p
= "\t{%2, %0|%0, %2}";
  /* Even if we do not want to check the inputs, this documents input
     constraints.  Which helps in understanding the following code.  */
  if (flag_checking)
    {
      if (STACK_REG_P (operands[0])
	  && ((REG_P (operands[1])
	       && REGNO (operands[0]) == REGNO (operands[1])
	       && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
	      || (REG_P (operands[2])
		  && REGNO (operands[0]) == REGNO (operands[2])
		  && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
	  && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
	;	/* ok */
      else
	gcc_unreachable ();
    }
14277 switch (GET_CODE (operands
[3]))
14281 if (REG_P (operands
[2]) && REGNO (operands
[0]) == REGNO (operands
[2]))
14282 std::swap (operands
[1], operands
[2]);
      /* know operands[0] == operands[1].  */

      if (MEM_P (operands[2]))
	{
	  p = "%Z2\t%2";
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	{
	  if (STACK_TOP_P (operands[0]))
	    /* How is it that we are storing to a dead operand[2]?
	       Well, presumably operands[1] is dead too.  We can't
	       store the result to st(0) as st(0) gets popped on this
	       instruction.  Instead store to operands[2] (which I
	       think has to be st(1)).  st(1) will be popped later.
	       gcc <= 2.8.1 didn't have this check and generated
	       assembly code that the Unixware assembler rejected.  */
	    p = "p\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
	  break;
	}

      if (STACK_TOP_P (operands[0]))
	p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
      else
	p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
      break;
14316 if (MEM_P (operands
[1]))
14322 if (MEM_P (operands
[2]))
14328 if (find_regno_note (insn
, REG_DEAD
, REGNO (operands
[2])))
#if SYSV386_COMPAT
	  /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
	     derived assemblers, confusingly reverse the direction of
	     the operation for fsub{r} and fdiv{r} when the
	     destination register is not st(0).  The Intel assembler
	     doesn't have this brain damage.  Read !SYSV386_COMPAT to
	     figure out what the hardware really does.  */
	  if (STACK_TOP_P (operands[0]))
	    p = "{p\t%0, %2|rp\t%2, %0}";
	  else
	    p = "{rp\t%2, %0|p\t%0, %2}";
#else
	  if (STACK_TOP_P (operands[0]))
	    /* As above for fmul/fadd, we can't store to st(0).  */
	    p = "rp\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
#endif
14351 if (find_regno_note (insn
, REG_DEAD
, REGNO (operands
[1])))
14354 if (STACK_TOP_P (operands
[0]))
14355 p
= "{rp\t%0, %1|p\t%1, %0}";
14357 p
= "{p\t%1, %0|rp\t%0, %1}";
14359 if (STACK_TOP_P (operands
[0]))
14360 p
= "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14362 p
= "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14367 if (STACK_TOP_P (operands
[0]))
14369 if (STACK_TOP_P (operands
[1]))
14370 p
= "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14372 p
= "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
14375 else if (STACK_TOP_P (operands
[1]))
14378 p
= "{\t%1, %0|r\t%0, %1}";
14380 p
= "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
14386 p
= "{r\t%2, %0|\t%0, %2}";
14388 p
= "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14394 gcc_unreachable ();
/* Return needed mode for entity in optimize_mode_switching pass.  */

static int
ix86_dirflag_mode_needed (rtx_insn *insn)
{
  if (CALL_P (insn))
    {
      if (cfun->machine->func_type == TYPE_NORMAL)
	return X86_DIRFLAG_ANY;
      else
	/* No need to emit CLD in interrupt handler for TARGET_CLD.  */
	return TARGET_CLD ? X86_DIRFLAG_ANY : X86_DIRFLAG_RESET;
    }

  if (recog_memoized (insn) < 0)
    return X86_DIRFLAG_ANY;

  if (get_attr_type (insn) == TYPE_STR)
    {
      /* Emit cld instruction if stringops are used in the function.  */
      if (cfun->machine->func_type == TYPE_NORMAL)
	return TARGET_CLD ? X86_DIRFLAG_RESET : X86_DIRFLAG_ANY;
      else
	return X86_DIRFLAG_RESET;
    }

  return X86_DIRFLAG_ANY;
}
/* Check if a 256bit or 512 bit AVX register is referenced inside of EXP.  */

static bool
ix86_check_avx_upper_register (const_rtx exp)
{
  return (SSE_REG_P (exp)
	  && !EXT_REX_SSE_REG_P (exp)
	  && GET_MODE_BITSIZE (GET_MODE (exp)) > 128);
}

/* Check if a 256bit or 512bit AVX register is referenced in stores.  */

static void
ix86_check_avx_upper_stores (rtx dest, const_rtx, void *data)
{
  if (ix86_check_avx_upper_register (dest))
    {
      bool *used = (bool *) data;
      *used = true;
    }
}
/* Return needed mode for entity in optimize_mode_switching pass.  */

static int
ix86_avx_u128_mode_needed (rtx_insn *insn)
{
  if (DEBUG_INSN_P (insn))
    return AVX_U128_ANY;

  if (CALL_P (insn))
    {
      rtx link;

      /* Needed mode is set to AVX_U128_CLEAN if there are
	 no 256bit or 512bit modes used in function arguments.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == USE)
	    {
	      rtx arg = XEXP (XEXP (link, 0), 0);

	      if (ix86_check_avx_upper_register (arg))
		return AVX_U128_DIRTY;
	    }
	}

      /* Needed mode is set to AVX_U128_CLEAN if there are no 256bit
	 nor 512bit registers used in the function return register.  */
      bool avx_upper_reg_found = false;
      note_stores (insn, ix86_check_avx_upper_stores,
		   &avx_upper_reg_found);
      if (avx_upper_reg_found)
	return AVX_U128_DIRTY;

      /* If the function is known to preserve some SSE registers,
	 RA and previous passes can legitimately rely on that for
	 modes wider than 256 bits.  It's only safe to issue a
	 vzeroupper if all SSE registers are clobbered.  */
      const function_abi &abi = insn_callee_abi (insn);
      if (!hard_reg_set_subset_p (reg_class_contents[SSE_REGS],
				  abi.mode_clobbers (V4DImode)))
	return AVX_U128_ANY;

      return AVX_U128_CLEAN;
    }
  subrtx_iterator::array_type array;

  rtx set = single_set (insn);
  if (set)
    {
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      if (ix86_check_avx_upper_register (dest))
	{
	  /* This is an YMM/ZMM load.  Return AVX_U128_DIRTY if the
	     source isn't zero.  */
	  if (standard_sse_constant_p (src, GET_MODE (dest)) != 1)
	    return AVX_U128_DIRTY;
	  else
	    return AVX_U128_ANY;
	}
      else
	{
	  FOR_EACH_SUBRTX (iter, array, src, NONCONST)
	    if (ix86_check_avx_upper_register (*iter))
	      return AVX_U128_DIRTY;
	}

      /* This isn't YMM/ZMM load/store.  */
      return AVX_U128_ANY;
    }

  /* Require DIRTY mode if a 256bit or 512bit AVX register is referenced.
     Hardware changes state only when a 256bit register is written to,
     but we need to prevent the compiler from moving optimal insertion
     point above eventual read from 256bit or 512 bit register.  */
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    if (ix86_check_avx_upper_register (*iter))
      return AVX_U128_DIRTY;

  return AVX_U128_ANY;
}
/* Return mode that i387 must be switched into
   prior to the execution of insn.  */

static int
ix86_i387_mode_needed (int entity, rtx_insn *insn)
{
  enum attr_i387_cw mode;

  /* The mode UNINITIALIZED is used to store control word after a
     function call or ASM pattern.  The mode ANY specify that function
     has no requirements on the control word and make no changes in the
     bits we are interested in.  */
  if (CALL_P (insn)
      || (NONJUMP_INSN_P (insn)
	  && (asm_noperands (PATTERN (insn)) >= 0
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
    return I387_CW_UNINITIALIZED;

  if (recog_memoized (insn) < 0)
    return I387_CW_ANY;

  mode = get_attr_i387_cw (insn);

  switch (entity)
    {
    case I387_ROUNDEVEN:
      if (mode == I387_CW_ROUNDEVEN)
	return mode;
      break;

    case I387_TRUNC:
      if (mode == I387_CW_TRUNC)
	return mode;
      break;

    case I387_FLOOR:
      if (mode == I387_CW_FLOOR)
	return mode;
      break;

    case I387_CEIL:
      if (mode == I387_CW_CEIL)
	return mode;
      break;

    default:
      gcc_unreachable ();
    }

  return I387_CW_ANY;
}
14590 /* Return mode that entity must be switched into
14591 prior to the execution of insn. */
14594 ix86_mode_needed (int entity
, rtx_insn
*insn
)
14599 return ix86_dirflag_mode_needed (insn
);
14601 return ix86_avx_u128_mode_needed (insn
);
14602 case I387_ROUNDEVEN
:
14606 return ix86_i387_mode_needed (entity
, insn
);
14608 gcc_unreachable ();
14613 /* Calculate mode of upper 128bit AVX registers after the insn. */
14616 ix86_avx_u128_mode_after (int mode
, rtx_insn
*insn
)
14618 rtx pat
= PATTERN (insn
);
14620 if (vzeroupper_pattern (pat
, VOIDmode
)
14621 || vzeroall_pattern (pat
, VOIDmode
))
14622 return AVX_U128_CLEAN
;
14624 /* We know that state is clean after CALL insn if there are no
14625 256bit or 512bit registers used in the function return register. */
14628 bool avx_upper_reg_found
= false;
14629 note_stores (insn
, ix86_check_avx_upper_stores
, &avx_upper_reg_found
);
14631 return avx_upper_reg_found
? AVX_U128_DIRTY
: AVX_U128_CLEAN
;
14634 /* Otherwise, return current mode. Remember that if insn
14635 references AVX 256bit or 512bit registers, the mode was already
14636 changed to DIRTY from MODE_NEEDED. */
14640 /* Return the mode that an insn results in. */
14643 ix86_mode_after (int entity
, int mode
, rtx_insn
*insn
)
14650 return ix86_avx_u128_mode_after (mode
, insn
);
14651 case I387_ROUNDEVEN
:
14657 gcc_unreachable ();
14662 ix86_dirflag_mode_entry (void)
14664 /* For TARGET_CLD or in the interrupt handler we can't assume
14665 direction flag state at function entry. */
14667 || cfun
->machine
->func_type
!= TYPE_NORMAL
)
14668 return X86_DIRFLAG_ANY
;
14670 return X86_DIRFLAG_RESET
;
14674 ix86_avx_u128_mode_entry (void)
14678 /* Entry mode is set to AVX_U128_DIRTY if there are
14679 256bit or 512bit modes used in function arguments. */
14680 for (arg
= DECL_ARGUMENTS (current_function_decl
); arg
;
14681 arg
= TREE_CHAIN (arg
))
14683 rtx incoming
= DECL_INCOMING_RTL (arg
);
14685 if (incoming
&& ix86_check_avx_upper_register (incoming
))
14686 return AVX_U128_DIRTY
;
14689 return AVX_U128_CLEAN
;
14692 /* Return a mode that ENTITY is assumed to be
14693 switched to at function entry. */
14696 ix86_mode_entry (int entity
)
14701 return ix86_dirflag_mode_entry ();
14703 return ix86_avx_u128_mode_entry ();
14704 case I387_ROUNDEVEN
:
14708 return I387_CW_ANY
;
14710 gcc_unreachable ();
14715 ix86_avx_u128_mode_exit (void)
14717 rtx reg
= crtl
->return_rtx
;
14719 /* Exit mode is set to AVX_U128_DIRTY if there are 256bit
14720 or 512 bit modes used in the function return register. */
14721 if (reg
&& ix86_check_avx_upper_register (reg
))
14722 return AVX_U128_DIRTY
;
14724 /* Exit mode is set to AVX_U128_DIRTY if there are 256bit or 512bit
14725 modes used in function arguments, otherwise return AVX_U128_CLEAN.
14727 return ix86_avx_u128_mode_entry ();
14730 /* Return a mode that ENTITY is assumed to be
14731 switched to at function exit. */
14734 ix86_mode_exit (int entity
)
14739 return X86_DIRFLAG_ANY
;
14741 return ix86_avx_u128_mode_exit ();
14742 case I387_ROUNDEVEN
:
14746 return I387_CW_ANY
;
14748 gcc_unreachable ();
14753 ix86_mode_priority (int, int n
)
/* Output code to initialize control word copies used by trunc?f?i and
   rounding patterns.  CURRENT_MODE is set to current control word,
   while NEW_MODE is set to new control word.  */

static void
emit_i387_cw_initialization (int mode)
{
  rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
  rtx new_mode;

  enum ix86_stack_slot slot;

  rtx reg = gen_reg_rtx (HImode);

  emit_insn (gen_x86_fnstcw_1 (stored_mode));
  emit_move_insn (reg, copy_rtx (stored_mode));
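  /* Bits 10 and 11 of the x87 control word select the rounding mode:
     00 = to nearest (even), 01 = down (toward -inf), 10 = up (toward
     +inf), 11 = toward zero (truncate); hence the 0x0c00 mask and the
     0x0400/0x0800 values or'ed in below.  */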
  switch (mode)
    {
    case I387_CW_ROUNDEVEN:
      /* round to nearest */
      emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
      slot = SLOT_CW_ROUNDEVEN;
      break;

    case I387_CW_TRUNC:
      /* round toward zero (truncate) */
      emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
      slot = SLOT_CW_TRUNC;
      break;

    case I387_CW_FLOOR:
      /* round down toward -oo */
      emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
      emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
      slot = SLOT_CW_FLOOR;
      break;

    case I387_CW_CEIL:
      /* round up toward +oo */
      emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
      emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
      slot = SLOT_CW_CEIL;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (slot < MAX_386_STACK_LOCALS);

  new_mode = assign_386_stack_local (HImode, slot);
  emit_move_insn (new_mode, reg);
}
14813 /* Generate one or more insns to set ENTITY to MODE. */
14816 ix86_emit_mode_set (int entity
, int mode
, int prev_mode ATTRIBUTE_UNUSED
,
14817 HARD_REG_SET regs_live ATTRIBUTE_UNUSED
)
14822 if (mode
== X86_DIRFLAG_RESET
)
14823 emit_insn (gen_cld ());
14826 if (mode
== AVX_U128_CLEAN
)
14827 ix86_expand_avx_vzeroupper ();
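	/* vzeroupper clears the bits above 127 of every vector register;
	   emitting it on the transition to the CLEAN state avoids the
	   AVX/SSE transition penalties on CPUs that have them.  */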
14829 case I387_ROUNDEVEN
:
14833 if (mode
!= I387_CW_ANY
14834 && mode
!= I387_CW_UNINITIALIZED
)
14835 emit_i387_cw_initialization (mode
);
14838 gcc_unreachable ();
/* Output code for INSN to convert a float to a signed int.  OPERANDS
   are the insn operands.  The output may be [HSD]Imode and the input
   operand may be [SDX]Fmode.  */

const char *
output_fix_trunc (rtx_insn *insn, rtx *operands, bool fisttp)
{
  bool stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG);
  bool dimode_p = GET_MODE (operands[0]) == DImode;
  int round_mode = get_attr_i387_cw (insn);

  static char buf[40];
  const char *p;

  /* Jump through a hoop or two for DImode, since the hardware has no
     non-popping instruction.  We used to do this a different way, but
     that was somewhat fragile and broke with post-reload splitters.  */
  if ((dimode_p || fisttp) && !stack_top_dies)
    output_asm_insn ("fld\t%y1", operands);

  gcc_assert (STACK_TOP_P (operands[1]));
  gcc_assert (MEM_P (operands[0]));
  gcc_assert (GET_MODE (operands[1]) != TFmode);
14869 strcpy (buf
, "fist");
14871 if (round_mode
!= I387_CW_ANY
)
14872 output_asm_insn ("fldcw\t%3", operands
);
14875 strcat (buf
, p
+ !(stack_top_dies
|| dimode_p
));
14877 output_asm_insn (buf
, operands
);
14879 if (round_mode
!= I387_CW_ANY
)
14880 output_asm_insn ("fldcw\t%2", operands
);
/* Output code for x87 ffreep insn.  The OPNO argument, which may only
   have the values zero or one, indicates the ffreep insn's operand
   from the OPERANDS array.  */

static const char *
output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
{
  if (TARGET_USE_FFREEP)
#ifdef HAVE_AS_IX86_FFREEP
    return opno ? "ffreep\t%y1" : "ffreep\t%y0";
#else
    {
      static char retval[32];
      int regno = REGNO (operands[opno]);

      gcc_assert (STACK_REGNO_P (regno));

      regno -= FIRST_STACK_REG;
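      /* The assembler lacks ffreep here, so the two opcode bytes are
	 emitted directly: ASM_SHORT with "0xc%ddf" produces the
	 little-endian 16-bit value whose bytes are 0xdf, 0xc0+regno,
	 i.e. the encoding of ffreep %st(regno).  */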
      snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
      return retval;
    }
#endif

  return opno ? "fstp\t%y1" : "fstp\t%y0";
}
14913 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
14914 should be used. UNORDERED_P is true when fucom should be used. */
14917 output_fp_compare (rtx_insn
*insn
, rtx
*operands
,
14918 bool eflags_p
, bool unordered_p
)
14920 rtx
*xops
= eflags_p
? &operands
[0] : &operands
[1];
14921 bool stack_top_dies
;
14923 static char buf
[40];
14926 gcc_assert (STACK_TOP_P (xops
[0]));
14928 stack_top_dies
= find_regno_note (insn
, REG_DEAD
, FIRST_STACK_REG
);
14932 p
= unordered_p
? "fucomi" : "fcomi";
14935 p
= "p\t{%y1, %0|%0, %y1}";
14936 strcat (buf
, p
+ !stack_top_dies
);
14941 if (STACK_REG_P (xops
[1])
14943 && find_regno_note (insn
, REG_DEAD
, FIRST_STACK_REG
+ 1))
14945 gcc_assert (REGNO (xops
[1]) == FIRST_STACK_REG
+ 1);
14947 /* If both the top of the 387 stack die, and the other operand
14948 is also a stack register that dies, then this must be a
14949 `fcompp' float compare. */
14950 p
= unordered_p
? "fucompp" : "fcompp";
14953 else if (const0_operand (xops
[1], VOIDmode
))
14955 gcc_assert (!unordered_p
);
14956 strcpy (buf
, "ftst");
14960 if (GET_MODE_CLASS (GET_MODE (xops
[1])) == MODE_INT
)
14962 gcc_assert (!unordered_p
);
14966 p
= unordered_p
? "fucom" : "fcom";
14971 strcat (buf
, p
+ !stack_top_dies
);
14974 output_asm_insn (buf
, operands
);
14975 return "fnstsw\t%0";
14979 ix86_output_addr_vec_elt (FILE *file
, int value
)
14981 const char *directive
= ASM_LONG
;
14985 directive
= ASM_QUAD
;
14987 gcc_assert (!TARGET_64BIT
);
14990 fprintf (file
, "%s%s%d\n", directive
, LPREFIX
, value
);
14994 ix86_output_addr_diff_elt (FILE *file
, int value
, int rel
)
14996 const char *directive
= ASM_LONG
;
14999 if (TARGET_64BIT
&& CASE_VECTOR_MODE
== DImode
)
15000 directive
= ASM_QUAD
;
15002 gcc_assert (!TARGET_64BIT
);
  /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand.  */
  if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15006 fprintf (file
, "%s%s%d-%s%d\n",
15007 directive
, LPREFIX
, value
, LPREFIX
, rel
);
15009 else if (TARGET_MACHO
)
15011 fprintf (file
, ASM_LONG
"%s%d-", LPREFIX
, value
);
15012 machopic_output_function_base_name (file
);
15016 else if (HAVE_AS_GOTOFF_IN_DATA
)
15017 fprintf (file
, ASM_LONG
"%s%d@GOTOFF\n", LPREFIX
, value
);
15019 asm_fprintf (file
, ASM_LONG
"%U%s+[.-%s%d]\n",
15020 GOT_SYMBOL_NAME
, LPREFIX
, value
);
#define LEA_MAX_STALL (3)
#define LEA_SEARCH_THRESHOLD (LEA_MAX_STALL << 1)

/* Increase given DISTANCE in half-cycles according to
   dependencies between PREV and NEXT instructions.
   Add 1 half-cycle if there is no dependency and
   go to next cycle if there is some dependency.  */

static unsigned int
increase_distance (rtx_insn *prev, rtx_insn *next, unsigned int distance)
{
  df_ref def, use;

  if (!prev || !next)
    return distance + (distance & 1) + 2;

  if (!DF_INSN_USES (next) || !DF_INSN_DEFS (prev))
    return distance + 1;

  FOR_EACH_INSN_USE (use, next)
    FOR_EACH_INSN_DEF (def, prev)
      if (!DF_REF_IS_ARTIFICIAL (def)
	  && DF_REF_REGNO (use) == DF_REF_REGNO (def))
	return distance + (distance & 1) + 2;

  return distance + 1;
}
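/* Note on the arithmetic above: distances are kept in half-cycles, so
   "distance + (distance & 1) + 2" rounds up to the next full cycle and
   then charges one more full cycle when there is a true dependency
   between PREV and NEXT (or when one of them is missing), while
   independent instructions only add half a cycle.  */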
/* Function checks if instruction INSN defines register number
   REGNO1 or REGNO2.  */

static bool
insn_defines_reg (unsigned int regno1, unsigned int regno2,
		  rtx_insn *insn)
{
  df_ref def;

  FOR_EACH_INSN_DEF (def, insn)
    if (DF_REF_REG_DEF_P (def)
	&& !DF_REF_IS_ARTIFICIAL (def)
	&& (regno1 == DF_REF_REGNO (def)
	    || regno2 == DF_REF_REGNO (def)))
      return true;

  return false;
}

/* Function checks if instruction INSN uses register number
   REGNO as a part of address expression.  */

static bool
insn_uses_reg_mem (unsigned int regno, rtx insn)
{
  df_ref use;

  FOR_EACH_INSN_USE (use, insn)
    if (DF_REF_REG_MEM_P (use) && regno == DF_REF_REGNO (use))
      return true;

  return false;
}
/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in basic block starting from instruction
   START up to head of basic block or instruction INSN.

   Function puts true value into *FOUND var if definition was found
   and false otherwise.

   Distance in half-cycles between START and found instruction or head
   of BB is added to DISTANCE and returned.  */
15096 distance_non_agu_define_in_bb (unsigned int regno1
, unsigned int regno2
,
15097 rtx_insn
*insn
, int distance
,
15098 rtx_insn
*start
, bool *found
)
15100 basic_block bb
= start
? BLOCK_FOR_INSN (start
) : NULL
;
15101 rtx_insn
*prev
= start
;
15102 rtx_insn
*next
= NULL
;
15108 && distance
< LEA_SEARCH_THRESHOLD
)
15110 if (NONDEBUG_INSN_P (prev
) && NONJUMP_INSN_P (prev
))
15112 distance
= increase_distance (prev
, next
, distance
);
15113 if (insn_defines_reg (regno1
, regno2
, prev
))
15115 if (recog_memoized (prev
) < 0
15116 || get_attr_type (prev
) != TYPE_LEA
)
15125 if (prev
== BB_HEAD (bb
))
15128 prev
= PREV_INSN (prev
);
/* Search backward for non-agu definition of register number REGNO1
   or register number REGNO2 in INSN's basic block until
   1. Pass LEA_SEARCH_THRESHOLD instructions, or
   2. Reach neighbor BBs boundary, or
   3. Reach agu definition.
   Returns the distance between the non-agu definition point and INSN.
   If no definition point, returns -1.  */
15143 distance_non_agu_define (unsigned int regno1
, unsigned int regno2
,
15146 basic_block bb
= BLOCK_FOR_INSN (insn
);
15148 bool found
= false;
15150 if (insn
!= BB_HEAD (bb
))
15151 distance
= distance_non_agu_define_in_bb (regno1
, regno2
, insn
,
15152 distance
, PREV_INSN (insn
),
15155 if (!found
&& distance
< LEA_SEARCH_THRESHOLD
)
15159 bool simple_loop
= false;
15161 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
15164 simple_loop
= true;
15169 distance
= distance_non_agu_define_in_bb (regno1
, regno2
,
15171 BB_END (bb
), &found
);
15174 int shortest_dist
= -1;
15175 bool found_in_bb
= false;
15177 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
15180 = distance_non_agu_define_in_bb (regno1
, regno2
,
15186 if (shortest_dist
< 0)
15187 shortest_dist
= bb_dist
;
15188 else if (bb_dist
> 0)
15189 shortest_dist
= MIN (bb_dist
, shortest_dist
);
15195 distance
= shortest_dist
;
15202 return distance
>> 1;
/* Return the distance in half-cycles between INSN and the next
   insn that uses register number REGNO in memory address added
   to DISTANCE.  Return -1 if REGNO0 is set.

   Put true value into *FOUND if register usage was found and
   false otherwise.

   Put true value into *REDEFINED if register redefinition was
   found and false otherwise.  */
15215 distance_agu_use_in_bb (unsigned int regno
,
15216 rtx_insn
*insn
, int distance
, rtx_insn
*start
,
15217 bool *found
, bool *redefined
)
15219 basic_block bb
= NULL
;
15220 rtx_insn
*next
= start
;
15221 rtx_insn
*prev
= NULL
;
15224 *redefined
= false;
15226 if (start
!= NULL_RTX
)
15228 bb
= BLOCK_FOR_INSN (start
);
      if (start != BB_HEAD (bb))
	/* If insn and start belong to the same bb, set prev to insn,
	   so the call to increase_distance will increase the distance
	   between insns by 1.  */
	prev = insn;
15238 && distance
< LEA_SEARCH_THRESHOLD
)
15240 if (NONDEBUG_INSN_P (next
) && NONJUMP_INSN_P (next
))
15242 distance
= increase_distance(prev
, next
, distance
);
15243 if (insn_uses_reg_mem (regno
, next
))
15245 /* Return DISTANCE if OP0 is used in memory
15246 address in NEXT. */
15251 if (insn_defines_reg (regno
, INVALID_REGNUM
, next
))
15253 /* Return -1 if OP0 is set in NEXT. */
15261 if (next
== BB_END (bb
))
15264 next
= NEXT_INSN (next
);
/* Return the distance between INSN and the next insn that uses
   register number REGNO0 in memory address.  Return -1 if no such
   a use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set.  */
15275 distance_agu_use (unsigned int regno0
, rtx_insn
*insn
)
15277 basic_block bb
= BLOCK_FOR_INSN (insn
);
15279 bool found
= false;
15280 bool redefined
= false;
15282 if (insn
!= BB_END (bb
))
15283 distance
= distance_agu_use_in_bb (regno0
, insn
, distance
,
15285 &found
, &redefined
);
15287 if (!found
&& !redefined
&& distance
< LEA_SEARCH_THRESHOLD
)
15291 bool simple_loop
= false;
15293 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
15296 simple_loop
= true;
15301 distance
= distance_agu_use_in_bb (regno0
, insn
,
15302 distance
, BB_HEAD (bb
),
15303 &found
, &redefined
);
15306 int shortest_dist
= -1;
15307 bool found_in_bb
= false;
15308 bool redefined_in_bb
= false;
15310 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
15313 = distance_agu_use_in_bb (regno0
, insn
,
15314 distance
, BB_HEAD (e
->dest
),
15315 &found_in_bb
, &redefined_in_bb
);
15318 if (shortest_dist
< 0)
15319 shortest_dist
= bb_dist
;
15320 else if (bb_dist
> 0)
15321 shortest_dist
= MIN (bb_dist
, shortest_dist
);
15327 distance
= shortest_dist
;
15331 if (!found
|| redefined
)
15334 return distance
>> 1;
/* Define this macro to tune LEA priority vs ADD; it takes effect when
   there is a dilemma of choosing LEA or ADD.
   Negative value: ADD is more preferred than LEA.
   Zero: neutral.
   Positive value: LEA is more preferred than ADD.  */
#define IX86_LEA_PRIORITY 0

/* Return true if usage of lea INSN has performance advantage
   over a sequence of instructions.  Instructions sequence has
   SPLIT_COST cycles higher latency than lea latency.  */
15349 ix86_lea_outperforms (rtx_insn
*insn
, unsigned int regno0
, unsigned int regno1
,
15350 unsigned int regno2
, int split_cost
, bool has_scale
)
15352 int dist_define
, dist_use
;
  /* For Atom processors newer than Bonnell, if using a 2-source or
     3-source LEA for non-destructive destination purposes, or due to
     wanting ability to use SCALE, the use of LEA is justified.  */
  if (!TARGET_CPU_P (BONNELL))
    {
      if (has_scale)
	return true;
      if (split_cost < 1)
	return false;
      if (regno0 == regno1 || regno0 == regno2)
	return false;
      return true;
    }

  /* Remember recog_data content.  */
  struct recog_data_d recog_data_save = recog_data;
15371 dist_define
= distance_non_agu_define (regno1
, regno2
, insn
);
15372 dist_use
= distance_agu_use (regno0
, insn
);
  /* distance_non_agu_define can call get_attr_type which can call
     recog_memoized, restore recog_data back to previous content.  */
  recog_data = recog_data_save;

  if (dist_define < 0 || dist_define >= LEA_MAX_STALL)
    {
      /* If there is no non AGU operand definition, no AGU
	 operand usage and split cost is 0 then both lea
	 and non lea variants have same priority.  Currently
	 we prefer lea for 64 bit code and non lea on 32 bit
	 code.  */
      if (dist_use < 0 && split_cost == 0)
	return TARGET_64BIT || IX86_LEA_PRIORITY;
      else
	return true;
    }

  /* With longer definitions distance lea is more preferable.
     Here we change it to take into account splitting cost and
     lea priority.  */
  dist_define += split_cost + IX86_LEA_PRIORITY;

  /* If there is no use in memory address then we just check
     that split cost exceeds AGU stall.  */
  if (dist_use < 0)
    return dist_define > LEA_MAX_STALL;

  /* If this insn has both backward non-agu dependence and forward
     agu dependence, the one with short distance takes effect.  */
  return dist_define >= dist_use;
}
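/* The heuristic above targets Bonnell's in-order pipeline, where an LEA
   executes in the address-generation (AGU) stage: a value produced by a
   non-AGU instruction needs extra cycles before an LEA can consume it
   (dist_define), while a value that soon feeds another address
   calculation benefits from being produced by an LEA (dist_use).  */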
/* Return true if we need to split op0 = op1 + op2 into a sequence of
   move and add to avoid AGU stalls.  */

bool
ix86_avoid_lea_for_add (rtx_insn *insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);
  regno2 = true_regnum (operands[2]);

  /* We need to split only adds with non destructive
     destination operand.  */
  if (regno0 == regno1 || regno0 == regno2)
    return false;

  return !ix86_lea_outperforms (insn, regno0, regno1, regno2, 1, false);
}
/* Return true if we should emit lea instruction instead of mov
   instruction.  */

bool
ix86_use_lea_for_mov (rtx_insn *insn, rtx operands[])
{
  unsigned int regno0, regno1;

  /* Check if we need to optimize.  */
  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  /* Use lea for reg to reg moves only.  */
  if (!REG_P (operands[0]) || !REG_P (operands[1]))
    return false;

  regno0 = true_regnum (operands[0]);
  regno1 = true_regnum (operands[1]);

  return ix86_lea_outperforms (insn, regno0, regno1, INVALID_REGNUM, 0, false);
}
/* Return true if we need to split lea into a sequence of
   instructions to avoid AGU stalls during peephole2.  */

bool
ix86_avoid_lea_for_addr (rtx_insn *insn, rtx operands[])
{
  unsigned int regno0, regno1, regno2;
  int split_cost;
  struct ix86_address parts;
  int ok;

  /* The "at least two components" test below might not catch simple
     move or zero extension insns if parts.base is non-NULL and parts.disp
     is const0_rtx as the only components in the address, e.g. if the
     register is %rbp or %r13.  As this test is much cheaper and moves or
     zero extensions are the common case, do this check first.  */
  if (REG_P (operands[1])
      || (SImode_address_operand (operands[1], VOIDmode)
	  && REG_P (XEXP (operands[1], 0))))
    return false;
  ok = ix86_decompose_address (operands[1], &parts);
  gcc_assert (ok);

  /* There should be at least two components in the address.  */
  if ((parts.base != NULL_RTX) + (parts.index != NULL_RTX)
      + (parts.disp != NULL_RTX) + (parts.scale > 1) < 2)
    return false;

  /* We should not split into add if non legitimate pic
     operand is used as displacement.  */
  if (parts.disp && flag_pic && !LEGITIMATE_PIC_OPERAND_P (parts.disp))
    return false;

  regno0 = true_regnum (operands[0]) ;
  regno1 = INVALID_REGNUM;
  regno2 = INVALID_REGNUM;

  if (parts.base)
    regno1 = true_regnum (parts.base);
  if (parts.index)
    regno2 = true_regnum (parts.index);

  /* Use add for a = a + b and a = b + a since it is faster and shorter
     than lea for most processors.  For the processors like BONNELL, if
     the destination register of LEA holds an actual address which will
     be used soon, LEA is better and otherwise ADD is better.  */
  if (!TARGET_CPU_P (BONNELL)
      && parts.scale == 1
      && (!parts.disp || parts.disp == const0_rtx)
      && (regno0 == regno1 || regno0 == regno2))
    return true;
  /* Check we need to optimize.  */
  if (!TARGET_AVOID_LEA_FOR_ADDR || optimize_function_for_size_p (cfun))
    return false;
  /* Compute how many cycles we will add to execution time
     if split lea into a sequence of instructions.  */
  if (parts.base || parts.index)
      /* Have to use mov instruction if non destructive
	 destination form is used.  */
      if (regno1 != regno0 && regno2 != regno0)
	split_cost += 1;
      /* Have to add index to base if both exist.  */
      if (parts.base && parts.index)
	split_cost += 1;
      /* Have to use shift and adds if scale is 2 or greater.  */
      if (parts.scale > 1)
15527 if (regno0
!= regno1
)
15529 else if (regno2
== regno0
)
15532 split_cost
+= parts
.scale
;
      /* Have to use add instruction with immediate if
	 disp is non zero.  */
      if (parts.disp && parts.disp != const0_rtx)
	split_cost += 1;
  /* Subtract the price of lea.  */
  split_cost -= 1;

  return !ix86_lea_outperforms (insn, regno0, regno1, regno2, split_cost,
				parts.scale > 1);
}
/* Return true if it is ok to optimize an ADD operation to LEA
   operation to avoid flag register consumption.  For most processors,
   ADD is faster than LEA.  For the processors like BONNELL, if the
   destination register of LEA holds an actual address which will be
   used soon, LEA is better and otherwise ADD is better.  */

bool
ix86_lea_for_add_ok (rtx_insn *insn, rtx operands[])
{
  unsigned int regno0 = true_regnum (operands[0]);
  unsigned int regno1 = true_regnum (operands[1]);
  unsigned int regno2 = true_regnum (operands[2]);

  /* If a = b + c, (a!=b && a!=c), must use lea form.  */
  if (regno0 != regno1 && regno0 != regno2)
    return true;

  if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
    return false;

  return ix86_lea_outperforms (insn, regno0, regno1, regno2, 0, false);
}
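/* Unlike ADD, LEA does not modify EFLAGS and allows a three-operand
   (non-destructive) form, which is why the a = b + c case above must
   use the LEA form even when ADD would otherwise be preferred.  */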
15571 /* Return true if destination reg of SET_BODY is shift count of
15575 ix86_dep_by_shift_count_body (const_rtx set_body
, const_rtx use_body
)
15581 /* Retrieve destination of SET_BODY. */
15582 switch (GET_CODE (set_body
))
15585 set_dest
= SET_DEST (set_body
);
15586 if (!set_dest
|| !REG_P (set_dest
))
15590 for (i
= XVECLEN (set_body
, 0) - 1; i
>= 0; i
--)
15591 if (ix86_dep_by_shift_count_body (XVECEXP (set_body
, 0, i
),
15599 /* Retrieve shift count of USE_BODY. */
15600 switch (GET_CODE (use_body
))
15603 shift_rtx
= XEXP (use_body
, 1);
15606 for (i
= XVECLEN (use_body
, 0) - 1; i
>= 0; i
--)
15607 if (ix86_dep_by_shift_count_body (set_body
,
15608 XVECEXP (use_body
, 0, i
)))
15616 && (GET_CODE (shift_rtx
) == ASHIFT
15617 || GET_CODE (shift_rtx
) == LSHIFTRT
15618 || GET_CODE (shift_rtx
) == ASHIFTRT
15619 || GET_CODE (shift_rtx
) == ROTATE
15620 || GET_CODE (shift_rtx
) == ROTATERT
))
      rtx shift_count = XEXP (shift_rtx, 1);

      /* Return true if shift count is dest of SET_BODY.  */
      if (REG_P (shift_count))
	{
	  /* Add check since it can be invoked before register
	     allocation in pre-reload schedule.  */
	  if (reload_completed
	      && true_regnum (set_dest) == true_regnum (shift_count))
	    return true;
	  else if (REGNO (set_dest) == REGNO (shift_count))
	    return true;
	}
15640 /* Return true if destination reg of SET_INSN is shift count of
15644 ix86_dep_by_shift_count (const_rtx set_insn
, const_rtx use_insn
)
15646 return ix86_dep_by_shift_count_body (PATTERN (set_insn
),
15647 PATTERN (use_insn
));
15650 /* Return TRUE or FALSE depending on whether the unary operator meets the
15651 appropriate constraints. */
15654 ix86_unary_operator_ok (enum rtx_code
,
15658 /* If one of operands is memory, source and destination must match. */
15659 if ((MEM_P (operands
[0])
15660 || MEM_P (operands
[1]))
15661 && ! rtx_equal_p (operands
[0], operands
[1]))
15666 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
15667 are ok, keeping in mind the possible movddup alternative. */
15670 ix86_vec_interleave_v2df_operator_ok (rtx operands
[3], bool high
)
15672 if (MEM_P (operands
[0]))
15673 return rtx_equal_p (operands
[0], operands
[1 + high
]);
15674 if (MEM_P (operands
[1]) && MEM_P (operands
[2]))
15679 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
15680 then replicate the value for all elements of the vector
15684 ix86_build_const_vector (machine_mode mode
, bool vect
, rtx value
)
15688 machine_mode scalar_mode
;
15717 n_elt
= GET_MODE_NUNITS (mode
);
15718 v
= rtvec_alloc (n_elt
);
15719 scalar_mode
= GET_MODE_INNER (mode
);
15721 RTVEC_ELT (v
, 0) = value
;
15723 for (i
= 1; i
< n_elt
; ++i
)
15724 RTVEC_ELT (v
, i
) = vect
? value
: CONST0_RTX (scalar_mode
);
15726 return gen_rtx_CONST_VECTOR (mode
, v
);
15729 gcc_unreachable ();
15733 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
15734 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
15735 for an SSE register. If VECT is true, then replicate the mask for
15736 all elements of the vector register. If INVERT is true, then create
15737 a mask excluding the sign bit. */
15740 ix86_build_signbit_mask (machine_mode mode
, bool vect
, bool invert
)
15742 machine_mode vec_mode
, imode
;
15779 vec_mode
= VOIDmode
;
15784 gcc_unreachable ();
15787 machine_mode inner_mode
= GET_MODE_INNER (mode
);
15788 w
= wi::set_bit_in_zero (GET_MODE_BITSIZE (inner_mode
) - 1,
15789 GET_MODE_BITSIZE (inner_mode
));
15791 w
= wi::bit_not (w
);
15793 /* Force this value into the low part of a fp vector constant. */
15794 mask
= immed_wide_int_const (w
, imode
);
15795 mask
= gen_lowpart (inner_mode
, mask
);
15797 if (vec_mode
== VOIDmode
)
15798 return force_reg (inner_mode
, mask
);
15800 v
= ix86_build_const_vector (vec_mode
, vect
, mask
);
15801 return force_reg (vec_mode
, v
);
15804 /* Return HOST_WIDE_INT for const vector OP in MODE. */
15807 ix86_convert_const_vector_to_integer (rtx op
, machine_mode mode
)
15809 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
15810 gcc_unreachable ();
15812 int nunits
= GET_MODE_NUNITS (mode
);
15813 wide_int val
= wi::zero (GET_MODE_BITSIZE (mode
));
15814 machine_mode innermode
= GET_MODE_INNER (mode
);
15815 unsigned int innermode_bits
= GET_MODE_BITSIZE (innermode
);
15825 for (int i
= 0; i
< nunits
; ++i
)
15827 int v
= INTVAL (XVECEXP (op
, 0, i
));
15828 wide_int wv
= wi::shwi (v
, innermode_bits
);
15829 val
= wi::insert (val
, wv
, innermode_bits
* i
, innermode_bits
);
15837 for (int i
= 0; i
< nunits
; ++i
)
15839 rtx x
= XVECEXP (op
, 0, i
);
15840 int v
= real_to_target (NULL
, CONST_DOUBLE_REAL_VALUE (x
),
15841 REAL_MODE_FORMAT (innermode
));
15842 wide_int wv
= wi::shwi (v
, innermode_bits
);
15843 val
= wi::insert (val
, wv
, innermode_bits
* i
, innermode_bits
);
15847 gcc_unreachable ();
15850 return val
.to_shwi ();
15853 /* Return TRUE or FALSE depending on whether the first SET in INSN
15854 has source and destination with matching CC modes, and that the
15855 CC mode is at least as constrained as REQ_MODE. */
15858 ix86_match_ccmode (rtx insn
, machine_mode req_mode
)
15861 machine_mode set_mode
;
15863 set
= PATTERN (insn
);
15864 if (GET_CODE (set
) == PARALLEL
)
15865 set
= XVECEXP (set
, 0, 0);
15866 gcc_assert (GET_CODE (set
) == SET
);
15867 gcc_assert (GET_CODE (SET_SRC (set
)) == COMPARE
);
15869 set_mode
= GET_MODE (SET_DEST (set
));
15873 if (req_mode
!= CCNOmode
15874 && (req_mode
!= CCmode
15875 || XEXP (SET_SRC (set
), 1) != const0_rtx
))
15879 if (req_mode
== CCGCmode
)
15883 if (req_mode
== CCGOCmode
|| req_mode
== CCNOmode
)
15887 if (req_mode
== CCZmode
)
15900 if (set_mode
!= req_mode
)
15905 gcc_unreachable ();
15908 return GET_MODE (SET_SRC (set
)) == set_mode
;
machine_mode
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  rtx geu;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
      return CCFPmode;
    }

  switch (code)
    {
      /* Only zero flag is needed.  */
    case EQ:			/* ZF=0 */
    case NE:			/* ZF!=0 */
      return CCZmode;
      /* Codes needing carry flag.  */
    case GEU:			/* CF=0 */
    case LTU:			/* CF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == PLUS
	  && (rtx_equal_p (op1, XEXP (op0, 0))
	      || rtx_equal_p (op1, XEXP (op0, 1))))
	return CCCmode;
      /* Similarly for *setcc_qi_addqi3_cconly_overflow_1_* patterns.
	 Match LTU of op0
	 (neg:QI (geu:QI (reg:CC_CCC FLAGS_REG) (const_int 0)))
	 and op1
	 (ltu:QI (reg:CC_CCC FLAGS_REG) (const_int 0))
	 where CC_CCC is either CC or CCC.  */
      else if (code == LTU
	       && GET_CODE (op0) == NEG
	       && GET_CODE (geu = XEXP (op0, 0)) == GEU
	       && REG_P (XEXP (geu, 0))
	       && (GET_MODE (XEXP (geu, 0)) == CCCmode
		   || GET_MODE (XEXP (geu, 0)) == CCmode)
	       && REGNO (XEXP (geu, 0)) == FLAGS_REG
	       && XEXP (geu, 1) == const0_rtx
	       && GET_CODE (op1) == LTU
	       && REG_P (XEXP (op1, 0))
	       && GET_MODE (XEXP (op1, 0)) == GET_MODE (XEXP (geu, 0))
	       && REGNO (XEXP (op1, 0)) == FLAGS_REG
	       && XEXP (op1, 1) == const0_rtx)
	return CCCmode;
      else
	return CCmode;
    case GTU:			/* CF=0 & ZF=0 */
    case LEU:			/* CF=1 | ZF=1 */
      return CCmode;
      /* Codes possibly doable only with sign flag when
	 comparing against zero.  */
    case GE:			/* SF=OF   or   SF=0 */
    case LT:			/* SF<>OF  or   SF=1 */
      if (op1 == const0_rtx)
	return CCGOCmode;
      else
	/* For other cases Carry flag is not required.  */
	return CCGCmode;
      /* Codes doable only with sign flag when comparing
	 against zero, but we miss jump instruction for it
	 so we need to use relational tests against overflow
	 that thus needs to be zero.  */
    case GT:			/* ZF=0 & SF=OF */
    case LE:			/* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)
	return CCNOmode;
      else
	return CCGCmode;
      /* strcmp pattern do (use flags) and combine may ask us for proper
	 mode.  */
    case USE:
      return CCmode;
    default:
      gcc_unreachable ();
    }
}
/* Return the fixed registers used for condition codes.  */

static bool
ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLAGS_REG;
  *p2 = INVALID_REGNUM;
  return true;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
ix86_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
    return VOIDmode;

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))
    return CCGCmode;

  if ((m1 == CCNOmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCNOmode))
    return CCNOmode;

  if (m1 == CCZmode
      && (m2 == CCGCmode || m2 == CCGOCmode || m2 == CCNOmode))
    return m2;
  else if (m2 == CCZmode
	   && (m1 == CCGCmode || m1 == CCGOCmode || m1 == CCNOmode))
    return m1;

  switch (m1)
    {
    default:
      gcc_unreachable ();

      /* These are only compatible with themselves, which we already
	 checked above.  */
      return VOIDmode;
    }
}
/* Return strategy to use for floating-point.  We assume that fcomi is always
   preferable where available, since that is also true when looking at size
   (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test).  */

enum ix86_fpcmp_strategy
ix86_fp_comparison_strategy (enum rtx_code)
{
  /* Do fcomi/sahf based test when profitable.  */

  if (TARGET_CMOVE)
    return IX86_FPCMP_COMI;

  if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
    return IX86_FPCMP_SAHF;

  return IX86_FPCMP_ARITH;
}
/* Convert comparison codes we use to represent FP comparison to integer
   code that will result in proper branch.  Return UNKNOWN if no such code
   is available.  */

ix86_fp_compare_code_to_integer (enum rtx_code code)

/* Zero extend possibly SImode EXP to Pmode register.  */

rtx
ix86_zero_extend_to_Pmode (rtx exp)
{
  return force_reg (Pmode, convert_to_mode (Pmode, exp, 1));
}
/* Return true if the function is called via PLT.  */

bool
ix86_call_use_plt_p (rtx call_op)
{
  if (SYMBOL_REF_LOCAL_P (call_op))
    {
      if (SYMBOL_REF_DECL (call_op)
	  && TREE_CODE (SYMBOL_REF_DECL (call_op)) == FUNCTION_DECL)
	{
	  /* NB: All ifunc functions must be called via PLT.  */
	  cgraph_node *node
	    = cgraph_node::get (SYMBOL_REF_DECL (call_op));
	  if (node && node->ifunc_resolver)
	    return true;
	}
      return false;
    }
  return true;
}
/* Implement TARGET_IFUNC_REF_LOCAL_OK.  If this hook returns true,
   the PLT entry will be used as the function address for local IFUNC
   functions.  When the PIC register is needed for PLT call, indirect
   call via the PLT entry will fail since the PIC register may not be
   set up properly for indirect call.  In this case, we should return
   false.  */

static bool
ix86_ifunc_ref_local_ok (void)
{
  return !flag_pic || (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC);
}
/* Return true if the function being called was marked with attribute
   "noplt" or using -fno-plt and we are compiling for non-PIC.  We need
   to handle the non-PIC case in the backend because there is no easy
   interface for the front-end to force non-PLT calls to use the GOT.
   This is currently used only with 64-bit or 32-bit GOT32X ELF targets
   to call the function marked "noplt" indirectly.  */

static bool
ix86_nopic_noplt_attribute_p (rtx call_op)
{
  if (flag_pic || ix86_cmodel == CM_LARGE
      || !(TARGET_64BIT || HAVE_AS_IX86_GOT32X)
      || TARGET_MACHO || TARGET_SEH || TARGET_PECOFF
      || SYMBOL_REF_LOCAL_P (call_op))
    return false;

  tree symbol_decl = SYMBOL_REF_DECL (call_op);

  if (!flag_plt
      || (symbol_decl != NULL_TREE
	  && lookup_attribute ("noplt", DECL_ATTRIBUTES (symbol_decl))))
    return true;

  return false;
}
/* Helper to output the jmp/call.  */

static void
ix86_output_jmp_thunk_or_indirect (const char *thunk_name, const int regno)
{
  if (thunk_name != NULL)
    {
      if (REX_INT_REGNO_P (regno)
	  && ix86_indirect_branch_cs_prefix)
	fprintf (asm_out_file, "\tcs\n");
      fprintf (asm_out_file, "\tjmp\t");
      assemble_name (asm_out_file, thunk_name);
      putc ('\n', asm_out_file);
      if ((ix86_harden_sls & harden_sls_indirect_jmp))
	fputs ("\tint3\n", asm_out_file);
    }
  else
    output_indirect_thunk (regno);
}
/* Output indirect branch via a call and return thunk.  CALL_OP is a
   register which contains the branch target.  XASM is the assembly
   template for CALL_OP.  Branch is a tail call if SIBCALL_P is true.
   A normal call is converted to:

	call __x86_indirect_thunk_reg

   and a tail call is converted to:

	jmp __x86_indirect_thunk_reg
 */

static void
ix86_output_indirect_branch_via_reg (rtx call_op, bool sibcall_p)
{
  char thunk_name_buf[32];
  char *thunk_name;
  enum indirect_thunk_prefix need_prefix
    = indirect_thunk_need_prefix (current_output_insn);
  int regno = REGNO (call_op);

  if (cfun->machine->indirect_branch_type
      != indirect_branch_thunk_inline)
    {
      if (cfun->machine->indirect_branch_type == indirect_branch_thunk)
	SET_HARD_REG_BIT (indirect_thunks_used, regno);

      indirect_thunk_name (thunk_name_buf, regno, need_prefix, false);
      thunk_name = thunk_name_buf;
    }
  else
    thunk_name = NULL;

  if (sibcall_p)
    ix86_output_jmp_thunk_or_indirect (thunk_name, regno);
  else
    {
      if (thunk_name != NULL)
	{
	  if (REX_INT_REGNO_P (regno)
	      && ix86_indirect_branch_cs_prefix)
	    fprintf (asm_out_file, "\tcs\n");
	  fprintf (asm_out_file, "\tcall\t");
	  assemble_name (asm_out_file, thunk_name);
	  putc ('\n', asm_out_file);
	  return;
	}

      char indirectlabel1[32];
      char indirectlabel2[32];

      ASM_GENERATE_INTERNAL_LABEL (indirectlabel1,
				   INDIRECT_LABEL,
				   indirectlabelno++);
      ASM_GENERATE_INTERNAL_LABEL (indirectlabel2,
				   INDIRECT_LABEL,
				   indirectlabelno++);

      /* Jump.  */
      fputs ("\tjmp\t", asm_out_file);
      assemble_name_raw (asm_out_file, indirectlabel2);
      fputc ('\n', asm_out_file);

      ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel1);

      ix86_output_jmp_thunk_or_indirect (thunk_name, regno);

      ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel2);

      /* Call.  */
      fputs ("\tcall\t", asm_out_file);
      assemble_name_raw (asm_out_file, indirectlabel1);
      fputc ('\n', asm_out_file);
    }
}
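
/* Illustrative note (not part of the compiler): with -mindirect-branch=thunk
   the routine above turns "call *%rax" into a call through an out-of-line
   thunk, while the inline variant builds a local call/jmp pair, roughly:

	jmp	.LIND2
   .LIND1:
	<inline thunk body for the register>
   .LIND2:
	call	.LIND1

   The label names here are only placeholders; the real names come from
   ASM_GENERATE_INTERNAL_LABEL.  */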
/* Output indirect branch via a call and return thunk.  CALL_OP is
   the branch target.  XASM is the assembly template for CALL_OP.
   Branch is a tail call if SIBCALL_P is true.  A normal call is
   converted to:

	jmp __x86_indirect_thunk

   and a tail call is converted to:

	jmp __x86_indirect_thunk
 */

static void
ix86_output_indirect_branch_via_push (rtx call_op, const char *xasm,
				      bool sibcall_p)
{
  char thunk_name_buf[32];
  char *thunk_name;
  enum indirect_thunk_prefix need_prefix
    = indirect_thunk_need_prefix (current_output_insn);

  if (cfun->machine->indirect_branch_type
      != indirect_branch_thunk_inline)
    {
      if (cfun->machine->indirect_branch_type == indirect_branch_thunk)
	indirect_thunk_needed = true;
      indirect_thunk_name (thunk_name_buf, regno, need_prefix, false);
      thunk_name = thunk_name_buf;
    }
  else
    thunk_name = NULL;

  snprintf (push_buf, sizeof (push_buf), "push{%c}\t%s",
	    TARGET_64BIT ? 'q' : 'l', xasm);

  if (sibcall_p)
    {
      output_asm_insn (push_buf, &call_op);
      ix86_output_jmp_thunk_or_indirect (thunk_name, regno);
    }
  else
    {
      char indirectlabel1[32];
      char indirectlabel2[32];

      ASM_GENERATE_INTERNAL_LABEL (indirectlabel1,
				   INDIRECT_LABEL,
				   indirectlabelno++);
      ASM_GENERATE_INTERNAL_LABEL (indirectlabel2,
				   INDIRECT_LABEL,
				   indirectlabelno++);

      /* Jump.  */
      fputs ("\tjmp\t", asm_out_file);
      assemble_name_raw (asm_out_file, indirectlabel2);
      fputc ('\n', asm_out_file);

      ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel1);

      /* An external function may be called via GOT, instead of PLT.  */
      if (MEM_P (call_op))
	{
	  struct ix86_address parts;
	  rtx addr = XEXP (call_op, 0);
	  if (ix86_decompose_address (addr, &parts)
	      && parts.base == stack_pointer_rtx)
	    {
	      /* Since call will adjust stack by -UNITS_PER_WORD,
		 we must convert "disp(stack, index, scale)" to
		 "disp+UNITS_PER_WORD(stack, index, scale)".  */
	      if (parts.index)
		{
		  addr = gen_rtx_MULT (Pmode, parts.index,
				       GEN_INT (parts.scale));
		  addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				       addr);
		}
	      else
		addr = stack_pointer_rtx;

	      rtx disp;
	      if (parts.disp != NULL_RTX)
		disp = plus_constant (Pmode, parts.disp,
				      UNITS_PER_WORD);
	      else
		disp = GEN_INT (UNITS_PER_WORD);

	      addr = gen_rtx_PLUS (Pmode, addr, disp);
	      call_op = gen_rtx_MEM (GET_MODE (call_op), addr);
	    }
	}

      output_asm_insn (push_buf, &call_op);

      ix86_output_jmp_thunk_or_indirect (thunk_name, regno);

      ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel2);

      /* Call.  */
      fputs ("\tcall\t", asm_out_file);
      assemble_name_raw (asm_out_file, indirectlabel1);
      fputc ('\n', asm_out_file);
    }
}
/* Output indirect branch via a call and return thunk.  CALL_OP is
   the branch target.  XASM is the assembly template for CALL_OP.
   Branch is a tail call if SIBCALL_P is true.  */

static void
ix86_output_indirect_branch (rtx call_op, const char *xasm,
			     bool sibcall_p)
{
  if (REG_P (call_op))
    ix86_output_indirect_branch_via_reg (call_op, sibcall_p);
  else
    ix86_output_indirect_branch_via_push (call_op, xasm, sibcall_p);
}
/* Output indirect jump.  CALL_OP is the jump target.  */

const char *
ix86_output_indirect_jmp (rtx call_op)
{
  if (cfun->machine->indirect_branch_type != indirect_branch_keep)
    {
      /* We can't have red-zone since "call" in the indirect thunk
	 pushes the return address onto stack, destroying red-zone.  */
      if (ix86_red_zone_used)
	gcc_unreachable ();

      ix86_output_indirect_branch (call_op, "%0", true);
    }
  else
    output_asm_insn ("%!jmp\t%A0", &call_op);

  return (ix86_harden_sls & harden_sls_indirect_jmp) ? "int3" : "";
}
/* Output return instrumentation for current function if needed.  */

static void
output_return_instrumentation (void)
{
  if (ix86_instrument_return != instrument_return_none
      && flag_fentry
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (cfun->decl))
    {
      if (ix86_flag_record_return)
	fprintf (asm_out_file, "1:\n");
      switch (ix86_instrument_return)
	{
	case instrument_return_call:
	  fprintf (asm_out_file, "\tcall\t__return__\n");
	  break;
	case instrument_return_nop5:
	  /* 5 byte nop: nopl 0(%[re]ax,%[re]ax,1)  */
	  fprintf (asm_out_file, ASM_BYTE "0x0f, 0x1f, 0x44, 0x00, 0x00\n");
	  break;
	case instrument_return_none:
	  break;
	}

      if (ix86_flag_record_return)
	{
	  fprintf (asm_out_file, "\t.section __return_loc, \"a\",@progbits\n");
	  fprintf (asm_out_file, "\t.%s 1b\n", TARGET_64BIT ? "quad" : "long");
	  fprintf (asm_out_file, "\t.previous\n");
	}
    }
}
/* Output function return.  CALL_OP is the jump target.  Add a REP
   prefix to RET if LONG_P is true and function return is kept.  */

const char *
ix86_output_function_return (bool long_p)
{
  output_return_instrumentation ();

  if (cfun->machine->function_return_type != indirect_branch_keep)
    {
      char thunk_name[32];
      enum indirect_thunk_prefix need_prefix
	= indirect_thunk_need_prefix (current_output_insn);

      if (cfun->machine->function_return_type
	  != indirect_branch_thunk_inline)
	{
	  bool need_thunk = (cfun->machine->function_return_type
			     == indirect_branch_thunk);
	  indirect_thunk_name (thunk_name, INVALID_REGNUM, need_prefix,
			       true);
	  indirect_return_needed |= need_thunk;
	  fprintf (asm_out_file, "\tjmp\t");
	  assemble_name (asm_out_file, thunk_name);
	  putc ('\n', asm_out_file);
	}
      else
	output_indirect_thunk (INVALID_REGNUM);

      return "";
    }

  output_asm_insn (long_p ? "rep%; ret" : "ret", nullptr);
  return (ix86_harden_sls & harden_sls_return) ? "int3" : "";
}
/* Output indirect function return.  RET_OP is the function return
   value.  */

const char *
ix86_output_indirect_function_return (rtx ret_op)
{
  if (cfun->machine->function_return_type != indirect_branch_keep)
    {
      char thunk_name[32];
      enum indirect_thunk_prefix need_prefix
	= indirect_thunk_need_prefix (current_output_insn);
      unsigned int regno = REGNO (ret_op);
      gcc_assert (regno == CX_REG);

      if (cfun->machine->function_return_type
	  != indirect_branch_thunk_inline)
	{
	  bool need_thunk = (cfun->machine->function_return_type
			     == indirect_branch_thunk);
	  indirect_thunk_name (thunk_name, regno, need_prefix, true);

	  if (need_thunk)
	    {
	      indirect_return_via_cx = true;
	      SET_HARD_REG_BIT (indirect_thunks_used, CX_REG);
	    }
	  fprintf (asm_out_file, "\tjmp\t");
	  assemble_name (asm_out_file, thunk_name);
	  putc ('\n', asm_out_file);
	}
      else
	output_indirect_thunk (regno);
    }
  else
    {
      output_asm_insn ("%!jmp\t%A0", &ret_op);
      if (ix86_harden_sls & harden_sls_indirect_jmp)
	fputs ("\tint3\n", asm_out_file);
    }

  return "";
}
/* Output the assembly for a call instruction.  */

const char *
ix86_output_call_insn (rtx_insn *insn, rtx call_op)
{
  bool direct_p = constant_call_address_operand (call_op, VOIDmode);
  bool output_indirect_p
    = (!TARGET_SEH
       && cfun->machine->indirect_branch_type != indirect_branch_keep);
  bool seh_nop_p = false;
  const char *xasm;

  if (SIBLING_CALL_P (insn))
    {
      output_return_instrumentation ();
      if (direct_p)
	{
	  if (ix86_nopic_noplt_attribute_p (call_op))
	    {
	      direct_p = false;
	      if (TARGET_64BIT)
		{
		  if (output_indirect_p)
		    xasm = "{%p0@GOTPCREL(%%rip)|[QWORD PTR %p0@GOTPCREL[rip]]}";
		  else
		    xasm = "%!jmp\t{*%p0@GOTPCREL(%%rip)|[QWORD PTR %p0@GOTPCREL[rip]]}";
		}
	      else
		{
		  if (output_indirect_p)
		    xasm = "{%p0@GOT|[DWORD PTR %p0@GOT]}";
		  else
		    xasm = "%!jmp\t{*%p0@GOT|[DWORD PTR %p0@GOT]}";
		}
	    }
	  else
	    xasm = "%!jmp\t%P0";
	}
      /* SEH epilogue detection requires the indirect branch case
	 to include REX.W.  */
      else if (TARGET_SEH)
	xasm = "%!rex.W jmp\t%A0";
      else
	{
	  if (output_indirect_p)
	    xasm = "%0";
	  else
	    xasm = "%!jmp\t%A0";
	}

      if (output_indirect_p && !direct_p)
	ix86_output_indirect_branch (call_op, xasm, true);
      else
	{
	  output_asm_insn (xasm, &call_op);
	  if (!direct_p
	      && (ix86_harden_sls & harden_sls_indirect_jmp))
	    return "int3";
	}
      return "";
    }

  /* SEH unwinding can require an extra nop to be emitted in several
     circumstances.  Determine if we have one of those.  */
  if (TARGET_SEH)
    {
      rtx_insn *i;

      for (i = NEXT_INSN (insn); i; i = NEXT_INSN (i))
	{
	  /* Prevent a catch region from being adjacent to a jump that would
	     be interpreted as an epilogue sequence by the unwinder.  */
	  if (JUMP_P (i) && CROSSING_JUMP_P (i))
	    {
	      seh_nop_p = true;
	      break;
	    }

	  /* If we get to another real insn, we don't need the nop.  */
	  if (INSN_P (i))
	    break;

	  /* If we get to the epilogue note, prevent a catch region from
	     being adjacent to the standard epilogue sequence.  Note that,
	     if non-call exceptions are enabled, we already did it during
	     epilogue expansion, or else, if the insn can throw internally,
	     we already did it during the reorg pass.  */
	  if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
	      && !flag_non_call_exceptions
	      && !can_throw_internal (insn))
	    {
	      seh_nop_p = true;
	      break;
	    }
	}

      /* If we didn't find a real insn following the call, prevent the
	 unwinder from looking into the next function.  */
      if (i == NULL)
	seh_nop_p = true;
    }

  if (direct_p)
    {
      if (ix86_nopic_noplt_attribute_p (call_op))
	{
	  direct_p = false;
	  if (TARGET_64BIT)
	    {
	      if (output_indirect_p)
		xasm = "{%p0@GOTPCREL(%%rip)|[QWORD PTR %p0@GOTPCREL[rip]]}";
	      else
		xasm = "%!call\t{*%p0@GOTPCREL(%%rip)|[QWORD PTR %p0@GOTPCREL[rip]]}";
	    }
	  else
	    {
	      if (output_indirect_p)
		xasm = "{%p0@GOT|[DWORD PTR %p0@GOT]}";
	      else
		xasm = "%!call\t{*%p0@GOT|[DWORD PTR %p0@GOT]}";
	    }
	}
      else
	xasm = "%!call\t%P0";
    }
  else
    {
      if (output_indirect_p)
	xasm = "%0";
      else
	xasm = "%!call\t%A0";
    }

  if (output_indirect_p && !direct_p)
    ix86_output_indirect_branch (call_op, xasm, false);
  else
    output_asm_insn (xasm, &call_op);

  if (seh_nop_p)
    return "nop";

  return "";
}
/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

rtx
assign_386_stack_local (machine_mode mode, enum ix86_stack_slot n)
{
  struct stack_local_entry *s;
  int align = 0;

  gcc_assert (n < MAX_386_STACK_LOCALS);

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return validize_mem (copy_rtx (s->rtl));

  /* For DImode with SLOT_FLOATxFDI_387 use 32-bit
     alignment with -m32 -mpreferred-stack-boundary=2.  */
  if (mode == DImode
      && !TARGET_64BIT
      && n == SLOT_FLOATxFDI_387
      && ix86_preferred_stack_boundary < GET_MODE_ALIGNMENT (DImode))
    align = 32;

  s = ggc_alloc<stack_local_entry> ();
  s->n = n;
  s->mode = mode;
  s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), align);

  s->next = ix86_stack_locals;
  ix86_stack_locals = s;
  return validize_mem (copy_rtx (s->rtl));
}
static void
ix86_instantiate_decls (void)
{
  struct stack_local_entry *s;

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->rtl != NULL_RTX)
      instantiate_decl_rtl (s->rtl);
}
/* Check whether x86 address PARTS is a pc-relative address.  */

static bool
ix86_rip_relative_addr_p (struct ix86_address *parts)
{
  rtx base, index, disp;

  base = parts->base;
  index = parts->index;
  disp = parts->disp;

  if (disp && !base && !index)
    {
      if (TARGET_64BIT)
	{
	  rtx symbol = disp;

	  if (GET_CODE (disp) == CONST)
	    symbol = XEXP (disp, 0);
	  if (GET_CODE (symbol) == PLUS
	      && CONST_INT_P (XEXP (symbol, 1)))
	    symbol = XEXP (symbol, 0);

	  if (GET_CODE (symbol) == LABEL_REF
	      || (GET_CODE (symbol) == SYMBOL_REF
		  && SYMBOL_REF_TLS_MODEL (symbol) == 0)
	      || (GET_CODE (symbol) == UNSPEC
		  && (XINT (symbol, 1) == UNSPEC_GOTPCREL
		      || XINT (symbol, 1) == UNSPEC_PCREL
		      || XINT (symbol, 1) == UNSPEC_GOTNTPOFF)))
	    return true;
	}
    }
  return false;
}
/* Calculate the length of the memory address in the instruction encoding.
   Includes addr32 prefix, does not include the one-byte modrm, opcode,
   or other prefixes.  We never generate addr32 prefix for LEA insn.  */

int
memory_address_length (rtx addr, bool lea)
{
  struct ix86_address parts;
  rtx base, index, disp;
  int len;
  int ok;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC
      || GET_CODE (addr) == PRE_MODIFY
      || GET_CODE (addr) == POST_MODIFY)
    return 0;

  ok = ix86_decompose_address (addr, &parts);
  gcc_assert (ok);

  len = (parts.seg == ADDR_SPACE_GENERIC) ? 0 : 1;

  /*  If this is not LEA instruction, add the length of addr32 prefix.  */
  if (TARGET_64BIT && !lea
      && (SImode_address_operand (addr, VOIDmode)
	  || (parts.base && GET_MODE (parts.base) == SImode)
	  || (parts.index && GET_MODE (parts.index) == SImode)))
    len++;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;

  if (base && SUBREG_P (base))
    base = SUBREG_REG (base);
  if (index && SUBREG_P (index))
    index = SUBREG_REG (index);

  gcc_assert (base == NULL_RTX || REG_P (base));
  gcc_assert (index == NULL_RTX || REG_P (index));

  /* Rule of thumb:
       - esp as the base always wants an index,
       - ebp as the base always wants a displacement,
       - r12 as the base always wants an index,
       - r13 as the base always wants a displacement.  */

  /* Register Indirect.  */
  if (base && !index && !disp)
    {
      /* esp (for its index) and ebp (for its displacement) need
	 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
	 code.  */
      if (base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || REGNO (base) == SP_REG
	  || REGNO (base) == BP_REG
	  || REGNO (base) == R12_REG
	  || REGNO (base) == R13_REG)
	len++;
    }

  /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
     is not disp32, but disp32(%rip), so for disp32
     SIB byte is needed, unless print_operand_address
     optimizes it into disp32(%rip) or (%rip) is implied
     by UNSPEC.  */
  else if (disp && !base && !index)
    {
      len += 4;
      if (!ix86_rip_relative_addr_p (&parts))
	len++;
    }
  else
    {
      /* Find the length of the displacement constant.  */
      if (disp)
	{
	  if (base && satisfies_constraint_K (disp))
	    len += 1;
	  else
	    len += 4;
	}
      /* ebp always wants a displacement.  Similarly r13.  */
      else if (base && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
	len++;

      /* An index requires the two-byte modrm form....  */
      if (index
	  /* ...like esp (or r12), which always wants an index.  */
	  || base == arg_pointer_rtx
	  || base == frame_pointer_rtx
	  || (base && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
	len++;
    }

  return len;
}
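
/* Illustrative sketch (not part of the compiler): the register-indirect
   special cases from memory_address_length above.  The helper name and
   interface are hypothetical; register numbers are the hardware encodings
   (low bits 4 = sp/r12, 5 = bp/r13).  */
#if 0
/* Extra bytes beyond the ModRM byte for an address of the form (reg).  */
static int
reg_indirect_extra_bytes (int hw_regno)
{
  int lowbits = hw_regno & 7;
  if (lowbits == 4)	/* esp/rsp and r12 need a SIB byte.  */
    return 1;
  if (lowbits == 5)	/* ebp/rbp and r13 need a zero disp8.  */
    return 1;
  return 0;
}
#endif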
/* Compute default value for "length_immediate" attribute.  When SHORTFORM
   is set, expect that insn have 8bit immediate alternative.  */

int
ix86_attr_length_immediate_default (rtx_insn *insn, bool shortform)
{
  int len = 0;
  int i;

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (CONSTANT_P (recog_data.operand[i]))
      {
	enum attr_mode mode = get_attr_mode (insn);

	if (shortform && CONST_INT_P (recog_data.operand[i]))
	  {
	    HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);

	    switch (mode)
	      {
	      case MODE_HI:
		ival = trunc_int_for_mode (ival, HImode);
		break;
	      case MODE_SI:
		ival = trunc_int_for_mode (ival, SImode);
		break;
	      default:
		break;
	      }

	    if (IN_RANGE (ival, -128, 127))
	      {
		len = 1;
		continue;
	      }
	  }

	switch (mode)
	  {
	  /* Immediates for DImode instructions are encoded
	     as 32bit sign extended values.  */

	  default:
	    fatal_insn ("unknown insn mode", insn);
	  }
      }
  return len;
}
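
/* Illustrative sketch (not part of the compiler): the imm8-vs-full-width
   decision made above for short-form alternatives.  The helper is
   hypothetical; values that fit in a signed byte take a 1-byte immediate,
   otherwise the immediate is as wide as the operation, with 64-bit
   operations using a sign-extended imm32.  */
#if 0
static int
imm_bytes (long long ival, int op_bytes, int shortform)
{
  if (shortform && ival >= -128 && ival <= 127)
    return 1;
  return op_bytes > 4 ? 4 : op_bytes;	/* no imm64 in this context */
}
#endif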
/* Compute default value for "length_address" attribute.  */

int
ix86_attr_length_address_default (rtx_insn *insn)
{
  int i;

  if (get_attr_type (insn) == TYPE_LEA)
    {
      rtx set = PATTERN (insn), addr;

      if (GET_CODE (set) == PARALLEL)
	set = XVECEXP (set, 0, 0);

      gcc_assert (GET_CODE (set) == SET);

      addr = SET_SRC (set);

      return memory_address_length (addr, true);
    }

  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    {
      rtx op = recog_data.operand[i];
      if (MEM_P (op))
	{
	  constrain_operands_cached (insn, reload_completed);
	  if (which_alternative != -1)
	    {
	      const char *constraints = recog_data.constraints[i];
	      int alt = which_alternative;

	      while (*constraints == '=' || *constraints == '+')
		constraints++;
	      while (alt-- > 0)
		while (*constraints++ != ',')
		  ;
	      /* Skip ignored operands.  */
	      if (*constraints == 'X')
		continue;
	    }

	  int len = memory_address_length (XEXP (op, 0), false);

	  /* Account for segment prefix for non-default addr spaces.  */
	  if (!ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (op)))
	    len++;

	  return len;
	}
    }
  return 0;
}
/* Compute default value for "length_vex" attribute.  It includes
   2 or 3 byte VEX prefix and 1 opcode byte.  */

int
ix86_attr_length_vex_default (rtx_insn *insn, bool has_0f_opcode,
			      bool has_vex_w)
{
  int i, reg_only = 2 + 1;
  bool has_mem = false;

  /* Only 0f opcode can use 2 byte VEX prefix and VEX W bit uses 3
     byte VEX prefix.  */
  if (!has_0f_opcode || has_vex_w)
    return 3 + 1;

  /* We can always use 2 byte VEX prefix in 32bit.  */
  if (!TARGET_64BIT)
    return 2 + 1;

  extract_insn_cached (insn);

  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (REG_P (recog_data.operand[i]))
      {
	/* REX.W bit uses 3 byte VEX prefix.  */
	if (GET_MODE (recog_data.operand[i]) == DImode
	    && GENERAL_REG_P (recog_data.operand[i]))
	  return 3 + 1;

	/* REX.B bit requires 3-byte VEX.  Right here we don't know which
	   operand will be encoded using VEX.B, so be conservative.  */
	if (REX_INT_REGNO_P (recog_data.operand[i])
	    || REX_SSE_REGNO_P (recog_data.operand[i]))
	  reg_only = 3 + 1;
      }
    else if (MEM_P (recog_data.operand[i]))
      {
	/* REX.X or REX.B bits use 3 byte VEX prefix.  */
	if (x86_extended_reg_mentioned_p (recog_data.operand[i]))
	  return 3 + 1;

	has_mem = true;
      }

  return has_mem ? 2 + 1 : reg_only;
}
static bool
ix86_class_likely_spilled_p (reg_class_t);

/* Returns true if lhs of insn is HW function argument register and set up
   is_spilled to true if it is likely spilled HW register.  */
static bool
insn_is_function_arg (rtx insn, bool* is_spilled)
{
  rtx dst;

  if (!NONDEBUG_INSN_P (insn))
    return false;
  /* Call instructions are not movable, ignore it.  */
  if (CALL_P (insn))
    return false;
  insn = PATTERN (insn);
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);
  if (GET_CODE (insn) != SET)
    return false;
  dst = SET_DEST (insn);
  if (REG_P (dst) && HARD_REGISTER_P (dst)
      && ix86_function_arg_regno_p (REGNO (dst)))
    {
      /* Is it likely spilled HW register?  */
      if (!TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dst))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dst))))
	*is_spilled = true;
      return true;
    }
  return false;
}
/* Add output dependencies for chain of function adjacent arguments if only
   there is a move to likely spilled HW register.  Return first argument
   if at least one dependence was added or NULL otherwise.  */
static rtx_insn *
add_parameter_dependencies (rtx_insn *call, rtx_insn *head)
{
  rtx_insn *insn;
  rtx_insn *last = call;
  rtx_insn *first_arg = NULL;
  bool is_spilled = false;

  head = PREV_INSN (head);

  /* Find nearest to call argument passing instruction.  */
  while (true)
    {
      last = PREV_INSN (last);
      if (last == head)
	return NULL;
      if (!NONDEBUG_INSN_P (last))
	continue;
      if (insn_is_function_arg (last, &is_spilled))
	break;
      return NULL;
    }

  first_arg = last;
  while (true)
    {
      insn = PREV_INSN (last);
      if (!INSN_P (insn))
	break;
      if (insn == head)
	break;
      if (!NONDEBUG_INSN_P (insn))
	{
	  last = insn;
	  continue;
	}
      if (insn_is_function_arg (insn, &is_spilled))
	{
	  /* Add output dependence between two function arguments if chain
	     of output arguments contains likely spilled HW registers.  */
	  if (is_spilled)
	    add_dependence (first_arg, insn, REG_DEP_OUTPUT);
	  first_arg = last = insn;
	}
      else
	break;
    }
  if (!is_spilled)
    return NULL;
  return first_arg;
}
/* Add output or anti dependency from insn to first_arg to restrict its code
   motion.  */
static void
avoid_func_arg_motion (rtx_insn *first_arg, rtx_insn *insn)
{
  rtx set;
  rtx tmp;

  set = single_set (insn);
  if (!set)
    return;
  tmp = SET_DEST (set);
  if (MEM_P (tmp))
    {
      /* Add output dependency to the first function argument.  */
      add_dependence (first_arg, insn, REG_DEP_OUTPUT);
      return;
    }
  /* Add anti dependency.  */
  add_dependence (first_arg, insn, REG_DEP_ANTI);
}

/* Avoid cross block motion of function argument through adding dependency
   from the first non-jump instruction in bb.  */
static void
add_dependee_for_func_arg (rtx_insn *arg, basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (insn)
    {
      if (NONDEBUG_INSN_P (insn) && NONJUMP_INSN_P (insn))
	{
	  rtx set = single_set (insn);
	  if (set)
	    {
	      avoid_func_arg_motion (arg, insn);
	      return;
	    }
	}
      if (insn == BB_HEAD (bb))
	return;
      insn = PREV_INSN (insn);
    }
}
/* Hook for pre-reload schedule - avoid motion of function arguments
   passed in likely spilled HW registers.  */
static void
ix86_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;
  rtx_insn *first_arg = NULL;

  if (reload_completed)
    return;

  while (head != tail && DEBUG_INSN_P (head))
    head = NEXT_INSN (head);
  for (insn = tail; insn != head; insn = PREV_INSN (insn))
    if (INSN_P (insn) && CALL_P (insn))
      {
	first_arg = add_parameter_dependencies (insn, head);
	if (first_arg)
	  {
	    /* Add dependee for first argument to predecessors if only
	       region contains more than one block.  */
	    basic_block bb = BLOCK_FOR_INSN (insn);
	    int rgn = CONTAINING_RGN (bb->index);
	    int nr_blks = RGN_NR_BLOCKS (rgn);
	    /* Skip trivial regions and region head blocks that can have
	       predecessors outside of region.  */
	    if (nr_blks > 1 && BLOCK_TO_BB (bb->index) != 0)
	      {
		edge e;
		edge_iterator ei;

		/* Regions are SCCs with the exception of selective
		   scheduling with pipelining of outer blocks enabled.
		   So also check that immediate predecessors of a non-head
		   block are in the same region.  */
		FOR_EACH_EDGE (e, ei, bb->preds)
		  {
		    /* Avoid creating loop-carried dependencies through
		       using topological ordering in the region.  */
		    if (rgn == CONTAINING_RGN (e->src->index)
			&& BLOCK_TO_BB (bb->index) > BLOCK_TO_BB (e->src->index))
		      add_dependee_for_func_arg (first_arg, e->src);
		  }
	      }
	  }
      }
    else if (first_arg)
      avoid_func_arg_motion (first_arg, insn);
}
/* Hook for pre-reload schedule - set priority of moves from likely spilled
   HW registers to maximum, to schedule them as soon as possible.  These are
   moves from function argument registers at the top of the function entry
   and moves from function return value registers after call.  */
static int
ix86_adjust_priority (rtx_insn *insn, int priority)
{
  rtx set;

  if (reload_completed)
    return priority;

  if (!NONDEBUG_INSN_P (insn))
    return priority;

  set = single_set (insn);
  if (set)
    {
      rtx tmp = SET_SRC (set);
      if (REG_P (tmp)
	  && HARD_REGISTER_P (tmp)
	  && !TEST_HARD_REG_BIT (fixed_reg_set, REGNO (tmp))
	  && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (tmp))))
	return current_sched_info->sched_max_insns_priority;
    }

  return priority;
}
/* Prepare for scheduling pass.  */
static void
ix86_sched_init_global (FILE *, int, int)
{
  /* Install scheduling hooks for current CPU.  Some of these hooks are used
     in time-critical parts of the scheduler, so we only set them up when
     they are actually used.  */
  switch (ix86_tune)
    {
    case PROCESSOR_CORE2:
    case PROCESSOR_NEHALEM:
    case PROCESSOR_SANDYBRIDGE:
    case PROCESSOR_HASWELL:
    case PROCESSOR_TREMONT:
    case PROCESSOR_ALDERLAKE:
    case PROCESSOR_GENERIC:
      /* Do not perform multipass scheduling for pre-reload schedule
	 to save compile time.  */
      if (reload_completed)
	{
	  ix86_core2i7_init_hooks ();
	  break;
	}
      /* Fall through.  */
    default:
      targetm.sched.dfa_post_advance_cycle = NULL;
      targetm.sched.first_cycle_multipass_init = NULL;
      targetm.sched.first_cycle_multipass_begin = NULL;
      targetm.sched.first_cycle_multipass_issue = NULL;
      targetm.sched.first_cycle_multipass_backtrack = NULL;
      targetm.sched.first_cycle_multipass_end = NULL;
      targetm.sched.first_cycle_multipass_fini = NULL;
      break;
    }
}
/* Implement TARGET_STATIC_RTX_ALIGNMENT.  */

static HOST_WIDE_INT
ix86_static_rtx_alignment (machine_mode mode)
{
  if (mode == DFmode)
    return 64;
  if (ALIGN_MODE_128 (mode))
    return MAX (128, GET_MODE_ALIGNMENT (mode));
  return GET_MODE_ALIGNMENT (mode);
}

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
ix86_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
      || TREE_CODE (exp) == INTEGER_CST)
    {
      machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
      HOST_WIDE_INT mode_align = ix86_static_rtx_alignment (mode);
      return MAX (mode_align, align);
    }
  else if (!optimize_size && TREE_CODE (exp) == STRING_CST
	   && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
    return BITS_PER_WORD;

  return align;
}
/* Implement TARGET_EMPTY_RECORD_P.  */

static bool
ix86_is_empty_record (const_tree type)
{
  if (!TARGET_64BIT)
    return false;
  return default_is_empty_record (type);
}

/* Implement TARGET_WARN_PARAMETER_PASSING_ABI.  */

static void
ix86_warn_parameter_passing_abi (cumulative_args_t cum_v, tree type)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (!cum->warn_empty)
    return;

  if (!TYPE_EMPTY_P (type))
    return;

  /* Don't warn if the function isn't visible outside of the TU.  */
  if (cum->decl && !TREE_PUBLIC (cum->decl))
    return;

  const_tree ctx = get_ultimate_context (cum->decl);
  if (ctx != NULL_TREE
      && !TRANSLATION_UNIT_WARN_EMPTY_P (ctx))
    return;

  /* If the actual size of the type is zero, then there is no change
     in how objects of this size are passed.  */
  if (int_size_in_bytes (type) == 0)
    return;

  warning (OPT_Wabi, "empty class %qT parameter passing ABI "
	   "changes in %<-fabi-version=12%> (GCC 8)", type);

  /* Only warn once.  */
  cum->warn_empty = false;
}
/* This hook returns name of multilib ABI.  */

static const char *
ix86_get_multilib_abi_name (void)
{
  if (!(TARGET_64BIT_P (ix86_isa_flags)))
    return "i386";
  else if (TARGET_X32_P (ix86_isa_flags))
    return "x32";
  else
    return "x86_64";
}

/* Compute the alignment for a variable for Intel MCU psABI.  TYPE is
   the data type, and ALIGN is the alignment that the object would
   ordinarily have.  */

static int
iamcu_alignment (tree type, int align)
{
  machine_mode mode;

  if (align < 32 || TYPE_USER_ALIGN (type))
    return align;

  /* Intel MCU psABI specifies scalar types > 4 bytes aligned to 4
     bytes.  */
  type = strip_array_types (type);
  if (TYPE_ATOMIC (type))
    return align;

  mode = TYPE_MODE (type);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_DECIMAL_FLOAT:
      return 32;
    default:
      return align;
    }
}
/* Compute the alignment for a static variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */

unsigned int
ix86_data_alignment (tree type, unsigned int align, bool opt)
{
  /* GCC 4.8 and earlier used to incorrectly assume this alignment even
     for symbols from other compilation units or symbols that don't need
     to bind locally.  In order to preserve some ABI compatibility with
     those compilers, ensure we don't decrease alignment from what we
     used to assume.  */

  unsigned int max_align_compat = MIN (256, MAX_OFILE_ALIGNMENT);

  /* A data structure, equal or greater than the size of a cache line
     (64 bytes in the Pentium 4 and other recent Intel processors, including
     processors based on Intel Core microarchitecture) should be aligned
     so that its base address is a multiple of a cache line size.  */

  unsigned int max_align
    = MIN ((unsigned) ix86_tune_cost->prefetch_block * 8, MAX_OFILE_ALIGNMENT);

  if (max_align < BITS_PER_WORD)
    max_align = BITS_PER_WORD;

  switch (ix86_align_data_type)
    {
    case ix86_align_data_type_abi: opt = false; break;
    case ix86_align_data_type_compat: max_align = BITS_PER_WORD; break;
    case ix86_align_data_type_cacheline: break;
    }

  if (TARGET_IAMCU)
    align = iamcu_alignment (type, align);

  if (opt
      && AGGREGATE_TYPE_P (type)
      && TYPE_SIZE (type)
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
    {
      if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align_compat)
	  && align < max_align_compat)
	align = max_align_compat;
      if (wi::geu_p (wi::to_wide (TYPE_SIZE (type)), max_align)
	  && align < max_align)
	align = max_align;
    }

  /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to 16byte boundary.  */
  if (TARGET_64BIT)
    {
      if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
	  && align < 128)
	return 128;
    }

  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if (RECORD_OR_UNION_TYPE_P (type)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (SCALAR_FLOAT_TYPE_P (type) || VECTOR_TYPE_P (type)
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }

  return align;
}
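
/* Illustrative note (not part of the compiler): with the defaults above, a
   64-bit target aligns e.g. "static char buf[100];" to 16 bytes (the
   x86-64 ABI rule for arrays of at least 16 bytes), and a sufficiently
   large aggregate is bumped up to the cache-line-derived maximum computed
   from ix86_tune_cost->prefetch_block.  */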
/* Implement TARGET_LOWER_LOCAL_DECL_ALIGNMENT.  */
static void
ix86_lower_local_decl_alignment (tree decl)
{
  unsigned int new_align = ix86_local_alignment (decl, VOIDmode,
						 DECL_ALIGN (decl), true);
  if (new_align < DECL_ALIGN (decl))
    SET_DECL_ALIGN (decl, new_align);
}
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */

unsigned int
ix86_local_alignment (tree exp, machine_mode mode,
		      unsigned int align, bool may_lower)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if (!TARGET_64BIT
      && align == 64
      && ix86_preferred_stack_boundary < 64
      && (mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || (!TYPE_USER_ALIGN (type)
		    && !TYPE_ATOMIC (strip_array_types (type))))
      && (!decl || !DECL_USER_ALIGN (decl)))
    align = 32;

  /* If TYPE is NULL, we are allocating a stack slot for caller-save
     register in MODE.  We will return the largest alignment of XF
     and DF.  */
  if (!type)
    {
      if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
	align = GET_MODE_ALIGNMENT (DFmode);
      return align;
    }

  /* Don't increase alignment for Intel MCU psABI.  */
  if (TARGET_IAMCU)
    return align;

  /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to 16byte boundary.  Exact wording is:

     An array uses the same alignment as its elements, except that a local or
     global array variable of length at least 16 bytes or
     a C99 variable-length array variable always has alignment of at least 16 bytes.

     This was added to allow use of aligned SSE instructions at arrays.  This
     rule is meant for static storage (where compiler cannot do the analysis
     by itself).  We follow it for automatic variables only when convenient.
     We fully control everything in the function compiled and functions from
     other unit cannot rely on the alignment.

     Exclude va_list type.  It is the common case of local array where
     we cannot benefit from the alignment.

     TODO: Probably one should optimize for size only when var is not escaping.  */
  if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
      && TARGET_SSE)
    {
      if (AGGREGATE_TYPE_P (type)
	  && (va_list_type_node == NULL_TREE
	      || (TYPE_MAIN_VARIANT (type)
		  != TYPE_MAIN_VARIANT (va_list_type_node)))
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && wi::geu_p (wi::to_wide (TYPE_SIZE (type)), 128)
	  && align < 128)
	return 128;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
	return 128;
    }
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      if (TYPE_MODE (type) == DCmode && align < 64)
	return 64;
      if ((TYPE_MODE (type) == XCmode
	   || TYPE_MODE (type) == TCmode) && align < 128)
	return 128;
    }
  else if (RECORD_OR_UNION_TYPE_P (type)
	   && TYPE_FIELDS (type))
    {
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
	return 128;
    }
  else if (SCALAR_FLOAT_TYPE_P (type) || VECTOR_TYPE_P (type)
	   || TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_MODE (type) == DFmode && align < 64)
	return 64;
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
	return 128;
    }

  return align;
}
/* Compute the minimum required alignment for dynamic stack realignment
   purposes for a local variable, parameter or a stack slot.  EXP is
   the data type or decl itself, MODE is its mode and ALIGN is the
   alignment that the object would ordinarily have.  */

unsigned int
ix86_minimum_alignment (tree exp, machine_mode mode,
			unsigned int align)
{
  tree type, decl;

  if (exp && DECL_P (exp))
    {
      type = TREE_TYPE (exp);
      decl = exp;
    }
  else
    {
      type = exp;
      decl = NULL;
    }

  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;

  /* Don't do dynamic stack realignment for long long objects with
     -mpreferred-stack-boundary=2.  */
  if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
      && (!type || (!TYPE_USER_ALIGN (type)
		    && !TYPE_ATOMIC (strip_array_types (type))))
      && (!decl || !DECL_USER_ALIGN (decl)))
    {
      gcc_checking_assert (!TARGET_STV);
      return 32;
    }

  return align;
}
/* Find a location for the static chain incoming to a nested function.
   This is a register, unless all free registers are used by arguments.  */

static rtx
ix86_static_chain (const_tree fndecl_or_type, bool incoming_p)
{
  unsigned regno;

  if (TARGET_64BIT)
    {
      /* We always use R10 in 64-bit mode.  */
      regno = R10_REG;
    }
  else
    {
      const_tree fntype, fndecl;
      unsigned int ccvt;

      /* By default in 32-bit mode we use ECX to pass the static chain.  */
      regno = CX_REG;

      if (TREE_CODE (fndecl_or_type) == FUNCTION_DECL)
	{
	  fntype = TREE_TYPE (fndecl_or_type);
	  fndecl = fndecl_or_type;
	}
      else
	{
	  fntype = fndecl_or_type;
	  fndecl = NULL;
	}

      ccvt = ix86_get_callcvt (fntype);
      if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	{
	  /* Fastcall functions use ecx/edx for arguments, which leaves
	     us with EAX for the static chain.
	     Thiscall functions use ecx for arguments, which also
	     leaves us with EAX for the static chain.  */
	  regno = AX_REG;
	}
      else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	{
	  /* Thiscall functions use ecx for arguments, which leaves
	     us with EAX and EDX for the static chain.
	     We are using for abi-compatibility EAX.  */
	  regno = AX_REG;
	}
      else if (ix86_function_regparm (fntype, fndecl) == 3)
	{
	  /* For regparm 3, we have no free call-clobbered registers in
	     which to store the static chain.  In order to implement this,
	     we have the trampoline push the static chain to the stack.
	     However, we can't push a value below the return address when
	     we call the nested function directly, so we have to use an
	     alternate entry point.  For this we use ESI, and have the
	     alternate entry point push ESI, so that things appear the
	     same once we're executing the nested function.  */
	  if (incoming_p)
	    {
	      if (fndecl == current_function_decl
		  && !ix86_static_chain_on_stack)
		{
		  gcc_assert (!reload_completed);
		  ix86_static_chain_on_stack = true;
		}
	      return gen_frame_mem (SImode,
				    plus_constant (Pmode,
						   arg_pointer_rtx, -8));
	    }
	  regno = SI_REG;
	}
    }

  return gen_rtx_REG (Pmode, regno);
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNDECL is the decl of the target address; M_TRAMP is a MEM for
   the trampoline, and CHAIN_VALUE is an RTX for the static chain
   to be passed to the target function.  */

static void
ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, fnaddr;
  int offset = 0;
  bool need_endbr = (flag_cf_protection & CF_BRANCH);

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

  if (TARGET_64BIT)
    {
      int opcode;
      int size;

      if (need_endbr)
	{
	  /* Insert ENDBR64.  */
	  mem = adjust_address (m_tramp, SImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xfa1e0ff3, SImode));
	  offset += 4;
	}

      /* Load the function address to r11.  Try to load address using
	 the shorter movl instead of movabs.  We may want to support
	 movq for kernel mode, but kernel does not use trampolines at
	 the moment.  FNADDR is a 32bit address and may not be in
	 DImode when ptr_mode == SImode.  Always use movl in this
	 case.  */
      if (ptr_mode == SImode
	  || x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	{
	  fnaddr = copy_addr_to_reg (fnaddr);

	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb41, HImode));

	  mem = adjust_address (m_tramp, SImode, offset + 2);
	  emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
	  offset += 6;
	}
      else
	{
	  mem = adjust_address (m_tramp, HImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xbb49, HImode));

	  mem = adjust_address (m_tramp, DImode, offset + 2);
	  emit_move_insn (mem, fnaddr);
	  offset += 10;
	}

      /* Load static chain using movabs to r10.  Use the shorter movl
	 instead of movabs when ptr_mode == SImode.  */
      if (ptr_mode == SImode)
	{
	  opcode = 0xba41;
	  size = 6;
	}
      else
	{
	  opcode = 0xba49;
	  size = 10;
	}

      mem = adjust_address (m_tramp, HImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, HImode));

      mem = adjust_address (m_tramp, ptr_mode, offset + 2);
      emit_move_insn (mem, chain_value);
      offset += size;

      /* Jump to r11; the last (unused) byte is a nop, only there to
	 pad the write out to a single 32-bit store.  */
      mem = adjust_address (m_tramp, SImode, offset);
      emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
      offset += 4;
    }
  else
    {
      rtx disp, chain;
      int opcode;

      /* Depending on the static chain location, either load a register
	 with a constant, or push the constant to the stack.  All of the
	 instructions are the same size.  */
      chain = ix86_static_chain (fndecl, true);

      switch (REGNO (chain))
	{
	case AX_REG:
	  opcode = 0xb8; break;
	case CX_REG:
	  opcode = 0xb9; break;
	default:
	  gcc_unreachable ();
	}

      if (need_endbr)
	{
	  /* Insert ENDBR32.  */
	  mem = adjust_address (m_tramp, SImode, offset);
	  emit_move_insn (mem, gen_int_mode (0xfb1e0ff3, SImode));
	  offset += 4;
	}

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (opcode, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);
      emit_move_insn (mem, chain_value);
      offset += 5;

      mem = adjust_address (m_tramp, QImode, offset);
      emit_move_insn (mem, gen_int_mode (0xe9, QImode));

      mem = adjust_address (m_tramp, SImode, offset + 1);

      /* Compute offset from the end of the jmp to the target function.
	 In the case in which the trampoline stores the static chain on
	 the stack, we need to skip the first insn which pushes the
	 (call-saved) register static chain; this push is 1 byte.  */
      offset += 5;
      int skip = MEM_P (chain) ? 1 : 0;
      /* Skip ENDBR32 at the entry of the target function.  */
      if (need_endbr
	  && !cgraph_node::get (fndecl)->only_called_directly_p ())
	skip += 4;
      disp = expand_binop (SImode, sub_optab, fnaddr,
			   plus_constant (Pmode, XEXP (m_tramp, 0),
					  offset - skip),
			   NULL_RTX, 1, OPTAB_DIRECT);
      emit_move_insn (mem, disp);
    }

  gcc_assert (offset <= TRAMPOLINE_SIZE);

#ifdef HAVE_ENABLE_EXECUTE_STACK
#ifdef CHECK_EXECUTE_STACK_ENABLED
  if (CHECK_EXECUTE_STACK_ENABLED)
#endif
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
}
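
/* Illustrative note (not part of the compiler): the 64-bit trampoline laid
   out above is, byte for byte (movl form of the first load shown):

     41 bb <imm32>	movl  $fnaddr, %r11d   (or 49 bb <imm64>, movabs)
     49 ba <imm64>	movabs $chain, %r10
     49 ff e3 90	jmp *%r11; nop

   which matches the little-endian 0xbb41/0xbb49 and 0x90e3ff49 constants
   stored by the code; the chain-load opcode bytes are an assumption based
   on standard x86-64 encoding.  */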
static bool
ix86_allocate_stack_slots_for_args (void)
{
  /* Naked functions should not allocate stack slots for arguments.  */
  return !ix86_function_naked (current_function_decl);
}

static bool
ix86_warn_func_return (tree decl)
{
  /* Naked functions are implemented entirely in assembly, including the
     return sequence, so suppress warnings about this.  */
  return !ix86_function_naked (decl);
}
/* Return the shift count of a vector by scalar shift builtin second argument
   ARG1.  */
static tree
ix86_vector_shift_count (tree arg1)
{
  if (tree_fits_uhwi_p (arg1))
    return arg1;
  else if (TREE_CODE (arg1) == VECTOR_CST && CHAR_BIT == 8)
    {
      /* The count argument is weird, passed in as various 128-bit
	 (or 64-bit) vectors, the low 64 bits from it are the count.  */
      unsigned char buf[16];
      int len = native_encode_expr (arg1, buf, 16);
      if (len == 0)
	return NULL_TREE;
      tree t = native_interpret_expr (uint64_type_node, buf, len);
      if (t && tree_fits_uhwi_p (t))
	return t;
    }
  return NULL_TREE;
}

/* Return true if arg_mask is all ones, ELEMS is elements number of
   corresponding vector.  */
static bool
ix86_masked_all_ones (unsigned HOST_WIDE_INT elems, tree arg_mask)
{
  if (TREE_CODE (arg_mask) != INTEGER_CST)
    return false;

  unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (arg_mask);
  if ((mask | (HOST_WIDE_INT_M1U << elems)) != HOST_WIDE_INT_M1U)
    return false;

  return true;
}
static tree
ix86_fold_builtin (tree fndecl, int n_args,
		   tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum ix86_builtins fn_code
	= (enum ix86_builtins) DECL_MD_FUNCTION_CODE (fndecl);
      enum rtx_code rcode;
      bool is_vshift;
      unsigned HOST_WIDE_INT mask;

      switch (fn_code)
	{
	case IX86_BUILTIN_CPU_IS:
	case IX86_BUILTIN_CPU_SUPPORTS:
	  gcc_assert (n_args == 1);
	  return fold_builtin_cpu (fndecl, args);

	case IX86_BUILTIN_NANQ:
	case IX86_BUILTIN_NANSQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    const char *str = c_getstr (*args);
	    int quiet = fn_code == IX86_BUILTIN_NANQ;
	    REAL_VALUE_TYPE real;

	    if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
	      return build_real (type, real);
	    break;
	  }

	case IX86_BUILTIN_INFQ:
	case IX86_BUILTIN_HUGE_VALQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    REAL_VALUE_TYPE inf;
	    real_inf (&inf);
	    return build_real (type, inf);
	  }
	case IX86_BUILTIN_TZCNT16:
	case IX86_BUILTIN_CTZS:
	case IX86_BUILTIN_TZCNT32:
	case IX86_BUILTIN_TZCNT64:
	  gcc_assert (n_args == 1);
	  if (TREE_CODE (args[0]) == INTEGER_CST)
	    {
	      tree type = TREE_TYPE (TREE_TYPE (fndecl));
	      tree arg = args[0];
	      if (fn_code == IX86_BUILTIN_TZCNT16
		  || fn_code == IX86_BUILTIN_CTZS)
		arg = fold_convert (short_unsigned_type_node, arg);
	      if (integer_zerop (arg))
		return build_int_cst (type, TYPE_PRECISION (TREE_TYPE (arg)));
	      else
		return fold_const_call (CFN_CTZ, type, arg);
	    }
	  break;

	case IX86_BUILTIN_LZCNT16:
	case IX86_BUILTIN_CLZS:
	case IX86_BUILTIN_LZCNT32:
	case IX86_BUILTIN_LZCNT64:
	  gcc_assert (n_args == 1);
	  if (TREE_CODE (args[0]) == INTEGER_CST)
	    {
	      tree type = TREE_TYPE (TREE_TYPE (fndecl));
	      tree arg = args[0];
	      if (fn_code == IX86_BUILTIN_LZCNT16
		  || fn_code == IX86_BUILTIN_CLZS)
		arg = fold_convert (short_unsigned_type_node, arg);
	      if (integer_zerop (arg))
		return build_int_cst (type, TYPE_PRECISION (TREE_TYPE (arg)));
	      else
		return fold_const_call (CFN_CLZ, type, arg);
	    }
	  break;
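
	/* Illustrative sketch (not part of the compiler): the semantics
	   being folded above.  Unlike plain CTZ/CLZ, TZCNT/LZCNT of zero
	   are defined and return the operand width, which is why the zero
	   case is special-cased before calling fold_const_call.

	     static unsigned int
	     tzcnt32 (uint32_t x)
	     {
	       return x ? (unsigned int) __builtin_ctz (x) : 32;
	     }

	     static unsigned int
	     lzcnt32 (uint32_t x)
	     {
	       return x ? (unsigned int) __builtin_clz (x) : 32;
	     }
	*/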
	case IX86_BUILTIN_BEXTR32:
	case IX86_BUILTIN_BEXTR64:
	case IX86_BUILTIN_BEXTRI32:
	case IX86_BUILTIN_BEXTRI64:
	  gcc_assert (n_args == 2);
	  if (tree_fits_uhwi_p (args[1]))
	    {
	      unsigned HOST_WIDE_INT res = 0;
	      unsigned int prec = TYPE_PRECISION (TREE_TYPE (args[0]));
	      unsigned int start = tree_to_uhwi (args[1]);
	      unsigned int len = (start & 0xff00) >> 8;
	      start &= 0xff;
	      if (start >= prec || len == 0)
		res = 0;
	      else if (!tree_fits_uhwi_p (args[0]))
		break;
	      else
		res = tree_to_uhwi (args[0]) >> start;
	      if (len > prec)
		len = prec;
	      if (len < HOST_BITS_PER_WIDE_INT)
		res &= (HOST_WIDE_INT_1U << len) - 1;
	      return build_int_cstu (TREE_TYPE (TREE_TYPE (fndecl)), res);
	    }
	  break;
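
	/* Illustrative sketch (not part of the compiler): BEXTR extracts
	   LEN bits starting at bit START, where START is bits 0-7 and LEN
	   is bits 8-15 of the control operand; an out-of-range start or a
	   zero length yields 0.  E.g. bextr64 (0xabcd, 0x0404) == 0xc
	   (4 bits starting at bit 4).

	     static uint64_t
	     bextr64 (uint64_t src, uint64_t ctrl)
	     {
	       unsigned int start = ctrl & 0xff;
	       unsigned int len = (ctrl >> 8) & 0xff;
	       if (start >= 64 || len == 0)
		 return 0;
	       uint64_t res = src >> start;
	       if (len < 64)
		 res &= (UINT64_C (1) << len) - 1;
	       return res;
	     }
	*/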
	case IX86_BUILTIN_BZHI32:
	case IX86_BUILTIN_BZHI64:
	  gcc_assert (n_args == 2);
	  if (tree_fits_uhwi_p (args[1]))
	    {
	      unsigned int idx = tree_to_uhwi (args[1]) & 0xff;
	      if (idx >= TYPE_PRECISION (TREE_TYPE (args[0])))
		return args[0];
	      if (idx == 0)
		return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	      if (!tree_fits_uhwi_p (args[0]))
		break;
	      unsigned HOST_WIDE_INT res = tree_to_uhwi (args[0]);
	      res &= ~(HOST_WIDE_INT_M1U << idx);
	      return build_int_cstu (TREE_TYPE (TREE_TYPE (fndecl)), res);
	    }
	  break;
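
	/* Illustrative sketch (not part of the compiler): BZHI zeroes all
	   bits at position N and above, with N taken from the low byte of
	   the second operand; an N at or beyond the operand width returns
	   the source unchanged.  E.g. bzhi32 (0xff, 4) == 0xf.

	     static uint32_t
	     bzhi32 (uint32_t src, uint32_t n)
	     {
	       n &= 0xff;
	       if (n >= 32)
		 return src;
	       return src & ((UINT32_C (1) << n) - 1);
	     }
	*/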
	case IX86_BUILTIN_PDEP32:
	case IX86_BUILTIN_PDEP64:
	  gcc_assert (n_args == 2);
	  if (tree_fits_uhwi_p (args[0]) && tree_fits_uhwi_p (args[1]))
	    {
	      unsigned HOST_WIDE_INT src = tree_to_uhwi (args[0]);
	      unsigned HOST_WIDE_INT mask = tree_to_uhwi (args[1]);
	      unsigned HOST_WIDE_INT res = 0;
	      unsigned HOST_WIDE_INT m, k = 1;
	      for (m = 1; m; m <<= 1)
		if ((mask & m) != 0)
		  {
		    if ((src & k) != 0)
		      res |= m;
		    k <<= 1;
		  }
	      return build_int_cstu (TREE_TYPE (TREE_TYPE (fndecl)), res);
	    }
	  break;
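
	/* Illustrative sketch (not part of the compiler): PDEP deposits the
	   low bits of SRC into the positions where MASK has ones, exactly
	   as the loop above does.  E.g. pdep64 (0x5, 0x1a) == 0x12:
	   mask 0b11010 selects bit positions 1, 3 and 4, which receive
	   src bits 0, 1 and 2 (values 1, 0, 1), giving 0b10010.

	     static uint64_t
	     pdep64 (uint64_t src, uint64_t mask)
	     {
	       uint64_t res = 0, k = 1;
	       for (uint64_t m = 1; m; m <<= 1)
		 if (mask & m)
		   {
		     if (src & k)
		       res |= m;
		     k <<= 1;
		   }
	       return res;
	     }
	*/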
	case IX86_BUILTIN_PEXT32:
	case IX86_BUILTIN_PEXT64:
	  gcc_assert (n_args == 2);
	  if (tree_fits_uhwi_p (args[0]) && tree_fits_uhwi_p (args[1]))
	    {
	      unsigned HOST_WIDE_INT src = tree_to_uhwi (args[0]);
	      unsigned HOST_WIDE_INT mask = tree_to_uhwi (args[1]);
	      unsigned HOST_WIDE_INT res = 0;
	      unsigned HOST_WIDE_INT m, k = 1;
	      for (m = 1; m; m <<= 1)
		if ((mask & m) != 0)
		  {
		    if ((src & m) != 0)
		      res |= k;
		    k <<= 1;
		  }
	      return build_int_cstu (TREE_TYPE (TREE_TYPE (fndecl)), res);
	    }
	  break;
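
	/* Illustrative sketch (not part of the compiler): PEXT is the
	   inverse gather - it collects the SRC bits selected by MASK into
	   the low bits of the result.  E.g. pext64 (0x16, 0x1a) == 0x5:
	   mask 0b11010 picks bits 1, 3 and 4 of 0b10110 (values 1, 0, 1),
	   packed into 0b101.

	     static uint64_t
	     pext64 (uint64_t src, uint64_t mask)
	     {
	       uint64_t res = 0, k = 1;
	       for (uint64_t m = 1; m; m <<= 1)
		 if (mask & m)
		   {
		     if (src & m)
		       res |= k;
		     k <<= 1;
		   }
	       return res;
	     }
	*/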
	case IX86_BUILTIN_MOVMSKPS:
	case IX86_BUILTIN_PMOVMSKB:
	case IX86_BUILTIN_MOVMSKPD:
	case IX86_BUILTIN_PMOVMSKB128:
	case IX86_BUILTIN_MOVMSKPD256:
	case IX86_BUILTIN_MOVMSKPS256:
	case IX86_BUILTIN_PMOVMSKB256:
	  gcc_assert (n_args == 1);
	  if (TREE_CODE (args[0]) == VECTOR_CST)
	    {
	      HOST_WIDE_INT res = 0;
	      for (unsigned i = 0; i < VECTOR_CST_NELTS (args[0]); ++i)
		{
		  tree e = VECTOR_CST_ELT (args[0], i);
		  if (TREE_CODE (e) == INTEGER_CST && !TREE_OVERFLOW (e))
		    {
		      if (wi::neg_p (wi::to_wide (e)))
			res |= HOST_WIDE_INT_1 << i;
		    }
		  else if (TREE_CODE (e) == REAL_CST && !TREE_OVERFLOW (e))
		    {
		      if (TREE_REAL_CST (e).sign)
			res |= HOST_WIDE_INT_1 << i;
		    }
		  else
		    return NULL_TREE;
		}
	      return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), res);
	    }
	  break;
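
	/* Illustrative sketch (not part of the compiler): MOVMSKPS collects
	   the sign bit of each element into the low bits of an integer,
	   which is what the constant folding above computes.  E.g. for
	   { -1.0f, 2.0f, -3.0f, 4.0f } the result is 0b0101 == 5.

	     static int
	     movmskps4 (const float v[4])
	     {
	       int res = 0;
	       for (int i = 0; i < 4; ++i)
		 if (__builtin_signbit (v[i]))
		   res |= 1 << i;
	       return res;
	     }
	*/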
18131 case IX86_BUILTIN_PSLLD
:
18132 case IX86_BUILTIN_PSLLD128
:
18133 case IX86_BUILTIN_PSLLD128_MASK
:
18134 case IX86_BUILTIN_PSLLD256
:
    case IX86_BUILTIN_PSLLD256_MASK:
    case IX86_BUILTIN_PSLLD512:
    case IX86_BUILTIN_PSLLDI:
    case IX86_BUILTIN_PSLLDI128:
    case IX86_BUILTIN_PSLLDI128_MASK:
    case IX86_BUILTIN_PSLLDI256:
    case IX86_BUILTIN_PSLLDI256_MASK:
    case IX86_BUILTIN_PSLLDI512:
    case IX86_BUILTIN_PSLLQ:
    case IX86_BUILTIN_PSLLQ128:
    case IX86_BUILTIN_PSLLQ128_MASK:
    case IX86_BUILTIN_PSLLQ256:
    case IX86_BUILTIN_PSLLQ256_MASK:
    case IX86_BUILTIN_PSLLQ512:
    case IX86_BUILTIN_PSLLQI:
    case IX86_BUILTIN_PSLLQI128:
    case IX86_BUILTIN_PSLLQI128_MASK:
    case IX86_BUILTIN_PSLLQI256:
    case IX86_BUILTIN_PSLLQI256_MASK:
    case IX86_BUILTIN_PSLLQI512:
    case IX86_BUILTIN_PSLLW:
    case IX86_BUILTIN_PSLLW128:
    case IX86_BUILTIN_PSLLW128_MASK:
    case IX86_BUILTIN_PSLLW256:
    case IX86_BUILTIN_PSLLW256_MASK:
    case IX86_BUILTIN_PSLLW512_MASK:
    case IX86_BUILTIN_PSLLWI:
    case IX86_BUILTIN_PSLLWI128:
    case IX86_BUILTIN_PSLLWI128_MASK:
    case IX86_BUILTIN_PSLLWI256:
    case IX86_BUILTIN_PSLLWI256_MASK:
    case IX86_BUILTIN_PSLLWI512_MASK:
      rcode = ASHIFT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSRAD:
    case IX86_BUILTIN_PSRAD128:
    case IX86_BUILTIN_PSRAD128_MASK:
    case IX86_BUILTIN_PSRAD256:
    case IX86_BUILTIN_PSRAD256_MASK:
    case IX86_BUILTIN_PSRAD512:
    case IX86_BUILTIN_PSRADI:
    case IX86_BUILTIN_PSRADI128:
    case IX86_BUILTIN_PSRADI128_MASK:
    case IX86_BUILTIN_PSRADI256:
    case IX86_BUILTIN_PSRADI256_MASK:
    case IX86_BUILTIN_PSRADI512:
    case IX86_BUILTIN_PSRAQ128_MASK:
    case IX86_BUILTIN_PSRAQ256_MASK:
    case IX86_BUILTIN_PSRAQ512:
    case IX86_BUILTIN_PSRAQI128_MASK:
    case IX86_BUILTIN_PSRAQI256_MASK:
    case IX86_BUILTIN_PSRAQI512:
    case IX86_BUILTIN_PSRAW:
    case IX86_BUILTIN_PSRAW128:
    case IX86_BUILTIN_PSRAW128_MASK:
    case IX86_BUILTIN_PSRAW256:
    case IX86_BUILTIN_PSRAW256_MASK:
    case IX86_BUILTIN_PSRAW512:
    case IX86_BUILTIN_PSRAWI:
    case IX86_BUILTIN_PSRAWI128:
    case IX86_BUILTIN_PSRAWI128_MASK:
    case IX86_BUILTIN_PSRAWI256:
    case IX86_BUILTIN_PSRAWI256_MASK:
    case IX86_BUILTIN_PSRAWI512:
      rcode = ASHIFTRT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSRLD:
    case IX86_BUILTIN_PSRLD128:
    case IX86_BUILTIN_PSRLD128_MASK:
    case IX86_BUILTIN_PSRLD256:
    case IX86_BUILTIN_PSRLD256_MASK:
    case IX86_BUILTIN_PSRLD512:
    case IX86_BUILTIN_PSRLDI:
    case IX86_BUILTIN_PSRLDI128:
    case IX86_BUILTIN_PSRLDI128_MASK:
    case IX86_BUILTIN_PSRLDI256:
    case IX86_BUILTIN_PSRLDI256_MASK:
    case IX86_BUILTIN_PSRLDI512:
    case IX86_BUILTIN_PSRLQ:
    case IX86_BUILTIN_PSRLQ128:
    case IX86_BUILTIN_PSRLQ128_MASK:
    case IX86_BUILTIN_PSRLQ256:
    case IX86_BUILTIN_PSRLQ256_MASK:
    case IX86_BUILTIN_PSRLQ512:
    case IX86_BUILTIN_PSRLQI:
    case IX86_BUILTIN_PSRLQI128:
    case IX86_BUILTIN_PSRLQI128_MASK:
    case IX86_BUILTIN_PSRLQI256:
    case IX86_BUILTIN_PSRLQI256_MASK:
    case IX86_BUILTIN_PSRLQI512:
    case IX86_BUILTIN_PSRLW:
    case IX86_BUILTIN_PSRLW128:
    case IX86_BUILTIN_PSRLW128_MASK:
    case IX86_BUILTIN_PSRLW256:
    case IX86_BUILTIN_PSRLW256_MASK:
    case IX86_BUILTIN_PSRLW512:
    case IX86_BUILTIN_PSRLWI:
    case IX86_BUILTIN_PSRLWI128:
    case IX86_BUILTIN_PSRLWI128_MASK:
    case IX86_BUILTIN_PSRLWI256:
    case IX86_BUILTIN_PSRLWI256_MASK:
    case IX86_BUILTIN_PSRLWI512:
      rcode = LSHIFTRT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSLLVV16HI:
    case IX86_BUILTIN_PSLLVV16SI:
    case IX86_BUILTIN_PSLLVV2DI:
    case IX86_BUILTIN_PSLLVV2DI_MASK:
    case IX86_BUILTIN_PSLLVV32HI:
    case IX86_BUILTIN_PSLLVV4DI:
    case IX86_BUILTIN_PSLLVV4DI_MASK:
    case IX86_BUILTIN_PSLLVV4SI:
    case IX86_BUILTIN_PSLLVV4SI_MASK:
    case IX86_BUILTIN_PSLLVV8DI:
    case IX86_BUILTIN_PSLLVV8HI:
    case IX86_BUILTIN_PSLLVV8SI:
    case IX86_BUILTIN_PSLLVV8SI_MASK:
      rcode = ASHIFT;
      is_vshift = true;
      goto do_shift;

    case IX86_BUILTIN_PSRAVQ128:
    case IX86_BUILTIN_PSRAVQ256:
    case IX86_BUILTIN_PSRAVV16HI:
    case IX86_BUILTIN_PSRAVV16SI:
    case IX86_BUILTIN_PSRAVV32HI:
    case IX86_BUILTIN_PSRAVV4SI:
    case IX86_BUILTIN_PSRAVV4SI_MASK:
    case IX86_BUILTIN_PSRAVV8DI:
    case IX86_BUILTIN_PSRAVV8HI:
    case IX86_BUILTIN_PSRAVV8SI:
    case IX86_BUILTIN_PSRAVV8SI_MASK:
      rcode = ASHIFTRT;
      is_vshift = true;
      goto do_shift;

    case IX86_BUILTIN_PSRLVV16HI:
    case IX86_BUILTIN_PSRLVV16SI:
    case IX86_BUILTIN_PSRLVV2DI:
    case IX86_BUILTIN_PSRLVV2DI_MASK:
    case IX86_BUILTIN_PSRLVV32HI:
    case IX86_BUILTIN_PSRLVV4DI:
    case IX86_BUILTIN_PSRLVV4DI_MASK:
    case IX86_BUILTIN_PSRLVV4SI:
    case IX86_BUILTIN_PSRLVV4SI_MASK:
    case IX86_BUILTIN_PSRLVV8DI:
    case IX86_BUILTIN_PSRLVV8HI:
    case IX86_BUILTIN_PSRLVV8SI:
    case IX86_BUILTIN_PSRLVV8SI_MASK:
      rcode = LSHIFTRT;
      is_vshift = true;
      goto do_shift;

    do_shift:
      gcc_assert (n_args >= 2);
      if (TREE_CODE (args[0]) != VECTOR_CST)
        break;
      mask = HOST_WIDE_INT_M1U;
      if (n_args > 2)
        {
          /* This is a masked shift.  */
          if (!tree_fits_uhwi_p (args[n_args - 1])
              || TREE_SIDE_EFFECTS (args[n_args - 2]))
            break;
          mask = tree_to_uhwi (args[n_args - 1]);
          unsigned elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (args[0]));
          mask |= HOST_WIDE_INT_M1U << elems;
          if (mask != HOST_WIDE_INT_M1U
              && TREE_CODE (args[n_args - 2]) != VECTOR_CST)
            break;
          if (mask == (HOST_WIDE_INT_M1U << elems))
            return args[n_args - 2];
        }
      if (is_vshift && TREE_CODE (args[1]) != VECTOR_CST)
        break;
      if (tree tem = (is_vshift ? integer_one_node
                      : ix86_vector_shift_count (args[1])))
        {
          unsigned HOST_WIDE_INT count = tree_to_uhwi (tem);
          unsigned HOST_WIDE_INT prec
            = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (args[0])));
          if (count == 0 && mask == HOST_WIDE_INT_M1U)
            return args[0];
          if (count >= prec)
            {
              if (rcode == ASHIFTRT)
                count = prec - 1;
              else if (mask == HOST_WIDE_INT_M1U)
                return build_zero_cst (TREE_TYPE (args[0]));
            }
          tree countt = NULL_TREE;
          if (!is_vshift)
            {
              if (count >= prec)
                countt = integer_zero_node;
              else
                countt = build_int_cst (integer_type_node, count);
            }
          tree_vector_builder builder;
          if (mask != HOST_WIDE_INT_M1U || is_vshift)
            builder.new_vector (TREE_TYPE (args[0]),
                                TYPE_VECTOR_SUBPARTS (TREE_TYPE (args[0])),
                                1);
          else
            builder.new_unary_operation (TREE_TYPE (args[0]), args[0],
                                         false);
          unsigned int cnt = builder.encoded_nelts ();
          for (unsigned int i = 0; i < cnt; ++i)
            {
              tree elt = VECTOR_CST_ELT (args[0], i);
              if (TREE_CODE (elt) != INTEGER_CST || TREE_OVERFLOW (elt))
                return NULL_TREE;
              tree type = TREE_TYPE (elt);
              if (rcode == LSHIFTRT)
                elt = fold_convert (unsigned_type_for (type), elt);
              if (is_vshift)
                {
                  countt = VECTOR_CST_ELT (args[1], i);
                  if (TREE_CODE (countt) != INTEGER_CST
                      || TREE_OVERFLOW (countt))
                    return NULL_TREE;
                  if (wi::neg_p (wi::to_wide (countt))
                      || wi::to_widest (countt) >= prec)
                    {
                      if (rcode == ASHIFTRT)
                        countt = build_int_cst (TREE_TYPE (countt),
                                                prec - 1);
                      else
                        {
                          elt = build_zero_cst (TREE_TYPE (elt));
                          countt = build_zero_cst (TREE_TYPE (countt));
                        }
                    }
                }
              else if (count >= prec)
                elt = build_zero_cst (TREE_TYPE (elt));
              elt = const_binop (rcode == ASHIFT
                                 ? LSHIFT_EXPR : RSHIFT_EXPR,
                                 TREE_TYPE (elt), elt, countt);
              if (!elt || TREE_CODE (elt) != INTEGER_CST)
                return NULL_TREE;
              if (rcode == LSHIFTRT)
                elt = fold_convert (type, elt);
              if ((mask & (HOST_WIDE_INT_1U << i)) == 0)
                {
                  elt = VECTOR_CST_ELT (args[n_args - 2], i);
                  if (TREE_CODE (elt) != INTEGER_CST
                      || TREE_OVERFLOW (elt))
                    return NULL_TREE;
                }
              builder.quick_push (elt);
            }
          return builder.build ();
        }
      break;

    default:
      break;
    }
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
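
/* A sketch of what the shift folding above buys us (illustrative
   only; the intrinsic names are just examples of source code that
   reaches these cases): when every input lane is a compile-time
   constant, a call such as

     __m128i x = _mm_set1_epi32 (4);
     __m128i y = _mm_slli_epi32 (x, 3);

   folds directly to the constant vector { 32, 32, 32, 32 }, and a
   masked variant whose write mask is all-zeros simply returns the
   pass-through operand.  */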
/* Fold a MD builtin (use ix86_fold_builtin for folding into
   constant) in GIMPLE.  */

static bool
ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_MD));
  int n_args = gimple_call_num_args (stmt);
  enum ix86_builtins fn_code
    = (enum ix86_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  tree decl = NULL_TREE;
  tree arg0, arg1, arg2;
  enum rtx_code rcode;
  enum tree_code tcode;
  unsigned HOST_WIDE_INT count;
  bool is_vshift;
  unsigned HOST_WIDE_INT elems;

  /* Don't fold when there's isa mismatch.  */
  if (!ix86_check_builtin_isa_match (fn_code, NULL, NULL))
    return false;

  switch (fn_code)
    {
    case IX86_BUILTIN_TZCNT32:
      decl = builtin_decl_implicit (BUILT_IN_CTZ);
      goto fold_tzcnt_lzcnt;

    case IX86_BUILTIN_TZCNT64:
      decl = builtin_decl_implicit (BUILT_IN_CTZLL);
      goto fold_tzcnt_lzcnt;

    case IX86_BUILTIN_LZCNT32:
      decl = builtin_decl_implicit (BUILT_IN_CLZ);
      goto fold_tzcnt_lzcnt;

    case IX86_BUILTIN_LZCNT64:
      decl = builtin_decl_implicit (BUILT_IN_CLZLL);
      goto fold_tzcnt_lzcnt;

    fold_tzcnt_lzcnt:
      gcc_assert (n_args == 1);
      arg0 = gimple_call_arg (stmt, 0);
      if (TREE_CODE (arg0) == SSA_NAME && decl && gimple_call_lhs (stmt))
        {
          int prec = TYPE_PRECISION (TREE_TYPE (arg0));
          /* If arg0 is provably non-zero, optimize into generic
             __builtin_c[tl]z{,ll} function the middle-end handles
             better.  */
          if (!expr_not_equal_to (arg0, wi::zero (prec)))
            break;

          location_t loc = gimple_location (stmt);
          gimple *g = gimple_build_call (decl, 1, arg0);
          gimple_set_location (g, loc);
          tree lhs = make_ssa_name (integer_type_node);
          gimple_call_set_lhs (g, lhs);
          gsi_insert_before (gsi, g, GSI_SAME_STMT);
          g = gimple_build_assign (gimple_call_lhs (stmt), NOP_EXPR, lhs);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      break;
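
      /* Illustrative only: for code like

           unsigned n = ...;
           if (n != 0)
             k = _tzcnt_u32 (n);

         range information proves N is non-zero on that path, so the
         target builtin behind _tzcnt_u32 is rewritten into
         __builtin_ctz (n), which the middle-end understands far
         better (value ranges, CSE).  The names above are made up for
         the example.  */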
    case IX86_BUILTIN_BZHI32:
    case IX86_BUILTIN_BZHI64:
      gcc_assert (n_args == 2);
      arg1 = gimple_call_arg (stmt, 1);
      if (tree_fits_uhwi_p (arg1) && gimple_call_lhs (stmt))
        {
          unsigned int idx = tree_to_uhwi (arg1) & 0xff;
          arg0 = gimple_call_arg (stmt, 0);
          if (idx < TYPE_PRECISION (TREE_TYPE (arg0)))
            break;
          location_t loc = gimple_location (stmt);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt), arg0);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      break;

    case IX86_BUILTIN_PDEP32:
    case IX86_BUILTIN_PDEP64:
    case IX86_BUILTIN_PEXT32:
    case IX86_BUILTIN_PEXT64:
      gcc_assert (n_args == 2);
      arg1 = gimple_call_arg (stmt, 1);
      if (integer_all_onesp (arg1) && gimple_call_lhs (stmt))
        {
          location_t loc = gimple_location (stmt);
          arg0 = gimple_call_arg (stmt, 0);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt), arg0);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      break;
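
      /* Illustrative only: _pdep_u32 (x, -1) and _pext_u32 (x, -1)
         scatter/gather X through an all-ones mask, i.e. they are the
         identity, so the call collapses to a plain copy of the first
         operand.  */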
    case IX86_BUILTIN_PBLENDVB256:
    case IX86_BUILTIN_BLENDVPS256:
    case IX86_BUILTIN_BLENDVPD256:
      /* pcmpeqb/d/q is under avx2, w/o avx2, it's veclower
         to scalar operations and not combined back.  */
      if (!TARGET_AVX2)
        break;

      /* FALLTHRU.  */
    case IX86_BUILTIN_BLENDVPD:
      /* blendvpd is under sse4.1 but pcmpgtq is under sse4.2,
         w/o sse4.2, it's veclowered to scalar operations and
         not combined back.  */
      if (!TARGET_SSE4_2)
        break;
      /* FALLTHRU.  */
    case IX86_BUILTIN_PBLENDVB128:
    case IX86_BUILTIN_BLENDVPS:
      gcc_assert (n_args == 3);
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      arg2 = gimple_call_arg (stmt, 2);
      if (gimple_call_lhs (stmt))
        {
          location_t loc = gimple_location (stmt);
          tree type = TREE_TYPE (arg2);
          gimple_seq stmts = NULL;
          if (VECTOR_FLOAT_TYPE_P (type))
            {
              tree itype = GET_MODE_INNER (TYPE_MODE (type)) == E_SFmode
                ? intSI_type_node : intDI_type_node;
              type = get_same_sized_vectype (itype, type);
              arg2 = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, arg2);
            }
          tree zero_vec = build_zero_cst (type);
          tree cmp_type = truth_type_for (type);
          tree cmp = gimple_build (&stmts, LT_EXPR, cmp_type, arg2, zero_vec);
          gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt),
                                           VEC_COND_EXPR, cmp,
                                           arg1, arg0);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
        }
      else
        gsi_replace (gsi, gimple_build_nop (), false);
      return true;
    case IX86_BUILTIN_PCMPEQB128:
    case IX86_BUILTIN_PCMPEQW128:
    case IX86_BUILTIN_PCMPEQD128:
    case IX86_BUILTIN_PCMPEQQ:
    case IX86_BUILTIN_PCMPEQB256:
    case IX86_BUILTIN_PCMPEQW256:
    case IX86_BUILTIN_PCMPEQD256:
    case IX86_BUILTIN_PCMPEQQ256:
      tcode = EQ_EXPR;
      goto do_cmp;

    case IX86_BUILTIN_PCMPGTB128:
    case IX86_BUILTIN_PCMPGTW128:
    case IX86_BUILTIN_PCMPGTD128:
    case IX86_BUILTIN_PCMPGTQ:
    case IX86_BUILTIN_PCMPGTB256:
    case IX86_BUILTIN_PCMPGTW256:
    case IX86_BUILTIN_PCMPGTD256:
    case IX86_BUILTIN_PCMPGTQ256:
      tcode = GT_EXPR;

    do_cmp:
      gcc_assert (n_args == 2);
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      if (gimple_call_lhs (stmt))
        {
          location_t loc = gimple_location (stmt);
          tree type = TREE_TYPE (arg0);
          tree zero_vec = build_zero_cst (type);
          tree minus_one_vec = build_minus_one_cst (type);
          tree cmp_type = truth_type_for (type);
          gimple_seq stmts = NULL;
          tree cmp = gimple_build (&stmts, tcode, cmp_type, arg0, arg1);
          gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt),
                                           VEC_COND_EXPR, cmp,
                                           minus_one_vec, zero_vec);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
        }
      else
        gsi_replace (gsi, gimple_build_nop (), false);
      return true;
    case IX86_BUILTIN_PSLLD:
    case IX86_BUILTIN_PSLLD128:
    case IX86_BUILTIN_PSLLD128_MASK:
    case IX86_BUILTIN_PSLLD256:
    case IX86_BUILTIN_PSLLD256_MASK:
    case IX86_BUILTIN_PSLLD512:
    case IX86_BUILTIN_PSLLDI:
    case IX86_BUILTIN_PSLLDI128:
    case IX86_BUILTIN_PSLLDI128_MASK:
    case IX86_BUILTIN_PSLLDI256:
    case IX86_BUILTIN_PSLLDI256_MASK:
    case IX86_BUILTIN_PSLLDI512:
    case IX86_BUILTIN_PSLLQ:
    case IX86_BUILTIN_PSLLQ128:
    case IX86_BUILTIN_PSLLQ128_MASK:
    case IX86_BUILTIN_PSLLQ256:
    case IX86_BUILTIN_PSLLQ256_MASK:
    case IX86_BUILTIN_PSLLQ512:
    case IX86_BUILTIN_PSLLQI:
    case IX86_BUILTIN_PSLLQI128:
    case IX86_BUILTIN_PSLLQI128_MASK:
    case IX86_BUILTIN_PSLLQI256:
    case IX86_BUILTIN_PSLLQI256_MASK:
    case IX86_BUILTIN_PSLLQI512:
    case IX86_BUILTIN_PSLLW:
    case IX86_BUILTIN_PSLLW128:
    case IX86_BUILTIN_PSLLW128_MASK:
    case IX86_BUILTIN_PSLLW256:
    case IX86_BUILTIN_PSLLW256_MASK:
    case IX86_BUILTIN_PSLLW512_MASK:
    case IX86_BUILTIN_PSLLWI:
    case IX86_BUILTIN_PSLLWI128:
    case IX86_BUILTIN_PSLLWI128_MASK:
    case IX86_BUILTIN_PSLLWI256:
    case IX86_BUILTIN_PSLLWI256_MASK:
    case IX86_BUILTIN_PSLLWI512_MASK:
      rcode = ASHIFT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSRAD:
    case IX86_BUILTIN_PSRAD128:
    case IX86_BUILTIN_PSRAD128_MASK:
    case IX86_BUILTIN_PSRAD256:
    case IX86_BUILTIN_PSRAD256_MASK:
    case IX86_BUILTIN_PSRAD512:
    case IX86_BUILTIN_PSRADI:
    case IX86_BUILTIN_PSRADI128:
    case IX86_BUILTIN_PSRADI128_MASK:
    case IX86_BUILTIN_PSRADI256:
    case IX86_BUILTIN_PSRADI256_MASK:
    case IX86_BUILTIN_PSRADI512:
    case IX86_BUILTIN_PSRAQ128_MASK:
    case IX86_BUILTIN_PSRAQ256_MASK:
    case IX86_BUILTIN_PSRAQ512:
    case IX86_BUILTIN_PSRAQI128_MASK:
    case IX86_BUILTIN_PSRAQI256_MASK:
    case IX86_BUILTIN_PSRAQI512:
    case IX86_BUILTIN_PSRAW:
    case IX86_BUILTIN_PSRAW128:
    case IX86_BUILTIN_PSRAW128_MASK:
    case IX86_BUILTIN_PSRAW256:
    case IX86_BUILTIN_PSRAW256_MASK:
    case IX86_BUILTIN_PSRAW512:
    case IX86_BUILTIN_PSRAWI:
    case IX86_BUILTIN_PSRAWI128:
    case IX86_BUILTIN_PSRAWI128_MASK:
    case IX86_BUILTIN_PSRAWI256:
    case IX86_BUILTIN_PSRAWI256_MASK:
    case IX86_BUILTIN_PSRAWI512:
      rcode = ASHIFTRT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSRLD:
    case IX86_BUILTIN_PSRLD128:
    case IX86_BUILTIN_PSRLD128_MASK:
    case IX86_BUILTIN_PSRLD256:
    case IX86_BUILTIN_PSRLD256_MASK:
    case IX86_BUILTIN_PSRLD512:
    case IX86_BUILTIN_PSRLDI:
    case IX86_BUILTIN_PSRLDI128:
    case IX86_BUILTIN_PSRLDI128_MASK:
    case IX86_BUILTIN_PSRLDI256:
    case IX86_BUILTIN_PSRLDI256_MASK:
    case IX86_BUILTIN_PSRLDI512:
    case IX86_BUILTIN_PSRLQ:
    case IX86_BUILTIN_PSRLQ128:
    case IX86_BUILTIN_PSRLQ128_MASK:
    case IX86_BUILTIN_PSRLQ256:
    case IX86_BUILTIN_PSRLQ256_MASK:
    case IX86_BUILTIN_PSRLQ512:
    case IX86_BUILTIN_PSRLQI:
    case IX86_BUILTIN_PSRLQI128:
    case IX86_BUILTIN_PSRLQI128_MASK:
    case IX86_BUILTIN_PSRLQI256:
    case IX86_BUILTIN_PSRLQI256_MASK:
    case IX86_BUILTIN_PSRLQI512:
    case IX86_BUILTIN_PSRLW:
    case IX86_BUILTIN_PSRLW128:
    case IX86_BUILTIN_PSRLW128_MASK:
    case IX86_BUILTIN_PSRLW256:
    case IX86_BUILTIN_PSRLW256_MASK:
    case IX86_BUILTIN_PSRLW512:
    case IX86_BUILTIN_PSRLWI:
    case IX86_BUILTIN_PSRLWI128:
    case IX86_BUILTIN_PSRLWI128_MASK:
    case IX86_BUILTIN_PSRLWI256:
    case IX86_BUILTIN_PSRLWI256_MASK:
    case IX86_BUILTIN_PSRLWI512:
      rcode = LSHIFTRT;
      is_vshift = false;
      goto do_shift;

    case IX86_BUILTIN_PSLLVV16HI:
    case IX86_BUILTIN_PSLLVV16SI:
    case IX86_BUILTIN_PSLLVV2DI:
    case IX86_BUILTIN_PSLLVV2DI_MASK:
    case IX86_BUILTIN_PSLLVV32HI:
    case IX86_BUILTIN_PSLLVV4DI:
    case IX86_BUILTIN_PSLLVV4DI_MASK:
    case IX86_BUILTIN_PSLLVV4SI:
    case IX86_BUILTIN_PSLLVV4SI_MASK:
    case IX86_BUILTIN_PSLLVV8DI:
    case IX86_BUILTIN_PSLLVV8HI:
    case IX86_BUILTIN_PSLLVV8SI:
    case IX86_BUILTIN_PSLLVV8SI_MASK:
      rcode = ASHIFT;
      is_vshift = true;
      goto do_shift;

    case IX86_BUILTIN_PSRAVQ128:
    case IX86_BUILTIN_PSRAVQ256:
    case IX86_BUILTIN_PSRAVV16HI:
    case IX86_BUILTIN_PSRAVV16SI:
    case IX86_BUILTIN_PSRAVV32HI:
    case IX86_BUILTIN_PSRAVV4SI:
    case IX86_BUILTIN_PSRAVV4SI_MASK:
    case IX86_BUILTIN_PSRAVV8DI:
    case IX86_BUILTIN_PSRAVV8HI:
    case IX86_BUILTIN_PSRAVV8SI:
    case IX86_BUILTIN_PSRAVV8SI_MASK:
      rcode = ASHIFTRT;
      is_vshift = true;
      goto do_shift;

    case IX86_BUILTIN_PSRLVV16HI:
    case IX86_BUILTIN_PSRLVV16SI:
    case IX86_BUILTIN_PSRLVV2DI:
    case IX86_BUILTIN_PSRLVV2DI_MASK:
    case IX86_BUILTIN_PSRLVV32HI:
    case IX86_BUILTIN_PSRLVV4DI:
    case IX86_BUILTIN_PSRLVV4DI_MASK:
    case IX86_BUILTIN_PSRLVV4SI:
    case IX86_BUILTIN_PSRLVV4SI_MASK:
    case IX86_BUILTIN_PSRLVV8DI:
    case IX86_BUILTIN_PSRLVV8HI:
    case IX86_BUILTIN_PSRLVV8SI:
    case IX86_BUILTIN_PSRLVV8SI_MASK:
      rcode = LSHIFTRT;
      is_vshift = true;
      goto do_shift;

    do_shift:
      gcc_assert (n_args >= 2);
      if (!gimple_call_lhs (stmt))
        break;
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
      /* For masked shift, only optimize if the mask is all ones.  */
      if (n_args > 2
          && !ix86_masked_all_ones (elems, gimple_call_arg (stmt, n_args - 1)))
        break;
      if (is_vshift)
        {
          if (TREE_CODE (arg1) != VECTOR_CST)
            break;
          count = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (arg0)));
          if (integer_zerop (arg1))
            count = 0;
          else if (rcode == ASHIFTRT)
            break;
          else
            for (unsigned int i = 0; i < VECTOR_CST_NELTS (arg1); ++i)
              {
                tree elt = VECTOR_CST_ELT (arg1, i);
                if (!wi::neg_p (wi::to_wide (elt))
                    && wi::to_widest (elt) < count)
                  return false;
              }
        }
      else
        {
          arg1 = ix86_vector_shift_count (arg1);
          if (!arg1)
            break;
          count = tree_to_uhwi (arg1);
        }
      if (count == 0)
        {
          /* Just return the first argument for shift by 0.  */
          location_t loc = gimple_location (stmt);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt), arg0);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      if (rcode != ASHIFTRT
          && count >= TYPE_PRECISION (TREE_TYPE (TREE_TYPE (arg0))))
        {
          /* For shift counts equal or greater than precision, except for
             arithmetic right shift the result is zero.  */
          location_t loc = gimple_location (stmt);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt),
                                           build_zero_cst (TREE_TYPE (arg0)));
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      break;
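
      /* Illustrative only: a call such as _mm_srli_epi32 (x, 0) is
         replaced by a plain copy of X, while _mm_slli_epi32 (x, 32)
         (count >= element precision and not an arithmetic right
         shift) is replaced by an all-zero vector, matching the
         hardware semantics of the PSLL/PSRL family.  */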
    case IX86_BUILTIN_SHUFPD512:
    case IX86_BUILTIN_SHUFPS512:
    case IX86_BUILTIN_SHUFPD:
    case IX86_BUILTIN_SHUFPD256:
    case IX86_BUILTIN_SHUFPS:
    case IX86_BUILTIN_SHUFPS256:
      arg0 = gimple_call_arg (stmt, 0);
      elems = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
      /* This is a masked shuffle.  Only optimize if the mask is all ones.  */
      if (n_args > 3
          && !ix86_masked_all_ones (elems,
                                    gimple_call_arg (stmt, n_args - 1)))
        break;
      arg2 = gimple_call_arg (stmt, 2);
      if (TREE_CODE (arg2) == INTEGER_CST && gimple_call_lhs (stmt))
        {
          unsigned HOST_WIDE_INT shuffle_mask = TREE_INT_CST_LOW (arg2);
          /* Check valid imm, refer to gcc.target/i386/testimm-10.c.  */
          if (shuffle_mask > 255)
            return false;

          machine_mode imode = GET_MODE_INNER (TYPE_MODE (TREE_TYPE (arg0)));
          location_t loc = gimple_location (stmt);
          tree itype = (imode == E_DFmode
                        ? long_long_integer_type_node : integer_type_node);
          tree vtype = build_vector_type (itype, elems);
          tree_vector_builder elts (vtype, elems, 1);

          /* Transform integer shuffle_mask to vector perm_mask which
             is used by vec_perm_expr, refer to shuflp[sd]256/512 in sse.md.  */
          for (unsigned i = 0; i != elems; i++)
            {
              unsigned sel_idx;
              /* Imm[1:0](if VL > 128, then use Imm[3:2],Imm[5:4],Imm[7:6])
                 provide 2 select controls for each element of the
                 destination.  */
              if (imode == E_DFmode)
                sel_idx = (i & 1) * elems + (i & ~1)
                          + ((shuffle_mask >> i) & 1);
              else
                {
                  /* Imm[7:0](if VL > 128, also use Imm[7:0]) provide 4 select
                     controls for each element of the destination.  */
                  unsigned j = i % 4;
                  sel_idx = ((i >> 1) & 1) * elems + (i & ~3)
                            + ((shuffle_mask >> 2 * j) & 3);
                }
              elts.quick_push (build_int_cst (itype, sel_idx));
            }

          tree perm_mask = elts.build ();
          arg1 = gimple_call_arg (stmt, 1);
          gimple *g = gimple_build_assign (gimple_call_lhs (stmt),
                                           VEC_PERM_EXPR,
                                           arg0, arg1, perm_mask);
          gimple_set_location (g, loc);
          gsi_replace (gsi, g, false);
          return true;
        }
      // Do not error yet, the constant could be propagated later?
      break;

    default:
      break;
    }

  return false;
}
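
/* Worked example for the SHUFP lowering above (illustrative only):
   for _mm_shuffle_pd (a, b, 2) the immediate bits are Imm[1:0] = 0b10,
   so the loop builds the permutation selector { 0, 3 } over the
   concatenation of the two inputs, i.e. VEC_PERM_EXPR <a, b, {0, 3}>,
   which yields { a[0], b[1] }, exactly the SHUFPD semantics.  */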
18887 /* Handler for an SVML-style interface to
18888 a library with vectorized intrinsics. */
18891 ix86_veclibabi_svml (combined_fn fn
, tree type_out
, tree type_in
)
18894 tree fntype
, new_fndecl
, args
;
18897 machine_mode el_mode
, in_mode
;
18900 /* The SVML is suitable for unsafe math only. */
18901 if (!flag_unsafe_math_optimizations
)
18904 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
18905 n
= TYPE_VECTOR_SUBPARTS (type_out
);
18906 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
18907 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
18908 if (el_mode
!= in_mode
18932 if ((el_mode
!= DFmode
|| n
!= 2)
18933 && (el_mode
!= SFmode
|| n
!= 4))
18941 tree fndecl
= mathfn_built_in (el_mode
== DFmode
18942 ? double_type_node
: float_type_node
, fn
);
18943 bname
= IDENTIFIER_POINTER (DECL_NAME (fndecl
));
18945 if (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_LOGF
)
18946 strcpy (name
, "vmlsLn4");
18947 else if (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_LOG
)
18948 strcpy (name
, "vmldLn2");
18951 sprintf (name
, "vmls%s", bname
+10);
18952 name
[strlen (name
)-1] = '4';
18955 sprintf (name
, "vmld%s2", bname
+10);
18957 /* Convert to uppercase. */
18961 for (args
= DECL_ARGUMENTS (fndecl
); args
; args
= TREE_CHAIN (args
))
18965 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
18967 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
18969 /* Build a function declaration for the vectorized function. */
18970 new_fndecl
= build_decl (BUILTINS_LOCATION
,
18971 FUNCTION_DECL
, get_identifier (name
), fntype
);
18972 TREE_PUBLIC (new_fndecl
) = 1;
18973 DECL_EXTERNAL (new_fndecl
) = 1;
18974 DECL_IS_NOVOPS (new_fndecl
) = 1;
18975 TREE_READONLY (new_fndecl
) = 1;
18980 /* Handler for an ACML-style interface to
18981 a library with vectorized intrinsics. */
18984 ix86_veclibabi_acml (combined_fn fn
, tree type_out
, tree type_in
)
18986 char name
[20] = "__vr.._";
18987 tree fntype
, new_fndecl
, args
;
18990 machine_mode el_mode
, in_mode
;
18993 /* The ACML is 64bits only and suitable for unsafe math only as
18994 it does not correctly support parts of IEEE with the required
18995 precision such as denormals. */
18997 || !flag_unsafe_math_optimizations
)
19000 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
19001 n
= TYPE_VECTOR_SUBPARTS (type_out
);
19002 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
19003 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
19004 if (el_mode
!= in_mode
19016 if (el_mode
== DFmode
&& n
== 2)
19021 else if (el_mode
== SFmode
&& n
== 4)
19034 tree fndecl
= mathfn_built_in (el_mode
== DFmode
19035 ? double_type_node
: float_type_node
, fn
);
19036 bname
= IDENTIFIER_POINTER (DECL_NAME (fndecl
));
19037 sprintf (name
+ 7, "%s", bname
+10);
19040 for (args
= DECL_ARGUMENTS (fndecl
); args
; args
= TREE_CHAIN (args
))
19044 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
19046 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
19048 /* Build a function declaration for the vectorized function. */
19049 new_fndecl
= build_decl (BUILTINS_LOCATION
,
19050 FUNCTION_DECL
, get_identifier (name
), fntype
);
19051 TREE_PUBLIC (new_fndecl
) = 1;
19052 DECL_EXTERNAL (new_fndecl
) = 1;
19053 DECL_IS_NOVOPS (new_fndecl
) = 1;
19054 TREE_READONLY (new_fndecl
) = 1;
19059 /* Returns a decl of a function that implements scatter store with
19060 register type VECTYPE and index type INDEX_TYPE and SCALE.
19061 Return NULL_TREE if it is not available. */
19064 ix86_vectorize_builtin_scatter (const_tree vectype
,
19065 const_tree index_type
, int scale
)
19068 enum ix86_builtins code
;
19070 if (!TARGET_AVX512F
)
19073 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype
), 2u)
19074 ? !TARGET_USE_SCATTER_2PARTS
19075 : (known_eq (TYPE_VECTOR_SUBPARTS (vectype
), 4u)
19076 ? !TARGET_USE_SCATTER_4PARTS
19077 : !TARGET_USE_SCATTER
))
19080 if ((TREE_CODE (index_type
) != INTEGER_TYPE
19081 && !POINTER_TYPE_P (index_type
))
19082 || (TYPE_MODE (index_type
) != SImode
19083 && TYPE_MODE (index_type
) != DImode
))
19086 if (TYPE_PRECISION (index_type
) > POINTER_SIZE
)
19089 /* v*scatter* insn sign extends index to pointer mode. */
19090 if (TYPE_PRECISION (index_type
) < POINTER_SIZE
19091 && TYPE_UNSIGNED (index_type
))
19094 /* Scale can be 1, 2, 4 or 8. */
19097 || (scale
& (scale
- 1)) != 0)
19100 si
= TYPE_MODE (index_type
) == SImode
;
19101 switch (TYPE_MODE (vectype
))
19104 code
= si
? IX86_BUILTIN_SCATTERALTSIV8DF
: IX86_BUILTIN_SCATTERDIV8DF
;
19107 code
= si
? IX86_BUILTIN_SCATTERALTSIV8DI
: IX86_BUILTIN_SCATTERDIV8DI
;
19110 code
= si
? IX86_BUILTIN_SCATTERSIV16SF
: IX86_BUILTIN_SCATTERALTDIV16SF
;
19113 code
= si
? IX86_BUILTIN_SCATTERSIV16SI
: IX86_BUILTIN_SCATTERALTDIV16SI
;
19116 if (TARGET_AVX512VL
)
19117 code
= si
? IX86_BUILTIN_SCATTERALTSIV4DF
: IX86_BUILTIN_SCATTERDIV4DF
;
19122 if (TARGET_AVX512VL
)
19123 code
= si
? IX86_BUILTIN_SCATTERALTSIV4DI
: IX86_BUILTIN_SCATTERDIV4DI
;
19128 if (TARGET_AVX512VL
)
19129 code
= si
? IX86_BUILTIN_SCATTERSIV8SF
: IX86_BUILTIN_SCATTERALTDIV8SF
;
19134 if (TARGET_AVX512VL
)
19135 code
= si
? IX86_BUILTIN_SCATTERSIV8SI
: IX86_BUILTIN_SCATTERALTDIV8SI
;
19140 if (TARGET_AVX512VL
)
19141 code
= si
? IX86_BUILTIN_SCATTERALTSIV2DF
: IX86_BUILTIN_SCATTERDIV2DF
;
19146 if (TARGET_AVX512VL
)
19147 code
= si
? IX86_BUILTIN_SCATTERALTSIV2DI
: IX86_BUILTIN_SCATTERDIV2DI
;
19152 if (TARGET_AVX512VL
)
19153 code
= si
? IX86_BUILTIN_SCATTERSIV4SF
: IX86_BUILTIN_SCATTERALTDIV4SF
;
19158 if (TARGET_AVX512VL
)
19159 code
= si
? IX86_BUILTIN_SCATTERSIV4SI
: IX86_BUILTIN_SCATTERALTDIV4SI
;
19167 return get_ix86_builtin (code
);
/* Return true if it is safe to use the rsqrt optabs to optimize
   1.0/sqrt.  */

static bool
use_rsqrt_p (machine_mode mode)
{
  return ((mode == HFmode
           || (TARGET_SSE && TARGET_SSE_MATH))
          && flag_finite_math_only
          && !flag_trapping_math
          && flag_unsafe_math_optimizations);
}
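
/* Illustrative only: under -ffast-math (finite-only, non-trapping,
   unsafe optimizations), a division such as 1.0f / sqrtf (x) can be
   expanded via a reciprocal-square-root estimate plus a Newton-Raphson
   refinement step instead of a full-precision SQRTSS + DIVSS
   sequence.  */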
19183 /* Helper for avx_vpermilps256_operand et al. This is also used by
19184 the expansion functions to turn the parallel back into a mask.
19185 The return value is 0 for no match and the imm8+1 for a match. */
19188 avx_vpermilp_parallel (rtx par
, machine_mode mode
)
19190 unsigned i
, nelt
= GET_MODE_NUNITS (mode
);
19192 unsigned char ipar
[16] = {}; /* Silence -Wuninitialized warning. */
19194 if (XVECLEN (par
, 0) != (int) nelt
)
19197 /* Validate that all of the elements are constants, and not totally
19198 out of range. Copy the data into an integral array to make the
19199 subsequent checks easier. */
19200 for (i
= 0; i
< nelt
; ++i
)
19202 rtx er
= XVECEXP (par
, 0, i
);
19203 unsigned HOST_WIDE_INT ei
;
19205 if (!CONST_INT_P (er
))
19216 /* In the 512-bit DFmode case, we can only move elements within
19217 a 128-bit lane. First fill the second part of the mask,
19219 for (i
= 4; i
< 6; ++i
)
19221 if (ipar
[i
] < 4 || ipar
[i
] >= 6)
19223 mask
|= (ipar
[i
] - 4) << i
;
19225 for (i
= 6; i
< 8; ++i
)
19229 mask
|= (ipar
[i
] - 6) << i
;
19234 /* In the 256-bit DFmode case, we can only move elements within
19236 for (i
= 0; i
< 2; ++i
)
19240 mask
|= ipar
[i
] << i
;
19242 for (i
= 2; i
< 4; ++i
)
19246 mask
|= (ipar
[i
] - 2) << i
;
19251 /* In 512 bit SFmode case, permutation in the upper 256 bits
19252 must mirror the permutation in the lower 256-bits. */
19253 for (i
= 0; i
< 8; ++i
)
19254 if (ipar
[i
] + 8 != ipar
[i
+ 8])
19259 /* In 256 bit SFmode case, we have full freedom of
19260 movement within the low 128-bit lane, but the high 128-bit
19261 lane must mirror the exact same pattern. */
19262 for (i
= 0; i
< 4; ++i
)
19263 if (ipar
[i
] + 4 != ipar
[i
+ 4])
19270 /* In the 128-bit case, we've full freedom in the placement of
19271 the elements from the source operand. */
19272 for (i
= 0; i
< nelt
; ++i
)
19273 mask
|= ipar
[i
] << (i
* (nelt
/ 2));
19277 gcc_unreachable ();
19280 /* Make sure success has a non-zero value by adding one. */
19284 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
19285 the expansion functions to turn the parallel back into a mask.
19286 The return value is 0 for no match and the imm8+1 for a match. */
19289 avx_vperm2f128_parallel (rtx par
, machine_mode mode
)
19291 unsigned i
, nelt
= GET_MODE_NUNITS (mode
), nelt2
= nelt
/ 2;
19293 unsigned char ipar
[8] = {}; /* Silence -Wuninitialized warning. */
19295 if (XVECLEN (par
, 0) != (int) nelt
)
19298 /* Validate that all of the elements are constants, and not totally
19299 out of range. Copy the data into an integral array to make the
19300 subsequent checks easier. */
19301 for (i
= 0; i
< nelt
; ++i
)
19303 rtx er
= XVECEXP (par
, 0, i
);
19304 unsigned HOST_WIDE_INT ei
;
19306 if (!CONST_INT_P (er
))
19309 if (ei
>= 2 * nelt
)
19314 /* Validate that the halves of the permute are halves. */
19315 for (i
= 0; i
< nelt2
- 1; ++i
)
19316 if (ipar
[i
] + 1 != ipar
[i
+ 1])
19318 for (i
= nelt2
; i
< nelt
- 1; ++i
)
19319 if (ipar
[i
] + 1 != ipar
[i
+ 1])
19322 /* Reconstruct the mask. */
19323 for (i
= 0; i
< 2; ++i
)
19325 unsigned e
= ipar
[i
* nelt2
];
19329 mask
|= e
<< (i
* 4);
19332 /* Make sure success has a non-zero value by adding one. */
/* Return a register priority for hard reg REGNO.  */

static int
ix86_register_priority (int hard_regno)
{
  /* ebp and r13 as the base always wants a displacement, r12 as the
     base always wants an index.  So discourage their usage in an
     address.  */
  if (hard_regno == R12_REG || hard_regno == R13_REG)
    return 0;
  if (hard_regno == BP_REG)
    return 1;
  /* New x86-64 int registers result in bigger code size.  Discourage them.  */
  if (REX_INT_REGNO_P (hard_regno))
    return 2;
  /* New x86-64 SSE registers result in bigger code size.  Discourage them.  */
  if (REX_SSE_REGNO_P (hard_regno))
    return 2;
  if (EXT_REX_SSE_REGNO_P (hard_regno))
    return 1;
  /* Usage of AX register results in smaller code.  Prefer it.  */
  if (hard_regno == AX_REG)
    return 4;
  return 3;
}
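
/* Net effect, roughly: the allocator prefers EAX and the other legacy
   registers, and only reaches for R12/R13 or the extended SSE
   registers under pressure, since those cost extra REX/EVEX prefix or
   addressing bytes.  */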
19361 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
19363 Put float CONST_DOUBLE in the constant pool instead of fp regs.
19364 QImode must go into class Q_REGS.
19365 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
19366 movdf to do mem-to-mem moves through integer regs. */
19369 ix86_preferred_reload_class (rtx x
, reg_class_t regclass
)
19371 machine_mode mode
= GET_MODE (x
);
19373 /* We're only allowed to return a subclass of CLASS. Many of the
19374 following checks fail for NO_REGS, so eliminate that early. */
19375 if (regclass
== NO_REGS
)
19378 /* All classes can load zeros. */
19379 if (x
== CONST0_RTX (mode
))
19382 /* Force constants into memory if we are loading a (nonzero) constant into
19383 an MMX, SSE or MASK register. This is because there are no MMX/SSE/MASK
19384 instructions to load from a constant. */
19386 && (MAYBE_MMX_CLASS_P (regclass
)
19387 || MAYBE_SSE_CLASS_P (regclass
)
19388 || MAYBE_MASK_CLASS_P (regclass
)))
19391 /* Floating-point constants need more complex checks. */
19392 if (CONST_DOUBLE_P (x
))
19394 /* General regs can load everything. */
19395 if (INTEGER_CLASS_P (regclass
))
19398 /* Floats can load 0 and 1 plus some others. Note that we eliminated
19399 zero above. We only want to wind up preferring 80387 registers if
19400 we plan on doing computation with them. */
19401 if (IS_STACK_MODE (mode
)
19402 && standard_80387_constant_p (x
) > 0)
19404 /* Limit class to FP regs. */
19405 if (FLOAT_CLASS_P (regclass
))
19412 /* Prefer SSE if we can use them for math. Also allow integer regs
19413 when moves between register units are cheap. */
19414 if (SSE_FLOAT_MODE_P (mode
) && TARGET_SSE_MATH
)
19416 if (TARGET_INTER_UNIT_MOVES_FROM_VEC
19417 && TARGET_INTER_UNIT_MOVES_TO_VEC
19418 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (word_mode
))
19419 return INT_SSE_CLASS_P (regclass
) ? regclass
: NO_REGS
;
19421 return SSE_CLASS_P (regclass
) ? regclass
: NO_REGS
;
19424 /* Generally when we see PLUS here, it's the function invariant
19425 (plus soft-fp const_int). Which can only be computed into general
19427 if (GET_CODE (x
) == PLUS
)
19428 return INTEGER_CLASS_P (regclass
) ? regclass
: NO_REGS
;
19430 /* QImode constants are easy to load, but non-constant QImode data
19431 must go into Q_REGS or ALL_MASK_REGS. */
19432 if (GET_MODE (x
) == QImode
&& !CONSTANT_P (x
))
19434 if (Q_CLASS_P (regclass
))
19436 else if (reg_class_subset_p (Q_REGS
, regclass
))
19438 else if (MASK_CLASS_P (regclass
))
19447 /* Discourage putting floating-point values in SSE registers unless
19448 SSE math is being used, and likewise for the 387 registers. */
19450 ix86_preferred_output_reload_class (rtx x
, reg_class_t regclass
)
19452 /* Restrict the output reload class to the register bank that we are doing
19453 math on. If we would like not to return a subset of CLASS, reject this
19454 alternative: if reload cannot do this, it will still use its choice. */
19455 machine_mode mode
= GET_MODE (x
);
19456 if (SSE_FLOAT_MODE_P (mode
) && TARGET_SSE_MATH
)
19457 return MAYBE_SSE_CLASS_P (regclass
) ? ALL_SSE_REGS
: NO_REGS
;
19459 if (IS_STACK_MODE (mode
))
19460 return FLOAT_CLASS_P (regclass
) ? regclass
: NO_REGS
;
19466 ix86_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass
,
19467 machine_mode mode
, secondary_reload_info
*sri
)
19469 /* Double-word spills from general registers to non-offsettable memory
19470 references (zero-extended addresses) require special handling. */
19473 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
19474 && INTEGER_CLASS_P (rclass
)
19475 && !offsettable_memref_p (x
))
19478 ? CODE_FOR_reload_noff_load
19479 : CODE_FOR_reload_noff_store
);
19480 /* Add the cost of moving address to a temporary. */
19481 sri
->extra_cost
= 1;
19486 /* QImode spills from non-QI registers require
19487 intermediate register on 32bit targets. */
19489 && ((!TARGET_64BIT
&& !in_p
19490 && INTEGER_CLASS_P (rclass
)
19491 && MAYBE_NON_Q_CLASS_P (rclass
))
19492 || (!TARGET_AVX512DQ
19493 && MAYBE_MASK_CLASS_P (rclass
))))
19495 int regno
= true_regnum (x
);
19497 /* Return Q_REGS if the operand is in memory. */
19504 /* Require movement to gpr, and then store to memory. */
19505 if ((mode
== HFmode
|| mode
== HImode
|| mode
== V2QImode
19508 && SSE_CLASS_P (rclass
)
19509 && !in_p
&& MEM_P (x
))
19511 sri
->extra_cost
= 1;
19512 return GENERAL_REGS
;
19515 /* This condition handles corner case where an expression involving
19516 pointers gets vectorized. We're trying to use the address of a
19517 stack slot as a vector initializer.
19519 (set (reg:V2DI 74 [ vect_cst_.2 ])
19520 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
19522 Eventually frame gets turned into sp+offset like this:
19524 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
19525 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
19526 (const_int 392 [0x188]))))
19528 That later gets turned into:
19530 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
19531 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
19532 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
19534 We'll have the following reload recorded:
19536 Reload 0: reload_in (DI) =
19537 (plus:DI (reg/f:DI 7 sp)
19538 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
19539 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
19540 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
19541 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
19542 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
19543 reload_reg_rtx: (reg:V2DI 22 xmm1)
19545 Which isn't going to work since SSE instructions can't handle scalar
19546 additions. Returning GENERAL_REGS forces the addition into integer
19547 register and reload can handle subsequent reloads without problems. */
19549 if (in_p
&& GET_CODE (x
) == PLUS
19550 && SSE_CLASS_P (rclass
)
19551 && SCALAR_INT_MODE_P (mode
))
19552 return GENERAL_REGS
;
19557 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
19560 ix86_class_likely_spilled_p (reg_class_t rclass
)
19571 case SSE_FIRST_REG
:
19573 case FP_SECOND_REG
:
19583 /* Return true if a set of DST by the expression SRC should be allowed.
19584 This prevents complex sets of likely_spilled hard regs before reload. */
19587 ix86_hardreg_mov_ok (rtx dst
, rtx src
)
19589 /* Avoid complex sets of likely_spilled hard registers before reload. */
19590 if (REG_P (dst
) && HARD_REGISTER_P (dst
)
19591 && !REG_P (src
) && !MEM_P (src
)
19592 && !(VECTOR_MODE_P (GET_MODE (dst
))
19593 ? standard_sse_constant_p (src
, GET_MODE (dst
))
19594 : x86_64_immediate_operand (src
, GET_MODE (dst
)))
19595 && ix86_class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dst
)))
19596 && !reload_completed
)
19601 /* If we are copying between registers from different register sets
19602 (e.g. FP and integer), we may need a memory location.
19604 The function can't work reliably when one of the CLASSES is a class
19605 containing registers from multiple sets. We avoid this by never combining
19606 different sets in a single alternative in the machine description.
19607 Ensure that this constraint holds to avoid unexpected surprises.
19609 When STRICT is false, we are being called from REGISTER_MOVE_COST,
19610 so do not enforce these sanity checks.
19612 To optimize register_move_cost performance, define inline variant. */
19615 inline_secondary_memory_needed (machine_mode mode
, reg_class_t class1
,
19616 reg_class_t class2
, int strict
)
19618 if (lra_in_progress
&& (class1
== NO_REGS
|| class2
== NO_REGS
))
19621 if (MAYBE_FLOAT_CLASS_P (class1
) != FLOAT_CLASS_P (class1
)
19622 || MAYBE_FLOAT_CLASS_P (class2
) != FLOAT_CLASS_P (class2
)
19623 || MAYBE_SSE_CLASS_P (class1
) != SSE_CLASS_P (class1
)
19624 || MAYBE_SSE_CLASS_P (class2
) != SSE_CLASS_P (class2
)
19625 || MAYBE_MMX_CLASS_P (class1
) != MMX_CLASS_P (class1
)
19626 || MAYBE_MMX_CLASS_P (class2
) != MMX_CLASS_P (class2
)
19627 || MAYBE_MASK_CLASS_P (class1
) != MASK_CLASS_P (class1
)
19628 || MAYBE_MASK_CLASS_P (class2
) != MASK_CLASS_P (class2
))
19630 gcc_assert (!strict
|| lra_in_progress
);
19634 if (FLOAT_CLASS_P (class1
) != FLOAT_CLASS_P (class2
))
19637 /* ??? This is a lie. We do have moves between mmx/general, and for
19638 mmx/sse2. But by saying we need secondary memory we discourage the
19639 register allocator from using the mmx registers unless needed. */
19640 if (MMX_CLASS_P (class1
) != MMX_CLASS_P (class2
))
19643 /* Between mask and general, we have moves no larger than word size. */
19644 if (MASK_CLASS_P (class1
) != MASK_CLASS_P (class2
))
19646 if (!(INTEGER_CLASS_P (class1
) || INTEGER_CLASS_P (class2
))
19647 || GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
19651 if (SSE_CLASS_P (class1
) != SSE_CLASS_P (class2
))
19653 /* SSE1 doesn't have any direct moves from other classes. */
19657 if (!(INTEGER_CLASS_P (class1
) || INTEGER_CLASS_P (class2
)))
19660 int msize
= GET_MODE_SIZE (mode
);
19662 /* Between SSE and general, we have moves no larger than word size. */
19663 if (msize
> UNITS_PER_WORD
)
19666 /* In addition to SImode moves, HImode moves are supported for SSE2 and above,
19667 Use vmovw with AVX512FP16, or pinsrw/pextrw without AVX512FP16. */
19668 int minsize
= GET_MODE_SIZE (TARGET_SSE2
? HImode
: SImode
);
19670 if (msize
< minsize
)
19673 /* If the target says that inter-unit moves are more expensive
19674 than moving through memory, then don't generate them. */
19675 if ((SSE_CLASS_P (class1
) && !TARGET_INTER_UNIT_MOVES_FROM_VEC
)
19676 || (SSE_CLASS_P (class2
) && !TARGET_INTER_UNIT_MOVES_TO_VEC
))
/* Implement TARGET_SECONDARY_MEMORY_NEEDED.  */

static bool
ix86_secondary_memory_needed (machine_mode mode, reg_class_t class1,
                              reg_class_t class2)
{
  return inline_secondary_memory_needed (mode, class1, class2, true);
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.

   get_secondary_mem widens integral modes to BITS_PER_WORD.
   There is no need to emit full 64 bit move on 64 bit targets
   for integral modes that can be moved using 32 bit move.  */

static machine_mode
ix86_secondary_memory_needed_mode (machine_mode mode)
{
  if (GET_MODE_BITSIZE (mode) < 32 && INTEGRAL_MODE_P (mode))
    return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();

  return mode;
}
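
/* Illustrative only: a QImode value spilled between register files is
   therefore reloaded through an SImode stack slot, e.g. a byte moved
   from %al to an SSE register via memory uses a 4-byte temporary
   rather than a 1-byte one, avoiding sub-word memory accesses.  */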
19706 /* Implement the TARGET_CLASS_MAX_NREGS hook.
19708 On the 80386, this is the size of MODE in words,
19709 except in the FP regs, where a single reg is always enough. */
19711 static unsigned char
19712 ix86_class_max_nregs (reg_class_t rclass
, machine_mode mode
)
19714 if (MAYBE_INTEGER_CLASS_P (rclass
))
19716 if (mode
== XFmode
)
19717 return (TARGET_64BIT
? 2 : 3);
19718 else if (mode
== XCmode
)
19719 return (TARGET_64BIT
? 4 : 6);
19721 return CEIL (GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
19725 if (COMPLEX_MODE_P (mode
))
19732 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
19735 ix86_can_change_mode_class (machine_mode from
, machine_mode to
,
19736 reg_class_t regclass
)
19741 /* x87 registers can't do subreg at all, as all values are reformatted
19742 to extended precision. */
19743 if (MAYBE_FLOAT_CLASS_P (regclass
))
19746 if (MAYBE_SSE_CLASS_P (regclass
) || MAYBE_MMX_CLASS_P (regclass
))
19748 /* Vector registers do not support QI or HImode loads. If we don't
19749 disallow a change to these modes, reload will assume it's ok to
19750 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
19751 the vec_dupv4hi pattern.
19752 NB: SSE2 can load 16bit data to sse register via pinsrw. */
19753 int mov_size
= MAYBE_SSE_CLASS_P (regclass
) && TARGET_SSE2
? 2 : 4;
19754 if (GET_MODE_SIZE (from
) < mov_size
19755 || GET_MODE_SIZE (to
) < mov_size
)
19762 /* Return index of MODE in the sse load/store tables. */
19765 sse_store_index (machine_mode mode
)
19767 /* NB: Use SFmode cost for HFmode instead of adding HFmode load/store
19768 costs to processor_costs, which requires changes to all entries in
19769 processor cost table. */
19770 if (mode
== E_HFmode
)
19773 switch (GET_MODE_SIZE (mode
))
19790 /* Return the cost of moving data of mode M between a
19791 register and memory. A value of 2 is the default; this cost is
19792 relative to those in `REGISTER_MOVE_COST'.
19794 This function is used extensively by register_move_cost that is used to
19795 build tables at startup. Make it inline in this case.
19796 When IN is 2, return maximum of in and out move cost.
19798 If moving between registers and memory is more expensive than
19799 between two registers, you should define this macro to express the
19802 Model also increased moving costs of QImode registers in non
19806 inline_memory_move_cost (machine_mode mode
, enum reg_class regclass
, int in
)
19810 if (FLOAT_CLASS_P (regclass
))
19828 return MAX (ix86_cost
->hard_register
.fp_load
[index
],
19829 ix86_cost
->hard_register
.fp_store
[index
]);
19830 return in
? ix86_cost
->hard_register
.fp_load
[index
]
19831 : ix86_cost
->hard_register
.fp_store
[index
];
19833 if (SSE_CLASS_P (regclass
))
19835 int index
= sse_store_index (mode
);
19839 return MAX (ix86_cost
->hard_register
.sse_load
[index
],
19840 ix86_cost
->hard_register
.sse_store
[index
]);
19841 return in
? ix86_cost
->hard_register
.sse_load
[index
]
19842 : ix86_cost
->hard_register
.sse_store
[index
];
19844 if (MASK_CLASS_P (regclass
))
19847 switch (GET_MODE_SIZE (mode
))
19855 /* DImode loads and stores assumed to cost the same as SImode. */
19865 return MAX (ix86_cost
->hard_register
.mask_load
[index
],
19866 ix86_cost
->hard_register
.mask_store
[index
]);
19867 return in
? ix86_cost
->hard_register
.mask_load
[2]
19868 : ix86_cost
->hard_register
.mask_store
[2];
19870 if (MMX_CLASS_P (regclass
))
19873 switch (GET_MODE_SIZE (mode
))
19885 return MAX (ix86_cost
->hard_register
.mmx_load
[index
],
19886 ix86_cost
->hard_register
.mmx_store
[index
]);
19887 return in
? ix86_cost
->hard_register
.mmx_load
[index
]
19888 : ix86_cost
->hard_register
.mmx_store
[index
];
19890 switch (GET_MODE_SIZE (mode
))
19893 if (Q_CLASS_P (regclass
) || TARGET_64BIT
)
19896 return ix86_cost
->hard_register
.int_store
[0];
19897 if (TARGET_PARTIAL_REG_DEPENDENCY
19898 && optimize_function_for_speed_p (cfun
))
19899 cost
= ix86_cost
->hard_register
.movzbl_load
;
19901 cost
= ix86_cost
->hard_register
.int_load
[0];
19903 return MAX (cost
, ix86_cost
->hard_register
.int_store
[0]);
19909 return MAX (ix86_cost
->hard_register
.movzbl_load
,
19910 ix86_cost
->hard_register
.int_store
[0] + 4);
19912 return ix86_cost
->hard_register
.movzbl_load
;
19914 return ix86_cost
->hard_register
.int_store
[0] + 4;
19921 cost
= MAX (ix86_cost
->hard_register
.int_load
[1],
19922 ix86_cost
->hard_register
.int_store
[1]);
19924 cost
= in
? ix86_cost
->hard_register
.int_load
[1]
19925 : ix86_cost
->hard_register
.int_store
[1];
19927 if (mode
== E_HFmode
)
19929 /* Prefer SSE over GPR for HFmode. */
19931 int index
= sse_store_index (mode
);
19933 sse_cost
= MAX (ix86_cost
->hard_register
.sse_load
[index
],
19934 ix86_cost
->hard_register
.sse_store
[index
]);
19937 ? ix86_cost
->hard_register
.sse_load
[index
]
19938 : ix86_cost
->hard_register
.sse_store
[index
]);
19939 if (sse_cost
>= cost
)
19940 cost
= sse_cost
+ 1;
19946 cost
= MAX (ix86_cost
->hard_register
.int_load
[2],
19947 ix86_cost
->hard_register
.int_store
[2]);
19949 cost
= ix86_cost
->hard_register
.int_load
[2];
19951 cost
= ix86_cost
->hard_register
.int_store
[2];
19952 /* Multiply with the number of GPR moves needed. */
19953 return cost
* CEIL ((int) GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
19958 ix86_memory_move_cost (machine_mode mode
, reg_class_t regclass
, bool in
)
19960 return inline_memory_move_cost (mode
, (enum reg_class
) regclass
, in
? 1 : 0);
19964 /* Return the cost of moving data from a register in class CLASS1 to
19965 one in class CLASS2.
19967 It is not required that the cost always equal 2 when FROM is the same as TO;
19968 on some machines it is expensive to move between registers if they are not
19969 general registers. */
19972 ix86_register_move_cost (machine_mode mode
, reg_class_t class1_i
,
19973 reg_class_t class2_i
)
19975 enum reg_class class1
= (enum reg_class
) class1_i
;
19976 enum reg_class class2
= (enum reg_class
) class2_i
;
19978 /* In case we require secondary memory, compute cost of the store followed
19979 by load. In order to avoid bad register allocation choices, we need
19980 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
19982 if (inline_secondary_memory_needed (mode
, class1
, class2
, false))
19986 cost
+= inline_memory_move_cost (mode
, class1
, 2);
19987 cost
+= inline_memory_move_cost (mode
, class2
, 2);
19989 /* In case of copying from general_purpose_register we may emit multiple
19990 stores followed by single load causing memory size mismatch stall.
19991 Count this as arbitrarily high cost of 20. */
19992 if (GET_MODE_BITSIZE (mode
) > BITS_PER_WORD
19993 && TARGET_MEMORY_MISMATCH_STALL
19994 && targetm
.class_max_nregs (class1
, mode
)
19995 > targetm
.class_max_nregs (class2
, mode
))
19998 /* In the case of FP/MMX moves, the registers actually overlap, and we
19999 have to switch modes in order to treat them differently. */
20000 if ((MMX_CLASS_P (class1
) && MAYBE_FLOAT_CLASS_P (class2
))
20001 || (MMX_CLASS_P (class2
) && MAYBE_FLOAT_CLASS_P (class1
)))
20007 /* Moves between MMX and non-MMX units require secondary memory. */
20008 if (MMX_CLASS_P (class1
) != MMX_CLASS_P (class2
))
20009 gcc_unreachable ();
20011 if (SSE_CLASS_P (class1
) != SSE_CLASS_P (class2
))
20012 return (SSE_CLASS_P (class1
)
20013 ? ix86_cost
->hard_register
.sse_to_integer
20014 : ix86_cost
->hard_register
.integer_to_sse
);
20016 /* Moves between mask register and GPR. */
20017 if (MASK_CLASS_P (class1
) != MASK_CLASS_P (class2
))
20019 return (MASK_CLASS_P (class1
)
20020 ? ix86_cost
->hard_register
.mask_to_integer
20021 : ix86_cost
->hard_register
.integer_to_mask
);
20023 /* Moving between mask registers. */
20024 if (MASK_CLASS_P (class1
) && MASK_CLASS_P (class2
))
20025 return ix86_cost
->hard_register
.mask_move
;
20027 if (MAYBE_FLOAT_CLASS_P (class1
))
20028 return ix86_cost
->hard_register
.fp_move
;
20029 if (MAYBE_SSE_CLASS_P (class1
))
20031 if (GET_MODE_BITSIZE (mode
) <= 128)
20032 return ix86_cost
->hard_register
.xmm_move
;
20033 if (GET_MODE_BITSIZE (mode
) <= 256)
20034 return ix86_cost
->hard_register
.ymm_move
;
20035 return ix86_cost
->hard_register
.zmm_move
;
20037 if (MAYBE_MMX_CLASS_P (class1
))
20038 return ix86_cost
->hard_register
.mmx_move
;
20042 /* Implement TARGET_HARD_REGNO_NREGS. This is ordinarily the length in
20043 words of a value of mode MODE but can be less for certain modes in
20044 special long registers.
20046 Actually there are no two word move instructions for consecutive
20047 registers. And only registers 0-3 may have mov byte instructions
20048 applied to them. */
20050 static unsigned int
20051 ix86_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
20053 if (GENERAL_REGNO_P (regno
))
20055 if (mode
== XFmode
)
20056 return TARGET_64BIT
? 2 : 3;
20057 if (mode
== XCmode
)
20058 return TARGET_64BIT
? 4 : 6;
20059 return CEIL (GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
20061 if (COMPLEX_MODE_P (mode
))
20063 /* Register pair for mask registers. */
20064 if (mode
== P2QImode
|| mode
== P2HImode
)
20066 if (mode
== V64SFmode
|| mode
== V64SImode
)
/* Implement REGMODE_NATURAL_SIZE(MODE).  */

unsigned int
ix86_regmode_natural_size (machine_mode mode)
{
  if (mode == P2HImode || mode == P2QImode)
    return GET_MODE_SIZE (mode) / 2;
  return UNITS_PER_WORD;
}
20080 /* Implement TARGET_HARD_REGNO_MODE_OK. */
20083 ix86_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
20085 /* Flags and only flags can only hold CCmode values. */
20086 if (CC_REGNO_P (regno
))
20087 return GET_MODE_CLASS (mode
) == MODE_CC
;
20088 if (GET_MODE_CLASS (mode
) == MODE_CC
20089 || GET_MODE_CLASS (mode
) == MODE_RANDOM
)
20091 if (STACK_REGNO_P (regno
))
20092 return VALID_FP_MODE_P (mode
);
20093 if (MASK_REGNO_P (regno
))
20095 /* Register pair only starts at even register number. */
20096 if ((mode
== P2QImode
|| mode
== P2HImode
))
20097 return MASK_PAIR_REGNO_P(regno
);
20099 return ((TARGET_AVX512F
&& VALID_MASK_REG_MODE (mode
))
20100 || (TARGET_AVX512BW
20101 && VALID_MASK_AVX512BW_MODE (mode
)));
20104 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
20107 if (SSE_REGNO_P (regno
))
20109 /* We implement the move patterns for all vector modes into and
20110 out of SSE registers, even when no operation instructions
20113 /* For AVX-512 we allow, regardless of regno:
20115 - any of 512-bit wide vector mode
20116 - any scalar mode. */
20118 && (VALID_AVX512F_REG_OR_XI_MODE (mode
)
20119 || VALID_AVX512F_SCALAR_MODE (mode
)))
20122 /* For AVX-5124FMAPS or AVX-5124VNNIW
20123 allow V64SF and V64SI modes for special regnos. */
20124 if ((TARGET_AVX5124FMAPS
|| TARGET_AVX5124VNNIW
)
20125 && (mode
== V64SFmode
|| mode
== V64SImode
)
20126 && MOD4_SSE_REGNO_P (regno
))
20129 /* TODO check for QI/HI scalars. */
20130 /* AVX512VL allows sse regs16+ for 128/256 bit modes. */
20131 if (TARGET_AVX512VL
20132 && (VALID_AVX256_REG_OR_OI_MODE (mode
)
20133 || VALID_AVX512VL_128_REG_MODE (mode
)))
20136 /* xmm16-xmm31 are only available for AVX-512. */
20137 if (EXT_REX_SSE_REGNO_P (regno
))
20140 /* Use pinsrw/pextrw to mov 16-bit data from/to sse to/from integer. */
20141 if (TARGET_SSE2
&& mode
== HImode
)
20144 /* OImode and AVX modes are available only when AVX is enabled. */
20145 return ((TARGET_AVX
20146 && VALID_AVX256_REG_OR_OI_MODE (mode
))
20147 || VALID_SSE_REG_MODE (mode
)
20148 || VALID_SSE2_REG_MODE (mode
)
20149 || VALID_MMX_REG_MODE (mode
)
20150 || VALID_MMX_REG_MODE_3DNOW (mode
));
20152 if (MMX_REGNO_P (regno
))
20154 /* We implement the move patterns for 3DNOW modes even in MMX mode,
20155 so if the register is available at all, then we can move data of
20156 the given mode into or out of it. */
20157 return (VALID_MMX_REG_MODE (mode
)
20158 || VALID_MMX_REG_MODE_3DNOW (mode
));
20161 if (mode
== QImode
)
20163 /* Take care for QImode values - they can be in non-QI regs,
20164 but then they do cause partial register stalls. */
20165 if (ANY_QI_REGNO_P (regno
))
20167 if (!TARGET_PARTIAL_REG_STALL
)
20169 /* LRA checks if the hard register is OK for the given mode.
20170 QImode values can live in non-QI regs, so we allow all
20172 if (lra_in_progress
)
20174 return !can_create_pseudo_p ();
20176 /* We handle both integer and floats in the general purpose registers. */
20177 else if (VALID_INT_MODE_P (mode
)
20178 || VALID_FP_MODE_P (mode
))
20180 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
20181 on to use that value in smaller contexts, this can easily force a
20182 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
20183 supporting DImode, allow it. */
20184 else if (VALID_MMX_REG_MODE_3DNOW (mode
) || VALID_MMX_REG_MODE (mode
))
20190 /* Implement TARGET_INSN_CALLEE_ABI. */
20192 const predefined_function_abi
&
20193 ix86_insn_callee_abi (const rtx_insn
*insn
)
20195 unsigned int abi_id
= 0;
20196 rtx pat
= PATTERN (insn
);
20197 if (vzeroupper_pattern (pat
, VOIDmode
))
20198 abi_id
= ABI_VZEROUPPER
;
20200 return function_abis
[abi_id
];
20203 /* Initialize function_abis with corresponding abi_id,
20204 currently only handle vzeroupper. */
20206 ix86_initialize_callee_abi (unsigned int abi_id
)
20208 gcc_assert (abi_id
== ABI_VZEROUPPER
);
20209 predefined_function_abi
&vzeroupper_abi
= function_abis
[abi_id
];
20210 if (!vzeroupper_abi
.initialized_p ())
20212 HARD_REG_SET full_reg_clobbers
;
20213 CLEAR_HARD_REG_SET (full_reg_clobbers
);
20214 vzeroupper_abi
.initialize (ABI_VZEROUPPER
, full_reg_clobbers
);
20219 ix86_expand_avx_vzeroupper (void)
20221 /* Initialize vzeroupper_abi here. */
20222 ix86_initialize_callee_abi (ABI_VZEROUPPER
);
20223 rtx_insn
*insn
= emit_call_insn (gen_avx_vzeroupper_callee_abi ());
20224 /* Return false for non-local goto in can_nonlocal_goto. */
20225 make_reg_eh_region_note (insn
, 0, INT_MIN
);
20226 /* Flag used for call_insn indicates it's a fake call. */
20227 RTX_FLAG (insn
, used
) = 1;
20231 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. The only ABI that
20232 saves SSE registers across calls is Win64 (thus no need to check the
20233 current ABI here), and with AVX enabled Win64 only guarantees that
20234 the low 16 bytes are saved. */
20237 ix86_hard_regno_call_part_clobbered (unsigned int abi_id
, unsigned int regno
,
20240 /* Special ABI for vzeroupper which only clobber higher part of sse regs. */
20241 if (abi_id
== ABI_VZEROUPPER
)
20242 return (GET_MODE_SIZE (mode
) > 16
20243 && ((TARGET_64BIT
&& REX_SSE_REGNO_P (regno
))
20244 || LEGACY_SSE_REGNO_P (regno
)));
20246 return SSE_REGNO_P (regno
) && GET_MODE_SIZE (mode
) > 16;
/* A subroutine of ix86_modes_tieable_p.  Return true if MODE is a
   tieable integer mode.  */

static bool
ix86_tieable_integer_mode_p (machine_mode mode)
{
  switch (mode)
    {
    case E_HImode:
    case E_SImode:
      return true;

    case E_QImode:
      return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

    case E_DImode:
      return TARGET_64BIT;

    default:
      return false;
    }
}
20272 /* Implement TARGET_MODES_TIEABLE_P.
20274 Return true if MODE1 is accessible in a register that can hold MODE2
20275 without copying. That is, all register classes that can hold MODE2
20276 can also hold MODE1. */
20279 ix86_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
20281 if (mode1
== mode2
)
20284 if (ix86_tieable_integer_mode_p (mode1
)
20285 && ix86_tieable_integer_mode_p (mode2
))
20288 /* MODE2 being XFmode implies fp stack or general regs, which means we
20289 can tie any smaller floating point modes to it. Note that we do not
20290 tie this with TFmode. */
20291 if (mode2
== XFmode
)
20292 return mode1
== SFmode
|| mode1
== DFmode
;
20294 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
20295 that we can tie it with SFmode. */
20296 if (mode2
== DFmode
)
20297 return mode1
== SFmode
;
20299 /* If MODE2 is only appropriate for an SSE register, then tie with
20300 any other mode acceptable to SSE registers. */
20301 if (GET_MODE_SIZE (mode2
) == 64
20302 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode2
))
20303 return (GET_MODE_SIZE (mode1
) == 64
20304 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode1
));
20305 if (GET_MODE_SIZE (mode2
) == 32
20306 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode2
))
20307 return (GET_MODE_SIZE (mode1
) == 32
20308 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode1
));
20309 if (GET_MODE_SIZE (mode2
) == 16
20310 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode2
))
20311 return (GET_MODE_SIZE (mode1
) == 16
20312 && ix86_hard_regno_mode_ok (FIRST_SSE_REG
, mode1
));
20314 /* If MODE2 is appropriate for an MMX register, then tie
20315 with any other mode acceptable to MMX registers. */
20316 if (GET_MODE_SIZE (mode2
) == 8
20317 && ix86_hard_regno_mode_ok (FIRST_MMX_REG
, mode2
))
20318 return (GET_MODE_SIZE (mode1
) == 8
20319 && ix86_hard_regno_mode_ok (FIRST_MMX_REG
, mode1
));
20321 /* SCmode and DImode can be tied. */
20322 if ((mode1
== E_SCmode
&& mode2
== E_DImode
)
20323 || (mode1
== E_DImode
&& mode2
== E_SCmode
))
20324 return TARGET_64BIT
;
20326 /* [SD]Cmode and V2[SD]Fmode modes can be tied. */
20327 if ((mode1
== E_SCmode
&& mode2
== E_V2SFmode
)
20328 || (mode1
== E_V2SFmode
&& mode2
== E_SCmode
)
20329 || (mode1
== E_DCmode
&& mode2
== E_V2DFmode
)
20330 || (mode1
== E_V2DFmode
&& mode2
== E_DCmode
))
/* Return the cost of moving between two registers of mode MODE.  */

static int
ix86_set_reg_reg_cost (machine_mode mode)
{
  unsigned int units = UNITS_PER_WORD;

  switch (GET_MODE_CLASS (mode))
    {
    default:
      break;

    case MODE_CC:
      units = GET_MODE_SIZE (CCmode);
      break;

    case MODE_FLOAT:
      if ((TARGET_SSE && mode == TFmode)
          || (TARGET_80387 && mode == XFmode)
          || ((TARGET_80387 || TARGET_SSE2) && mode == DFmode)
          || ((TARGET_80387 || TARGET_SSE) && mode == SFmode))
        units = GET_MODE_SIZE (mode);
      break;

    case MODE_COMPLEX_FLOAT:
      if ((TARGET_SSE && mode == TCmode)
          || (TARGET_80387 && mode == XCmode)
          || ((TARGET_80387 || TARGET_SSE2) && mode == DCmode)
          || ((TARGET_80387 || TARGET_SSE) && mode == SCmode))
        units = GET_MODE_SIZE (mode);
      break;

    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      if ((TARGET_AVX512F && VALID_AVX512F_REG_MODE (mode))
          || (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
          || (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
          || (TARGET_SSE && VALID_SSE_REG_MODE (mode))
          || ((TARGET_MMX || TARGET_MMX_WITH_SSE)
              && VALID_MMX_REG_MODE (mode)))
        units = GET_MODE_SIZE (mode);
      break;
    }

  /* Return the cost of moving between two registers of mode MODE,
     assuming that the move will be in pieces of at most UNITS bytes.  */
  return COSTS_N_INSNS (CEIL (GET_MODE_SIZE (mode), units));
}
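
/* Worked example (editorial illustration): on a 32-bit target a DImode
   register-to-register copy is assumed to move in UNITS_PER_WORD == 4
   byte pieces, giving COSTS_N_INSNS (CEIL (8, 4)) == COSTS_N_INSNS (2),
   while with SSE2 a V4SImode copy uses units == GET_MODE_SIZE (V4SImode)
   == 16 and costs COSTS_N_INSNS (1).  */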
/* Return cost of vector operation in MODE given that scalar version has
   COST.  */

static int
ix86_vec_cost (machine_mode mode, int cost)
{
  if (!VECTOR_MODE_P (mode))
    return cost;

  if (GET_MODE_BITSIZE (mode) == 128
      && TARGET_SSE_SPLIT_REGS)
    return cost * GET_MODE_BITSIZE (mode) / 64;
  else if (GET_MODE_BITSIZE (mode) > 128
           && TARGET_AVX256_SPLIT_REGS)
    return cost * GET_MODE_BITSIZE (mode) / 128;
  else if (GET_MODE_BITSIZE (mode) > 256
           && TARGET_AVX512_SPLIT_REGS)
    return cost * GET_MODE_BITSIZE (mode) / 256;
  return cost;
}
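
/* Worked example (editorial illustration): on a tuning with
   TARGET_AVX256_SPLIT_REGS set (256-bit operations are internally split
   into two 128-bit halves), a V8SFmode operation whose scalar cost is C
   is charged C * 256 / 128 == 2 * C; on tunings with none of the split
   flags the scalar cost is returned unchanged.  */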
/* Return cost of vec_widen_<s>mult_hi/lo_<mode>,
   vec_widen_<s>mul_hi/lo_<mode> is only available for VI124_AVX2.  */
static int
ix86_widen_mult_cost (const struct processor_costs *cost,
                      enum machine_mode mode, bool uns_p)
{
  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
  int extra_cost = 0;
  int basic_cost = 0;
  switch (mode)
    {
    case V8HImode:
    case V16HImode:
      if (!uns_p || mode == V16HImode)
        extra_cost = cost->sse_op * 2;
      basic_cost = cost->mulss * 2 + cost->sse_op * 4;
      break;
    case V4SImode:
    case V8SImode:
      /* pmulhw/pmullw can be used.  */
      basic_cost = cost->mulss * 2 + cost->sse_op * 2;
      break;
    case V2DImode:
      /* pmuludq under sse2, pmuldq under sse4.1, for sign_extend,
         require extra 4 mul, 4 add, 4 cmp and 2 shift.  */
      if (!TARGET_SSE4_1 && !uns_p)
        extra_cost = (cost->mulss + cost->addss + cost->sse_op) * 4
                      + cost->sse_op * 2;
      /* Fallthru.  */
    case V4DImode:
      basic_cost = cost->mulss * 2 + cost->sse_op * 4;
      break;
    default:
      /* Not implemented.  */
      return 100;
    }
  return ix86_vec_cost (mode, basic_cost + extra_cost);
}
20444 /* Return cost of multiplication in MODE. */
20447 ix86_multiplication_cost (const struct processor_costs
*cost
,
20448 enum machine_mode mode
)
20450 machine_mode inner_mode
= mode
;
20451 if (VECTOR_MODE_P (mode
))
20452 inner_mode
= GET_MODE_INNER (mode
);
20454 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
20455 return inner_mode
== DFmode
? cost
->mulsd
: cost
->mulss
;
20456 else if (X87_FLOAT_MODE_P (mode
))
20458 else if (FLOAT_MODE_P (mode
))
20459 return ix86_vec_cost (mode
,
20460 inner_mode
== DFmode
? cost
->mulsd
: cost
->mulss
);
20461 else if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
20466 /* Partial V*QImode is emulated with 4-6 insns. */
20467 if (TARGET_AVX512BW
&& TARGET_AVX512VL
)
20468 return ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 3);
20469 else if (TARGET_AVX2
)
20470 return ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 5);
20471 else if (TARGET_XOP
)
20472 return (ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 3)
20473 + cost
->sse_load
[2]);
20475 return (ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 4)
20476 + cost
->sse_load
[2]);
20479 /* V*QImode is emulated with 4-11 insns. */
20480 if (TARGET_AVX512BW
&& TARGET_AVX512VL
)
20481 return ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 3);
20482 else if (TARGET_AVX2
)
20483 return ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 8);
20484 else if (TARGET_XOP
)
20485 return (ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 5)
20486 + cost
->sse_load
[2]);
20488 return (ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 7)
20489 + cost
->sse_load
[2]);
20492 if (TARGET_AVX512BW
)
20493 return ix86_vec_cost (mode
, cost
->mulss
+ cost
->sse_op
* 3);
20495 return (ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 7)
20496 + cost
->sse_load
[3] * 2);
20499 return (ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 9)
20500 + cost
->sse_load
[3] * 2
20501 + cost
->sse_load
[4] * 2);
20504 /* pmulld is used in this case. No emulation is needed. */
20507 /* V4SImode is emulated with 7 insns. */
20509 return ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 5);
20513 /* vpmullq is used in this case. No emulation is needed. */
20514 if (TARGET_AVX512DQ
&& TARGET_AVX512VL
)
20516 /* V*DImode is emulated with 6-8 insns. */
20517 else if (TARGET_XOP
&& mode
== V2DImode
)
20518 return ix86_vec_cost (mode
, cost
->mulss
* 2 + cost
->sse_op
* 4);
20521 /* vpmullq is used in this case. No emulation is needed. */
20522 if (TARGET_AVX512DQ
&& mode
== V8DImode
)
20525 return ix86_vec_cost (mode
, cost
->mulss
* 3 + cost
->sse_op
* 5);
20529 return ix86_vec_cost (mode
, cost
->mulss
);
20532 return (cost
->mult_init
[MODE_INDEX (mode
)] + cost
->mult_bit
* 7);
/* Return cost of division in MODE.  */

static int
ix86_division_cost (const struct processor_costs *cost,
                    enum machine_mode mode)
{
  machine_mode inner_mode = mode;
  if (VECTOR_MODE_P (mode))
    inner_mode = GET_MODE_INNER (mode);

  if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode))
    return inner_mode == DFmode ? cost->divsd : cost->divss;
  else if (X87_FLOAT_MODE_P (mode))
    return cost->fdiv;
  else if (FLOAT_MODE_P (mode))
    return ix86_vec_cost (mode,
                          inner_mode == DFmode ? cost->divsd : cost->divss);

  return cost->divide[MODE_INDEX (mode)];
}
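
/* Worked example (editorial illustration): with SSE math a V2DFmode
   division is costed as ix86_vec_cost (V2DFmode, cost->divsd), while a
   scalar SImode division simply reads the per-mode table entry
   cost->divide[MODE_INDEX (SImode)].  */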
/* Return cost of shift in MODE.
   If CONSTANT_OP1 is true, the value of op1 is known and given in OP1_VAL.
   AND_IN_OP1 specifies whether op1 is the result of an AND, and
   SHIFT_AND_TRUNCATE whether op1 is a subreg of such an AND.

   SKIP_OP0/1 is set to true if cost of OP0/1 should be ignored.  */
20564 ix86_shift_rotate_cost (const struct processor_costs
*cost
,
20565 enum rtx_code code
,
20566 enum machine_mode mode
, bool constant_op1
,
20567 HOST_WIDE_INT op1_val
,
20569 bool shift_and_truncate
,
20570 bool *skip_op0
, bool *skip_op1
)
20573 *skip_op0
= *skip_op1
= false;
20575 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
20578 /* Cost of reading the memory. */
20586 /* Use vpbroadcast. */
20587 extra
= cost
->sse_op
;
20589 extra
= cost
->sse_load
[2];
20593 if (code
== ASHIFTRT
)
20601 else if (TARGET_AVX512BW
&& TARGET_AVX512VL
)
20604 return ix86_vec_cost (mode
, cost
->sse_op
* count
);
20606 else if (TARGET_SSE4_1
)
20608 else if (code
== ASHIFTRT
)
20612 return ix86_vec_cost (mode
, cost
->sse_op
* count
) + extra
;
20617 /* For XOP we use vpshab, which requires a broadcast of the
20618 value to the variable shift insn. For constants this
20619 means a V16Q const in mem; even when we can perform the
20620 shift with one insn set the cost to prefer paddb. */
20623 extra
= cost
->sse_load
[2];
20624 return ix86_vec_cost (mode
, cost
->sse_op
) + extra
;
20628 count
= (code
== ASHIFT
) ? 2 : 3;
20629 return ix86_vec_cost (mode
, cost
->sse_op
* count
);
20635 /* Use vpbroadcast. */
20636 extra
= cost
->sse_op
;
20638 extra
= (mode
== V16QImode
) ? cost
->sse_load
[2] : cost
->sse_load
[3];
20642 if (code
== ASHIFTRT
)
20650 else if (TARGET_SSE4_1
)
20652 else if (code
== ASHIFTRT
)
20656 return ix86_vec_cost (mode
, cost
->sse_op
* count
) + extra
;
20660 /* V*DImode arithmetic right shift is emulated. */
20661 if (code
== ASHIFTRT
&& !TARGET_AVX512VL
)
20666 count
= TARGET_SSE4_2
? 1 : 2;
20667 else if (TARGET_XOP
)
20672 else if (TARGET_XOP
)
20674 else if (TARGET_SSE4_2
)
20679 return ix86_vec_cost (mode
, cost
->sse_op
* count
);
20683 return ix86_vec_cost (mode
, cost
->sse_op
);
20687 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
20692 return cost
->shift_const
+ COSTS_N_INSNS (2);
20694 return cost
->shift_const
* 2;
20699 return cost
->shift_var
* 2;
20701 return cost
->shift_var
* 6 + COSTS_N_INSNS (2);
20707 return cost
->shift_const
;
20708 else if (shift_and_truncate
)
20711 *skip_op0
= *skip_op1
= true;
20712 /* Return the cost after shift-and truncation. */
20713 return cost
->shift_var
;
20716 return cost
->shift_var
;
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
20725 ix86_rtx_costs (rtx x
, machine_mode mode
, int outer_code_i
, int opno
,
20726 int *total
, bool speed
)
20729 enum rtx_code code
= GET_CODE (x
);
20730 enum rtx_code outer_code
= (enum rtx_code
) outer_code_i
;
20731 const struct processor_costs
*cost
20732 = speed
? ix86_tune_cost
: &ix86_size_cost
;
20738 if (register_operand (SET_DEST (x
), VOIDmode
)
20739 && register_operand (SET_SRC (x
), VOIDmode
))
20741 *total
= ix86_set_reg_reg_cost (GET_MODE (SET_DEST (x
)));
20745 if (register_operand (SET_SRC (x
), VOIDmode
))
20746 /* Avoid potentially incorrect high cost from rtx_costs
20747 for non-tieable SUBREGs. */
20751 src_cost
= rtx_cost (SET_SRC (x
), mode
, SET
, 1, speed
);
20753 if (CONSTANT_P (SET_SRC (x
)))
20754 /* Constant costs assume a base value of COSTS_N_INSNS (1) and add
20755 a small value, possibly zero for cheap constants. */
20756 src_cost
+= COSTS_N_INSNS (1);
20759 *total
= src_cost
+ rtx_cost (SET_DEST (x
), mode
, SET
, 0, speed
);
20766 if (x86_64_immediate_operand (x
, VOIDmode
))
20773 if (IS_STACK_MODE (mode
))
20774 switch (standard_80387_constant_p (x
))
20782 default: /* Other constants */
20789 switch (standard_sse_constant_p (x
, mode
))
20793 case 1: /* 0: xor eliminates false dependency */
20796 default: /* -1: cmp contains false dependency */
20802 case CONST_WIDE_INT
:
20803 /* Fall back to (MEM (SYMBOL_REF)), since that's where
20804 it'll probably end up. Add a penalty for size. */
20805 *total
= (COSTS_N_INSNS (1)
20806 + (!TARGET_64BIT
&& flag_pic
)
20807 + (GET_MODE_SIZE (mode
) <= 4
20808 ? 0 : GET_MODE_SIZE (mode
) <= 8 ? 1 : 2));
      /* The zero extension is often completely free on x86_64, so make
         it as cheap as possible.  */
20814 if (TARGET_64BIT
&& mode
== DImode
20815 && GET_MODE (XEXP (x
, 0)) == SImode
)
20817 else if (TARGET_ZERO_EXTEND_WITH_AND
)
20818 *total
= cost
->add
;
20820 *total
= cost
->movzx
;
20824 *total
= cost
->movsx
;
20828 if (SCALAR_INT_MODE_P (mode
)
20829 && GET_MODE_SIZE (mode
) < UNITS_PER_WORD
20830 && CONST_INT_P (XEXP (x
, 1)))
20832 HOST_WIDE_INT value
= INTVAL (XEXP (x
, 1));
20835 *total
= cost
->add
;
20838 if ((value
== 2 || value
== 3)
20839 && cost
->lea
<= cost
->shift_const
)
20841 *total
= cost
->lea
;
20851 bool skip_op0
, skip_op1
;
20852 *total
= ix86_shift_rotate_cost (cost
, code
, mode
,
20853 CONSTANT_P (XEXP (x
, 1)),
20854 CONST_INT_P (XEXP (x
, 1))
20855 ? INTVAL (XEXP (x
, 1)) : -1,
20856 GET_CODE (XEXP (x
, 1)) == AND
,
20857 SUBREG_P (XEXP (x
, 1))
20858 && GET_CODE (XEXP (XEXP (x
, 1),
20860 &skip_op0
, &skip_op1
);
20861 if (skip_op0
|| skip_op1
)
20864 *total
+= rtx_cost (XEXP (x
, 0), mode
, code
, 0, speed
);
20866 *total
+= rtx_cost (XEXP (x
, 1), mode
, code
, 0, speed
);
20875 gcc_assert (FLOAT_MODE_P (mode
));
20876 gcc_assert (TARGET_FMA
|| TARGET_FMA4
|| TARGET_AVX512F
);
20878 *total
= ix86_vec_cost (mode
,
20879 GET_MODE_INNER (mode
) == SFmode
20880 ? cost
->fmass
: cost
->fmasd
);
20881 *total
+= rtx_cost (XEXP (x
, 1), mode
, FMA
, 1, speed
);
20883 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
20885 if (GET_CODE (sub
) == NEG
)
20886 sub
= XEXP (sub
, 0);
20887 *total
+= rtx_cost (sub
, mode
, FMA
, 0, speed
);
20890 if (GET_CODE (sub
) == NEG
)
20891 sub
= XEXP (sub
, 0);
20892 *total
+= rtx_cost (sub
, mode
, FMA
, 2, speed
);
20897 if (!FLOAT_MODE_P (mode
) && !VECTOR_MODE_P (mode
))
20899 rtx op0
= XEXP (x
, 0);
20900 rtx op1
= XEXP (x
, 1);
20902 if (CONST_INT_P (XEXP (x
, 1)))
20904 unsigned HOST_WIDE_INT value
= INTVAL (XEXP (x
, 1));
20905 for (nbits
= 0; value
!= 0; value
&= value
- 1)
20909 /* This is arbitrary. */
20912 /* Compute costs correctly for widening multiplication. */
20913 if ((GET_CODE (op0
) == SIGN_EXTEND
|| GET_CODE (op0
) == ZERO_EXTEND
)
20914 && GET_MODE_SIZE (GET_MODE (XEXP (op0
, 0))) * 2
20915 == GET_MODE_SIZE (mode
))
20917 int is_mulwiden
= 0;
20918 machine_mode inner_mode
= GET_MODE (op0
);
20920 if (GET_CODE (op0
) == GET_CODE (op1
))
20921 is_mulwiden
= 1, op1
= XEXP (op1
, 0);
20922 else if (CONST_INT_P (op1
))
20924 if (GET_CODE (op0
) == SIGN_EXTEND
)
20925 is_mulwiden
= trunc_int_for_mode (INTVAL (op1
), inner_mode
)
20928 is_mulwiden
= !(INTVAL (op1
) & ~GET_MODE_MASK (inner_mode
));
20932 op0
= XEXP (op0
, 0), mode
= GET_MODE (op0
);
20936 // Double word multiplication requires 3 mults and 2 adds.
20937 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
20939 mult_init
= 3 * cost
->mult_init
[MODE_INDEX (word_mode
)]
20943 else mult_init
= cost
->mult_init
[MODE_INDEX (mode
)];
20945 *total
= (mult_init
20946 + nbits
* cost
->mult_bit
20947 + rtx_cost (op0
, mode
, outer_code
, opno
, speed
)
20948 + rtx_cost (op1
, mode
, outer_code
, opno
, speed
));
20952 *total
= ix86_multiplication_cost (cost
, mode
);
20959 *total
= ix86_division_cost (cost
, mode
);
20963 if (GET_MODE_CLASS (mode
) == MODE_INT
20964 && GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
20966 if (GET_CODE (XEXP (x
, 0)) == PLUS
20967 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
20968 && CONST_INT_P (XEXP (XEXP (XEXP (x
, 0), 0), 1))
20969 && CONSTANT_P (XEXP (x
, 1)))
20971 HOST_WIDE_INT val
= INTVAL (XEXP (XEXP (XEXP (x
, 0), 0), 1));
20972 if (val
== 2 || val
== 4 || val
== 8)
20974 *total
= cost
->lea
;
20975 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 1), mode
,
20976 outer_code
, opno
, speed
);
20977 *total
+= rtx_cost (XEXP (XEXP (XEXP (x
, 0), 0), 0), mode
,
20978 outer_code
, opno
, speed
);
20979 *total
+= rtx_cost (XEXP (x
, 1), mode
,
20980 outer_code
, opno
, speed
);
20984 else if (GET_CODE (XEXP (x
, 0)) == MULT
20985 && CONST_INT_P (XEXP (XEXP (x
, 0), 1)))
20987 HOST_WIDE_INT val
= INTVAL (XEXP (XEXP (x
, 0), 1));
20988 if (val
== 2 || val
== 4 || val
== 8)
20990 *total
= cost
->lea
;
20991 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 0), mode
,
20992 outer_code
, opno
, speed
);
20993 *total
+= rtx_cost (XEXP (x
, 1), mode
,
20994 outer_code
, opno
, speed
);
20998 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
21000 rtx op
= XEXP (XEXP (x
, 0), 0);
21002 /* Add with carry, ignore the cost of adding a carry flag. */
21003 if (ix86_carry_flag_operator (op
, mode
)
21004 || ix86_carry_flag_unset_operator (op
, mode
))
21005 *total
= cost
->add
;
21008 *total
= cost
->lea
;
21009 *total
+= rtx_cost (op
, mode
,
21010 outer_code
, opno
, speed
);
21013 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 1), mode
,
21014 outer_code
, opno
, speed
);
21015 *total
+= rtx_cost (XEXP (x
, 1), mode
,
21016 outer_code
, opno
, speed
);
21023 /* Subtract with borrow, ignore the cost of subtracting a carry flag. */
21024 if (GET_MODE_CLASS (mode
) == MODE_INT
21025 && GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
21026 && GET_CODE (XEXP (x
, 0)) == MINUS
21027 && (ix86_carry_flag_operator (XEXP (XEXP (x
, 0), 1), mode
)
21028 || ix86_carry_flag_unset_operator (XEXP (XEXP (x
, 0), 1), mode
)))
21030 *total
= cost
->add
;
21031 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 0), mode
,
21032 outer_code
, opno
, speed
);
21033 *total
+= rtx_cost (XEXP (x
, 1), mode
,
21034 outer_code
, opno
, speed
);
21038 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21039 *total
= cost
->addss
;
21040 else if (X87_FLOAT_MODE_P (mode
))
21041 *total
= cost
->fadd
;
21042 else if (FLOAT_MODE_P (mode
))
21043 *total
= ix86_vec_cost (mode
, cost
->addss
);
21044 else if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
21045 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21046 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
21047 *total
= cost
->add
* 2;
21049 *total
= cost
->add
;
21054 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
21055 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21056 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
21057 *total
= cost
->add
* 2;
21059 *total
= cost
->add
;
21063 if (address_no_seg_operand (x
, mode
))
21065 *total
= cost
->lea
;
21068 else if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
21070 /* pandn is a single instruction. */
21071 if (GET_CODE (XEXP (x
, 0)) == NOT
)
21073 *total
= ix86_vec_cost (mode
, cost
->sse_op
)
21074 + rtx_cost (XEXP (XEXP (x
, 0), 0), mode
,
21075 outer_code
, opno
, speed
)
21076 + rtx_cost (XEXP (x
, 1), mode
,
21077 outer_code
, opno
, speed
);
21080 else if (GET_CODE (XEXP (x
, 1)) == NOT
)
21082 *total
= ix86_vec_cost (mode
, cost
->sse_op
)
21083 + rtx_cost (XEXP (x
, 0), mode
,
21084 outer_code
, opno
, speed
)
21085 + rtx_cost (XEXP (XEXP (x
, 1), 0), mode
,
21086 outer_code
, opno
, speed
);
21089 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21091 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
21093 if (TARGET_BMI
&& GET_CODE (XEXP (x
,0)) == NOT
)
21095 *total
= cost
->add
* 2
21096 + rtx_cost (XEXP (XEXP (x
, 0), 0), mode
,
21097 outer_code
, opno
, speed
)
21098 + rtx_cost (XEXP (x
, 1), mode
,
21099 outer_code
, opno
, speed
);
21102 else if (TARGET_BMI
&& GET_CODE (XEXP (x
, 1)) == NOT
)
21104 *total
= cost
->add
* 2
21105 + rtx_cost (XEXP (x
, 0), mode
,
21106 outer_code
, opno
, speed
)
21107 + rtx_cost (XEXP (XEXP (x
, 1), 0), mode
,
21108 outer_code
, opno
, speed
);
21111 *total
= cost
->add
* 2;
21113 else if (TARGET_BMI
&& GET_CODE (XEXP (x
,0)) == NOT
)
21116 + rtx_cost (XEXP (XEXP (x
, 0), 0), mode
,
21117 outer_code
, opno
, speed
)
21118 + rtx_cost (XEXP (x
, 1), mode
, outer_code
, opno
, speed
);
21121 else if (TARGET_BMI
&& GET_CODE (XEXP (x
,1)) == NOT
)
21124 + rtx_cost (XEXP (x
, 0), mode
, outer_code
, opno
, speed
)
21125 + rtx_cost (XEXP (XEXP (x
, 1), 0), mode
,
21126 outer_code
, opno
, speed
);
21130 *total
= cost
->add
;
21134 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
21135 // vnot is pxor -1.
21136 *total
= ix86_vec_cost (mode
, cost
->sse_op
) + 1;
21137 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
21138 *total
= cost
->add
* 2;
21140 *total
= cost
->add
;
21144 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21145 *total
= cost
->sse_op
;
21146 else if (X87_FLOAT_MODE_P (mode
))
21147 *total
= cost
->fchs
;
21148 else if (FLOAT_MODE_P (mode
))
21149 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21150 else if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
21151 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21152 else if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
21153 *total
= cost
->add
* 3;
21155 *total
= cost
->add
;
21162 if (GET_CODE (op0
) == ZERO_EXTRACT
21163 && XEXP (op0
, 1) == const1_rtx
21164 && CONST_INT_P (XEXP (op0
, 2))
21165 && op1
== const0_rtx
)
21167 /* This kind of construct is implemented using test[bwl].
21168 Treat it as if we had an AND. */
21169 mode
= GET_MODE (XEXP (op0
, 0));
21170 *total
= (cost
->add
21171 + rtx_cost (XEXP (op0
, 0), mode
, outer_code
,
21173 + rtx_cost (const1_rtx
, mode
, outer_code
, opno
, speed
));
21177 if (GET_CODE (op0
) == PLUS
&& rtx_equal_p (XEXP (op0
, 0), op1
))
21179 /* This is an overflow detection, count it as a normal compare. */
21180 *total
= rtx_cost (op0
, GET_MODE (op0
), COMPARE
, 0, speed
);
21186 (compare:CCC (neg:QI (geu:QI (reg:CC_CCC FLAGS_REG) (const_int 0)))
21187 (ltu:QI (reg:CC_CCC FLAGS_REG) (const_int 0))) */
21188 if (mode
== CCCmode
21189 && GET_CODE (op0
) == NEG
21190 && GET_CODE (geu
= XEXP (op0
, 0)) == GEU
21191 && REG_P (XEXP (geu
, 0))
21192 && (GET_MODE (XEXP (geu
, 0)) == CCCmode
21193 || GET_MODE (XEXP (geu
, 0)) == CCmode
)
21194 && REGNO (XEXP (geu
, 0)) == FLAGS_REG
21195 && XEXP (geu
, 1) == const0_rtx
21196 && GET_CODE (op1
) == LTU
21197 && REG_P (XEXP (op1
, 0))
21198 && GET_MODE (XEXP (op1
, 0)) == GET_MODE (XEXP (geu
, 0))
21199 && REGNO (XEXP (op1
, 0)) == FLAGS_REG
21200 && XEXP (op1
, 1) == const0_rtx
)
21202 /* This is *setcc_qi_addqi3_cconly_overflow_1_* patterns, a nop. */
21207 if (SCALAR_INT_MODE_P (GET_MODE (op0
))
21208 && GET_MODE_SIZE (GET_MODE (op0
)) > UNITS_PER_WORD
)
21210 if (op1
== const0_rtx
)
21212 + rtx_cost (op0
, GET_MODE (op0
), outer_code
, opno
, speed
);
21214 *total
= 3*cost
->add
21215 + rtx_cost (op0
, GET_MODE (op0
), outer_code
, opno
, speed
)
21216 + rtx_cost (op1
, GET_MODE (op0
), outer_code
, opno
, speed
);
21220 /* The embedded comparison operand is completely free. */
21221 if (!general_operand (op0
, GET_MODE (op0
)) && op1
== const0_rtx
)
21227 if (!SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21230 *total
= ix86_vec_cost (mode
, cost
->addss
);
21233 case FLOAT_TRUNCATE
:
21234 if (!SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21235 *total
= cost
->fadd
;
21237 *total
= ix86_vec_cost (mode
, cost
->addss
);
21241 /* SSE requires memory load for the constant operand. It may make
21242 sense to account for this. Of course the constant operand may or
21243 may not be reused. */
21244 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21245 *total
= cost
->sse_op
;
21246 else if (X87_FLOAT_MODE_P (mode
))
21247 *total
= cost
->fabs
;
21248 else if (FLOAT_MODE_P (mode
))
21249 *total
= ix86_vec_cost (mode
, cost
->sse_op
);
21253 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
21254 *total
= mode
== SFmode
? cost
->sqrtss
: cost
->sqrtsd
;
21255 else if (X87_FLOAT_MODE_P (mode
))
21256 *total
= cost
->fsqrt
;
21257 else if (FLOAT_MODE_P (mode
))
21258 *total
= ix86_vec_cost (mode
,
21259 mode
== SFmode
? cost
->sqrtss
: cost
->sqrtsd
);
21263 if (XINT (x
, 1) == UNSPEC_TP
)
21265 else if (XINT (x
, 1) == UNSPEC_VTERNLOG
)
21267 *total
= cost
->sse_op
;
21270 else if (XINT (x
, 1) == UNSPEC_PTEST
)
21272 *total
= cost
->sse_op
;
21273 if (XVECLEN (x
, 0) == 2
21274 && GET_CODE (XVECEXP (x
, 0, 0)) == AND
)
21276 rtx andop
= XVECEXP (x
, 0, 0);
21277 *total
+= rtx_cost (XEXP (andop
, 0), GET_MODE (andop
),
21279 + rtx_cost (XEXP (andop
, 1), GET_MODE (andop
),
21288 case VEC_DUPLICATE
:
21289 /* ??? Assume all of these vector manipulation patterns are
21290 recognizable. In which case they all pretty much have the
21292 *total
= cost
->sse_op
;
21295 mask
= XEXP (x
, 2);
21296 /* This is masked instruction, assume the same cost,
21297 as nonmasked variant. */
21298 if (TARGET_AVX512F
&& register_operand (mask
, GET_MODE (mask
)))
21299 *total
= rtx_cost (XEXP (x
, 0), mode
, outer_code
, opno
, speed
);
21301 *total
= cost
->sse_op
;
21305 /* An insn that accesses memory is slightly more expensive
21306 than one that does not. */
21312 if (XEXP (x
, 1) == const1_rtx
21313 && GET_CODE (XEXP (x
, 2)) == ZERO_EXTEND
21314 && GET_MODE (XEXP (x
, 2)) == SImode
21315 && GET_MODE (XEXP (XEXP (x
, 2), 0)) == QImode
)
21317 /* Ignore cost of zero extension and masking of last argument. */
21318 *total
+= rtx_cost (XEXP (x
, 0), mode
, code
, 0, speed
);
21319 *total
+= rtx_cost (XEXP (x
, 1), mode
, code
, 1, speed
);
21320 *total
+= rtx_cost (XEXP (XEXP (x
, 2), 0), mode
, code
, 2, speed
);
21327 && VECTOR_MODE_P (mode
)
21328 && (GET_MODE_SIZE (mode
) == 16 || GET_MODE_SIZE (mode
) == 32))
21331 *total
= speed
? COSTS_N_INSNS (2) : COSTS_N_BYTES (6);
21332 if (!REG_P (XEXP (x
, 0)))
21333 *total
+= rtx_cost (XEXP (x
, 0), mode
, code
, 0, speed
);
21334 if (!REG_P (XEXP (x
, 1)))
21335 *total
+= rtx_cost (XEXP (x
, 1), mode
, code
, 1, speed
);
21336 if (!REG_P (XEXP (x
, 2)))
21337 *total
+= rtx_cost (XEXP (x
, 2), mode
, code
, 2, speed
);
21340 else if (TARGET_CMOVE
21341 && SCALAR_INT_MODE_P (mode
)
21342 && GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
21345 *total
= COSTS_N_INSNS (1);
21346 if (!REG_P (XEXP (x
, 0)))
21347 *total
+= rtx_cost (XEXP (x
, 0), mode
, code
, 0, speed
);
21348 if (!REG_P (XEXP (x
, 1)))
21349 *total
+= rtx_cost (XEXP (x
, 1), mode
, code
, 1, speed
);
21350 if (!REG_P (XEXP (x
, 2)))
21351 *total
+= rtx_cost (XEXP (x
, 2), mode
, code
, 2, speed
);
21363 static int current_machopic_label_num
;
21365 /* Given a symbol name and its associated stub, write out the
21366 definition of the stub. */
21369 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
21371 unsigned int length
;
21372 char *binder_name
, *symbol_name
, lazy_ptr_name
[32];
21373 int label
= ++current_machopic_label_num
;
21375 /* For 64-bit we shouldn't get here. */
21376 gcc_assert (!TARGET_64BIT
);
21378 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
21379 symb
= targetm
.strip_name_encoding (symb
);
21381 length
= strlen (stub
);
21382 binder_name
= XALLOCAVEC (char, length
+ 32);
21383 GEN_BINDER_NAME_FOR_STUB (binder_name
, stub
, length
);
21385 length
= strlen (symb
);
21386 symbol_name
= XALLOCAVEC (char, length
+ 32);
21387 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
21389 sprintf (lazy_ptr_name
, "L%d$lz", label
);
21391 if (MACHOPIC_ATT_STUB
)
21392 switch_to_section (darwin_sections
[machopic_picsymbol_stub3_section
]);
21393 else if (MACHOPIC_PURE
)
21394 switch_to_section (darwin_sections
[machopic_picsymbol_stub2_section
]);
21396 switch_to_section (darwin_sections
[machopic_symbol_stub_section
]);
21398 fprintf (file
, "%s:\n", stub
);
21399 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
21401 if (MACHOPIC_ATT_STUB
)
21403 fprintf (file
, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
21405 else if (MACHOPIC_PURE
)
21408 /* 25-byte PIC stub using "CALL get_pc_thunk". */
21409 rtx tmp
= gen_rtx_REG (SImode
, 2 /* ECX */);
21410 output_set_got (tmp
, NULL_RTX
); /* "CALL ___<cpu>.get_pc_thunk.cx". */
21411 fprintf (file
, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
21412 label
, lazy_ptr_name
, label
);
21413 fprintf (file
, "\tjmp\t*%%ecx\n");
21416 fprintf (file
, "\tjmp\t*%s\n", lazy_ptr_name
);
21418 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
21419 it needs no stub-binding-helper. */
21420 if (MACHOPIC_ATT_STUB
)
21423 fprintf (file
, "%s:\n", binder_name
);
21427 fprintf (file
, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name
, binder_name
);
21428 fprintf (file
, "\tpushl\t%%ecx\n");
21431 fprintf (file
, "\tpushl\t$%s\n", lazy_ptr_name
);
21433 fputs ("\tjmp\tdyld_stub_binding_helper\n", file
);
21435 /* N.B. Keep the correspondence of these
21436 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
21437 old-pic/new-pic/non-pic stubs; altering this will break
21438 compatibility with existing dylibs. */
21441 /* 25-byte PIC stub using "CALL get_pc_thunk". */
21442 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr2_section
]);
21445 /* 16-byte -mdynamic-no-pic stub. */
21446 switch_to_section(darwin_sections
[machopic_lazy_symbol_ptr3_section
]);
21448 fprintf (file
, "%s:\n", lazy_ptr_name
);
21449 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
21450 fprintf (file
, ASM_LONG
"%s\n", binder_name
);
21452 #endif /* TARGET_MACHO */
/* Order the registers for register allocator.  */

void
x86_order_regs_for_local_alloc (void)
{
  int pos = 0;
  int i;

  /* First allocate the local general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && call_used_or_fixed_reg_p (i))
      reg_alloc_order [pos++] = i;

  /* Global general purpose registers.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (GENERAL_REGNO_P (i) && !call_used_or_fixed_reg_p (i))
      reg_alloc_order [pos++] = i;

  /* x87 registers come first in case we are doing FP math
     using them.  */
  if (!TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  /* SSE registers.  */
  for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
    reg_alloc_order [pos++] = i;
  for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Extended REX SSE registers.  */
  for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Mask register.  */
  for (i = FIRST_MASK_REG; i <= LAST_MASK_REG; i++)
    reg_alloc_order [pos++] = i;

  /* x87 registers.  */
  if (TARGET_SSE_MATH)
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      reg_alloc_order [pos++] = i;

  for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
    reg_alloc_order [pos++] = i;

  /* Initialize the rest of array as we do not allocate some registers
     at all.  */
  while (pos < FIRST_PSEUDO_REGISTER)
    reg_alloc_order [pos++] = 0;
}
static bool
ix86_ms_bitfield_layout_p (const_tree record_type)
{
  return ((TARGET_MS_BITFIELD_LAYOUT
           && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
          || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
}
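
/* Illustrative note (editorial): the attributes tested above are the
   user-visible knobs, e.g.

     struct __attribute__ ((ms_struct)) s { char c : 1; int i : 1; };

   requests the MS bitfield layout for this type even without
   -mms-bitfields, while __attribute__ ((gcc_struct)) opts a type out
   when the MS layout is otherwise the default.  */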
21514 /* Returns an expression indicating where the this parameter is
21515 located on entry to the FUNCTION. */
21518 x86_this_parameter (tree function
)
21520 tree type
= TREE_TYPE (function
);
21521 bool aggr
= aggregate_value_p (TREE_TYPE (type
), type
) != 0;
21526 const int *parm_regs
;
21528 if (ix86_function_type_abi (type
) == MS_ABI
)
21529 parm_regs
= x86_64_ms_abi_int_parameter_registers
;
21531 parm_regs
= x86_64_int_parameter_registers
;
21532 return gen_rtx_REG (Pmode
, parm_regs
[aggr
]);
21535 nregs
= ix86_function_regparm (type
, function
);
21537 if (nregs
> 0 && !stdarg_p (type
))
21540 unsigned int ccvt
= ix86_get_callcvt (type
);
21542 if ((ccvt
& IX86_CALLCVT_FASTCALL
) != 0)
21543 regno
= aggr
? DX_REG
: CX_REG
;
21544 else if ((ccvt
& IX86_CALLCVT_THISCALL
) != 0)
21548 return gen_rtx_MEM (SImode
,
21549 plus_constant (Pmode
, stack_pointer_rtx
, 4));
21558 return gen_rtx_MEM (SImode
,
21559 plus_constant (Pmode
,
21560 stack_pointer_rtx
, 4));
21563 return gen_rtx_REG (SImode
, regno
);
21566 return gen_rtx_MEM (SImode
, plus_constant (Pmode
, stack_pointer_rtx
,
21570 /* Determine whether x86_output_mi_thunk can succeed. */
21573 x86_can_output_mi_thunk (const_tree
, HOST_WIDE_INT
, HOST_WIDE_INT vcall_offset
,
21574 const_tree function
)
21576 /* 64-bit can handle anything. */
21580 /* For 32-bit, everything's fine if we have one free register. */
21581 if (ix86_function_regparm (TREE_TYPE (function
), function
) < 3)
21584 /* Need a free register for vcall_offset. */
21588 /* Need a free register for GOT references. */
21589 if (flag_pic
&& !targetm
.binds_local_p (function
))
21592 /* Otherwise ok. */
21596 /* Output the assembler code for a thunk function. THUNK_DECL is the
21597 declaration for the thunk function itself, FUNCTION is the decl for
21598 the target function. DELTA is an immediate constant offset to be
21599 added to THIS. If VCALL_OFFSET is nonzero, the word at
21600 *(*this + vcall_offset) should be added to THIS. */
21603 x86_output_mi_thunk (FILE *file
, tree thunk_fndecl
, HOST_WIDE_INT delta
,
21604 HOST_WIDE_INT vcall_offset
, tree function
)
21606 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl
));
21607 rtx this_param
= x86_this_parameter (function
);
21608 rtx this_reg
, tmp
, fnaddr
;
21609 unsigned int tmp_regno
;
21611 int saved_flag_force_indirect_call
= flag_force_indirect_call
;
21614 tmp_regno
= R10_REG
;
21617 unsigned int ccvt
= ix86_get_callcvt (TREE_TYPE (function
));
21618 if ((ccvt
& IX86_CALLCVT_FASTCALL
) != 0)
21619 tmp_regno
= AX_REG
;
21620 else if ((ccvt
& IX86_CALLCVT_THISCALL
) != 0)
21621 tmp_regno
= DX_REG
;
21623 tmp_regno
= CX_REG
;
21626 flag_force_indirect_call
= 0;
21629 emit_note (NOTE_INSN_PROLOGUE_END
);
21631 /* CET is enabled, insert EB instruction. */
21632 if ((flag_cf_protection
& CF_BRANCH
))
21633 emit_insn (gen_nop_endbr ());
21635 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
21636 pull it in now and let DELTA benefit. */
21637 if (REG_P (this_param
))
21638 this_reg
= this_param
;
21639 else if (vcall_offset
)
21641 /* Put the this parameter into %eax. */
21642 this_reg
= gen_rtx_REG (Pmode
, AX_REG
);
21643 emit_move_insn (this_reg
, this_param
);
21646 this_reg
= NULL_RTX
;
21648 /* Adjust the this parameter by a fixed constant. */
21651 rtx delta_rtx
= GEN_INT (delta
);
21652 rtx delta_dst
= this_reg
? this_reg
: this_param
;
21656 if (!x86_64_general_operand (delta_rtx
, Pmode
))
21658 tmp
= gen_rtx_REG (Pmode
, tmp_regno
);
21659 emit_move_insn (tmp
, delta_rtx
);
21664 ix86_emit_binop (PLUS
, Pmode
, delta_dst
, delta_rtx
);
21667 /* Adjust the this parameter by a value stored in the vtable. */
21670 rtx vcall_addr
, vcall_mem
, this_mem
;
21672 tmp
= gen_rtx_REG (Pmode
, tmp_regno
);
21674 this_mem
= gen_rtx_MEM (ptr_mode
, this_reg
);
21675 if (Pmode
!= ptr_mode
)
21676 this_mem
= gen_rtx_ZERO_EXTEND (Pmode
, this_mem
);
21677 emit_move_insn (tmp
, this_mem
);
21679 /* Adjust the this parameter. */
21680 vcall_addr
= plus_constant (Pmode
, tmp
, vcall_offset
);
21682 && !ix86_legitimate_address_p (ptr_mode
, vcall_addr
, true))
21684 rtx tmp2
= gen_rtx_REG (Pmode
, R11_REG
);
21685 emit_move_insn (tmp2
, GEN_INT (vcall_offset
));
21686 vcall_addr
= gen_rtx_PLUS (Pmode
, tmp
, tmp2
);
21689 vcall_mem
= gen_rtx_MEM (ptr_mode
, vcall_addr
);
21690 if (Pmode
!= ptr_mode
)
21691 emit_insn (gen_addsi_1_zext (this_reg
,
21692 gen_rtx_REG (ptr_mode
,
21696 ix86_emit_binop (PLUS
, Pmode
, this_reg
, vcall_mem
);
21699 /* If necessary, drop THIS back to its stack slot. */
21700 if (this_reg
&& this_reg
!= this_param
)
21701 emit_move_insn (this_param
, this_reg
);
21703 fnaddr
= XEXP (DECL_RTL (function
), 0);
21706 if (!flag_pic
|| targetm
.binds_local_p (function
)
21711 tmp
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, fnaddr
), UNSPEC_GOTPCREL
);
21712 tmp
= gen_rtx_CONST (Pmode
, tmp
);
21713 fnaddr
= gen_const_mem (Pmode
, tmp
);
21718 if (!flag_pic
|| targetm
.binds_local_p (function
))
21721 else if (TARGET_MACHO
)
21723 fnaddr
= machopic_indirect_call_target (DECL_RTL (function
));
21724 fnaddr
= XEXP (fnaddr
, 0);
21726 #endif /* TARGET_MACHO */
21729 tmp
= gen_rtx_REG (Pmode
, CX_REG
);
21730 output_set_got (tmp
, NULL_RTX
);
21732 fnaddr
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, fnaddr
), UNSPEC_GOT
);
21733 fnaddr
= gen_rtx_CONST (Pmode
, fnaddr
);
21734 fnaddr
= gen_rtx_PLUS (Pmode
, tmp
, fnaddr
);
21735 fnaddr
= gen_const_mem (Pmode
, fnaddr
);
21739 /* Our sibling call patterns do not allow memories, because we have no
21740 predicate that can distinguish between frame and non-frame memory.
21741 For our purposes here, we can get away with (ab)using a jump pattern,
21742 because we're going to do no optimization. */
21743 if (MEM_P (fnaddr
))
21745 if (sibcall_insn_operand (fnaddr
, word_mode
))
21747 fnaddr
= XEXP (DECL_RTL (function
), 0);
21748 tmp
= gen_rtx_MEM (QImode
, fnaddr
);
21749 tmp
= gen_rtx_CALL (VOIDmode
, tmp
, const0_rtx
);
21750 tmp
= emit_call_insn (tmp
);
21751 SIBLING_CALL_P (tmp
) = 1;
21754 emit_jump_insn (gen_indirect_jump (fnaddr
));
21758 if (ix86_cmodel
== CM_LARGE_PIC
&& SYMBOLIC_CONST (fnaddr
))
21760 // CM_LARGE_PIC always uses pseudo PIC register which is
21761 // uninitialized. Since FUNCTION is local and calling it
21762 // doesn't go through PLT, we use scratch register %r11 as
21763 // PIC register and initialize it here.
21764 pic_offset_table_rtx
= gen_rtx_REG (Pmode
, R11_REG
);
21765 ix86_init_large_pic_reg (tmp_regno
);
21766 fnaddr
= legitimize_pic_address (fnaddr
,
21767 gen_rtx_REG (Pmode
, tmp_regno
));
21770 if (!sibcall_insn_operand (fnaddr
, word_mode
))
21772 tmp
= gen_rtx_REG (word_mode
, tmp_regno
);
21773 if (GET_MODE (fnaddr
) != word_mode
)
21774 fnaddr
= gen_rtx_ZERO_EXTEND (word_mode
, fnaddr
);
21775 emit_move_insn (tmp
, fnaddr
);
21779 tmp
= gen_rtx_MEM (QImode
, fnaddr
);
21780 tmp
= gen_rtx_CALL (VOIDmode
, tmp
, const0_rtx
);
21781 tmp
= emit_call_insn (tmp
);
21782 SIBLING_CALL_P (tmp
) = 1;
21786 /* Emit just enough of rest_of_compilation to get the insns emitted. */
21787 insn
= get_insns ();
21788 shorten_branches (insn
);
21789 assemble_start_function (thunk_fndecl
, fnname
);
21790 final_start_function (insn
, file
, 1);
21791 final (insn
, file
, 1);
21792 final_end_function ();
21793 assemble_end_function (thunk_fndecl
, fnname
);
21795 flag_force_indirect_call
= saved_flag_force_indirect_call
;
21799 x86_file_start (void)
21801 default_file_start ();
21803 fputs ("\t.code16gcc\n", asm_out_file
);
21805 darwin_file_start ();
21807 if (X86_FILE_START_VERSION_DIRECTIVE
)
21808 fputs ("\t.version\t\"01.01\"\n", asm_out_file
);
21809 if (X86_FILE_START_FLTUSED
)
21810 fputs ("\t.global\t__fltused\n", asm_out_file
);
21811 if (ix86_asm_dialect
== ASM_INTEL
)
21812 fputs ("\t.intel_syntax noprefix\n", asm_out_file
);
21816 x86_field_alignment (tree type
, int computed
)
21820 if (TARGET_64BIT
|| TARGET_ALIGN_DOUBLE
)
21823 return iamcu_alignment (type
, computed
);
21824 type
= strip_array_types (type
);
21825 mode
= TYPE_MODE (type
);
21826 if (mode
== DFmode
|| mode
== DCmode
21827 || GET_MODE_CLASS (mode
) == MODE_INT
21828 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_INT
)
21830 if (TYPE_ATOMIC (type
) && computed
> 32)
21832 static bool warned
;
21834 if (!warned
&& warn_psabi
)
21837 = CHANGES_ROOT_URL
"gcc-11/changes.html#ia32_atomic";
21840 inform (input_location
, "the alignment of %<_Atomic %T%> "
21841 "fields changed in %{GCC 11.1%}",
21842 TYPE_MAIN_VARIANT (type
), url
);
21846 return MIN (32, computed
);
/* Print call to TARGET to FILE.  */

static void
x86_print_call_or_nop (FILE *file, const char *target)
{
  if (flag_nop_mcount || !strcmp (target, "nop"))
    /* 5 byte nop: nopl 0(%[re]ax,%[re]ax,1) */
    fprintf (file, "1:" ASM_BYTE "0x0f, 0x1f, 0x44, 0x00, 0x00\n");
  else
    fprintf (file, "1:\tcall\t%s\n", target);
}
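
/* Illustrative note (editorial): with -pg -mnop-mcount the function above
   emits the 5-byte nop shown in its comment instead of "call mcount",
   roughly

     1:  .byte 0x0f, 0x1f, 0x44, 0x00, 0x00

   which occupies the same 5 bytes as the call and can be patched into a
   real call later; the "1:" label is what the __mcount_loc entries written
   by x86_function_profiler below refer back to.  */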
static bool
current_fentry_name (const char **name)
{
  tree attr = lookup_attribute ("fentry_name",
                                DECL_ATTRIBUTES (current_function_decl));
  if (!attr)
    return false;
  *name = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr)));
  return true;
}

static bool
current_fentry_section (const char **name)
{
  tree attr = lookup_attribute ("fentry_section",
                                DECL_ATTRIBUTES (current_function_decl));
  if (!attr)
    return false;
  *name = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr)));
  return true;
}
21885 /* Output assembler code to FILE to increment profiler label # LABELNO
21886 for profiling a function entry. */
21888 x86_function_profiler (FILE *file
, int labelno ATTRIBUTE_UNUSED
)
21890 if (cfun
->machine
->insn_queued_at_entrance
)
21892 if (cfun
->machine
->insn_queued_at_entrance
== TYPE_ENDBR
)
21893 fprintf (file
, "\t%s\n", TARGET_64BIT
? "endbr64" : "endbr32");
21894 unsigned int patch_area_size
21895 = crtl
->patch_area_size
- crtl
->patch_area_entry
;
21896 if (patch_area_size
)
21897 ix86_output_patchable_area (patch_area_size
,
21898 crtl
->patch_area_entry
== 0);
21901 const char *mcount_name
= MCOUNT_NAME
;
21903 if (current_fentry_name (&mcount_name
))
21905 else if (fentry_name
)
21906 mcount_name
= fentry_name
;
21907 else if (flag_fentry
)
21908 mcount_name
= MCOUNT_NAME_BEFORE_PROLOGUE
;
21912 #ifndef NO_PROFILE_COUNTERS
21913 fprintf (file
, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX
, labelno
);
21916 if (!TARGET_PECOFF
)
21918 switch (ix86_cmodel
)
21921 /* NB: R10 is caller-saved. Although it can be used as a
21922 static chain register, it is preserved when calling
21923 mcount for nested functions. */
21924 fprintf (file
, "1:\tmovabsq\t$%s, %%r10\n\tcall\t*%%r10\n",
21928 #ifdef NO_PROFILE_COUNTERS
21929 fprintf (file
, "1:\tmovabsq\t$_GLOBAL_OFFSET_TABLE_-1b, %%r11\n");
21930 fprintf (file
, "\tleaq\t1b(%%rip), %%r10\n");
21931 fprintf (file
, "\taddq\t%%r11, %%r10\n");
21932 fprintf (file
, "\tmovabsq\t$%s@PLTOFF, %%r11\n", mcount_name
);
21933 fprintf (file
, "\taddq\t%%r11, %%r10\n");
21934 fprintf (file
, "\tcall\t*%%r10\n");
21936 sorry ("profiling %<-mcmodel=large%> with PIC is not supported");
21940 case CM_MEDIUM_PIC
:
21941 if (!ix86_direct_extern_access
)
21943 fprintf (file
, "1:\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name
);
21948 x86_print_call_or_nop (file
, mcount_name
);
21953 x86_print_call_or_nop (file
, mcount_name
);
21957 #ifndef NO_PROFILE_COUNTERS
21958 fprintf (file
, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER
"\n",
21961 fprintf (file
, "1:\tcall\t*%s@GOT(%%ebx)\n", mcount_name
);
21965 #ifndef NO_PROFILE_COUNTERS
21966 fprintf (file
, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER
"\n",
21969 x86_print_call_or_nop (file
, mcount_name
);
21972 if (flag_record_mcount
21973 || lookup_attribute ("fentry_section",
21974 DECL_ATTRIBUTES (current_function_decl
)))
21976 const char *sname
= "__mcount_loc";
21978 if (current_fentry_section (&sname
))
21980 else if (fentry_section
)
21981 sname
= fentry_section
;
21983 fprintf (file
, "\t.section %s, \"a\",@progbits\n", sname
);
21984 fprintf (file
, "\t.%s 1b\n", TARGET_64BIT
? "quad" : "long");
21985 fprintf (file
, "\t.previous\n");
21989 /* We don't have exact information about the insn sizes, but we may assume
21990 quite safely that we are informed about all 1 byte insns and memory
21991 address sizes. This is enough to eliminate unnecessary padding in
21995 ix86_min_insn_size (rtx_insn
*insn
)
21999 if (!INSN_P (insn
) || !active_insn_p (insn
))
22002 /* Discard alignments we've emit and jump instructions. */
22003 if (GET_CODE (PATTERN (insn
)) == UNSPEC_VOLATILE
22004 && XINT (PATTERN (insn
), 1) == UNSPECV_ALIGN
)
22007 /* Important case - calls are always 5 bytes.
22008 It is common to have many calls in the row. */
22010 && symbolic_reference_mentioned_p (PATTERN (insn
))
22011 && !SIBLING_CALL_P (insn
))
22013 len
= get_attr_length (insn
);
22017 /* For normal instructions we rely on get_attr_length being exact,
22018 with a few exceptions. */
22019 if (!JUMP_P (insn
))
22021 enum attr_type type
= get_attr_type (insn
);
22026 if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
22027 || asm_noperands (PATTERN (insn
)) >= 0)
22034 /* Otherwise trust get_attr_length. */
22038 l
= get_attr_length_address (insn
);
22039 if (l
< 4 && symbolic_reference_mentioned_p (PATTERN (insn
)))
#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
/* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
   window.  */

static void
ix86_avoid_jump_mispredicts (void)
{
  rtx_insn *insn, *start = get_insns ();
  int nbytes = 0, njumps = 0;
  bool isjump = false;

  /* Look for all minimal intervals of instructions containing 4 jumps.
     The intervals are bounded by START and INSN.  NBYTES is the total
     size of instructions in the interval including INSN and not including
     START.  When the NBYTES is smaller than 16 bytes, it is possible
     that the end of START and INSN ends up in the same 16byte page.

     The smallest offset in the page INSN can start is the case where START
     ends on the offset 0.  Offset of INSN is then NBYTES - sizeof (INSN).
     We add p2align to 16byte window with maxskip 15 - NBYTES + sizeof (INSN).

     Don't consider asm goto as jump, while it can contain a jump, it doesn't
     have to, control transfer to label(s) can be performed through other
     means, and also we estimate minimum length of all asm stmts as 0.  */
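
  /* Worked example (editorial illustration): if the instructions from just
     after START up to and including INSN total NBYTES = 12 bytes and INSN
     itself is a 2-byte jump, then in the worst case (START ending at offset
     0 of a 16-byte page) INSN can start as early as offset 12 - 2 = 10, and
     the p2align emitted in front of it uses maxskip 15 - 12 + 2 = 5.  */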
22073 for (insn
= start
; insn
; insn
= NEXT_INSN (insn
))
22077 if (LABEL_P (insn
))
22079 align_flags alignment
= label_to_alignment (insn
);
22080 int align
= alignment
.levels
[0].log
;
22081 int max_skip
= alignment
.levels
[0].maxskip
;
22085 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
22086 already in the current 16 byte page, because otherwise
22087 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
22088 bytes to reach 16 byte boundary. */
22090 || (align
<= 3 && max_skip
!= (1 << align
) - 1))
22093 fprintf (dump_file
, "Label %i with max_skip %i\n",
22094 INSN_UID (insn
), max_skip
);
22097 while (nbytes
+ max_skip
>= 16)
22099 start
= NEXT_INSN (start
);
22100 if ((JUMP_P (start
) && asm_noperands (PATTERN (start
)) < 0)
22102 njumps
--, isjump
= true;
22105 nbytes
-= ix86_min_insn_size (start
);
22111 min_size
= ix86_min_insn_size (insn
);
22112 nbytes
+= min_size
;
22114 fprintf (dump_file
, "Insn %i estimated to %i bytes\n",
22115 INSN_UID (insn
), min_size
);
22116 if ((JUMP_P (insn
) && asm_noperands (PATTERN (insn
)) < 0)
22124 start
= NEXT_INSN (start
);
22125 if ((JUMP_P (start
) && asm_noperands (PATTERN (start
)) < 0)
22127 njumps
--, isjump
= true;
22130 nbytes
-= ix86_min_insn_size (start
);
22132 gcc_assert (njumps
>= 0);
22134 fprintf (dump_file
, "Interval %i to %i has %i bytes\n",
22135 INSN_UID (start
), INSN_UID (insn
), nbytes
);
22137 if (njumps
== 3 && isjump
&& nbytes
< 16)
22139 int padsize
= 15 - nbytes
+ ix86_min_insn_size (insn
);
22142 fprintf (dump_file
, "Padding insn %i by %i bytes!\n",
22143 INSN_UID (insn
), padsize
);
22144 emit_insn_before (gen_pad (GEN_INT (padsize
)), insn
);
22150 /* AMD Athlon works faster
22151 when RET is not destination of conditional jump or directly preceded
22152 by other jump instruction. We avoid the penalty by inserting NOP just
22153 before the RET instructions in such cases. */
22155 ix86_pad_returns (void)
22160 FOR_EACH_EDGE (e
, ei
, EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
)
22162 basic_block bb
= e
->src
;
22163 rtx_insn
*ret
= BB_END (bb
);
22165 bool replace
= false;
22167 if (!JUMP_P (ret
) || !ANY_RETURN_P (PATTERN (ret
))
22168 || optimize_bb_for_size_p (bb
))
22170 for (prev
= PREV_INSN (ret
); prev
; prev
= PREV_INSN (prev
))
22171 if (active_insn_p (prev
) || LABEL_P (prev
))
22173 if (prev
&& LABEL_P (prev
))
22178 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
22179 if (EDGE_FREQUENCY (e
) && e
->src
->index
>= 0
22180 && !(e
->flags
& EDGE_FALLTHRU
))
22188 prev
= prev_active_insn (ret
);
22190 && ((JUMP_P (prev
) && any_condjump_p (prev
))
22193 /* Empty functions get branch mispredict even when
22194 the jump destination is not visible to us. */
22195 if (!prev
&& !optimize_function_for_size_p (cfun
))
22200 emit_jump_insn_before (gen_simple_return_internal_long (), ret
);
22206 /* Count the minimum number of instructions in BB. Return 4 if the
22207 number of instructions >= 4. */
22210 ix86_count_insn_bb (basic_block bb
)
22213 int insn_count
= 0;
22215 /* Count number of instructions in this block. Return 4 if the number
22216 of instructions >= 4. */
22217 FOR_BB_INSNS (bb
, insn
)
22219 /* Only happen in exit blocks. */
22221 && ANY_RETURN_P (PATTERN (insn
)))
22224 if (NONDEBUG_INSN_P (insn
)
22225 && GET_CODE (PATTERN (insn
)) != USE
22226 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
22229 if (insn_count
>= 4)
22238 /* Count the minimum number of instructions in code path in BB.
22239 Return 4 if the number of instructions >= 4. */
22242 ix86_count_insn (basic_block bb
)
22246 int min_prev_count
;
22248 /* Only bother counting instructions along paths with no
22249 more than 2 basic blocks between entry and exit. Given
22250 that BB has an edge to exit, determine if a predecessor
22251 of BB has an edge from entry. If so, compute the number
22252 of instructions in the predecessor block. If there
22253 happen to be multiple such blocks, compute the minimum. */
22254 min_prev_count
= 4;
22255 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
22258 edge_iterator prev_ei
;
22260 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
22262 min_prev_count
= 0;
22265 FOR_EACH_EDGE (prev_e
, prev_ei
, e
->src
->preds
)
22267 if (prev_e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
22269 int count
= ix86_count_insn_bb (e
->src
);
22270 if (count
< min_prev_count
)
22271 min_prev_count
= count
;
22277 if (min_prev_count
< 4)
22278 min_prev_count
+= ix86_count_insn_bb (bb
);
22280 return min_prev_count
;
22283 /* Pad short function to 4 instructions. */
22286 ix86_pad_short_function (void)
22291 FOR_EACH_EDGE (e
, ei
, EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
)
22293 rtx_insn
*ret
= BB_END (e
->src
);
22294 if (JUMP_P (ret
) && ANY_RETURN_P (PATTERN (ret
)))
22296 int insn_count
= ix86_count_insn (e
->src
);
22298 /* Pad short function. */
22299 if (insn_count
< 4)
22301 rtx_insn
*insn
= ret
;
22303 /* Find epilogue. */
22306 || NOTE_KIND (insn
) != NOTE_INSN_EPILOGUE_BEG
))
22307 insn
= PREV_INSN (insn
);
22312 /* Two NOPs count as one instruction. */
22313 insn_count
= 2 * (4 - insn_count
);
22314 emit_insn_before (gen_nops (GEN_INT (insn_count
)), insn
);
22320 /* Fix up a Windows system unwinder issue. If an EH region falls through into
22321 the epilogue, the Windows system unwinder will apply epilogue logic and
22322 produce incorrect offsets. This can be avoided by adding a nop between
22323 the last insn that can throw and the first insn of the epilogue. */
22326 ix86_seh_fixup_eh_fallthru (void)
22331 FOR_EACH_EDGE (e
, ei
, EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
)
22333 rtx_insn
*insn
, *next
;
22335 /* Find the beginning of the epilogue. */
22336 for (insn
= BB_END (e
->src
); insn
!= NULL
; insn
= PREV_INSN (insn
))
22337 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_EPILOGUE_BEG
)
22342 /* We only care about preceding insns that can throw. */
22343 insn
= prev_active_insn (insn
);
22344 if (insn
== NULL
|| !can_throw_internal (insn
))
22347 /* Do not separate calls from their debug information. */
22348 for (next
= NEXT_INSN (insn
); next
!= NULL
; next
= NEXT_INSN (next
))
22349 if (NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_VAR_LOCATION
)
22354 emit_insn_after (gen_nops (const1_rtx
), insn
);
/* Split vector load from parm_decl to elemental loads to avoid STLF
   stalls.  */
static void
ix86_split_stlf_stall_load ()
{
  rtx_insn *insn, *start = get_insns ();
  unsigned window = 0;
22365 for (insn
= start
; insn
; insn
= NEXT_INSN (insn
))
22367 if (!NONDEBUG_INSN_P (insn
))
22370 /* Insert 64 vaddps %xmm18, %xmm19, %xmm20(no dependence between each
22371 other, just emulate for pipeline) before stalled load, stlf stall
22372 case is as fast as no stall cases on CLX.
22373 Since CFG is freed before machine_reorg, just do a rough
22374 calculation of the window according to the layout. */
22375 if (window
> (unsigned) x86_stlf_window_ninsns
)
22378 if (any_uncondjump_p (insn
)
22379 || ANY_RETURN_P (PATTERN (insn
))
22383 rtx set
= single_set (insn
);
22386 rtx src
= SET_SRC (set
);
22388 /* Only handle V2DFmode load since it doesn't need any scratch
22390 || GET_MODE (src
) != E_V2DFmode
22392 || TREE_CODE (get_base_address (MEM_EXPR (src
))) != PARM_DECL
)
22395 rtx zero
= CONST0_RTX (V2DFmode
);
22396 rtx dest
= SET_DEST (set
);
22397 rtx m
= adjust_address (src
, DFmode
, 0);
22398 rtx loadlpd
= gen_sse2_loadlpd (dest
, zero
, m
);
22399 emit_insn_before (loadlpd
, insn
);
22400 m
= adjust_address (src
, DFmode
, 8);
22401 rtx loadhpd
= gen_sse2_loadhpd (dest
, dest
, m
);
22402 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
22404 fputs ("Due to potential STLF stall, split instruction:\n",
22406 print_rtl_single (dump_file
, insn
);
22407 fputs ("To:\n", dump_file
);
22408 print_rtl_single (dump_file
, loadlpd
);
22409 print_rtl_single (dump_file
, loadhpd
);
22411 PATTERN (insn
) = loadhpd
;
22412 INSN_CODE (insn
) = -1;
22413 gcc_assert (recog_memoized (insn
) != -1);
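
/* Illustrative note (editorial): the replacement above turns one 16-byte
   V2DFmode load from an incoming argument slot into a low-half load
   followed by a high-half load of the same slot (the insns behind
   gen_sse2_loadlpd and gen_sse2_loadhpd), so each 8-byte load can be
   forwarded from the individual 8-byte stores of the argument instead of
   stalling in store-to-load forwarding.  */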
22417 /* Implement machine specific optimizations. We implement padding of returns
22418 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
22422 /* We are freeing block_for_insn in the toplev to keep compatibility
22423 with old MDEP_REORGS that are not CFG based. Recompute it now. */
22424 compute_bb_for_insn ();
22426 if (TARGET_SEH
&& current_function_has_exception_handlers ())
22427 ix86_seh_fixup_eh_fallthru ();
22429 if (optimize
&& optimize_function_for_speed_p (cfun
))
22432 ix86_split_stlf_stall_load ();
22433 if (TARGET_PAD_SHORT_FUNCTION
)
22434 ix86_pad_short_function ();
22435 else if (TARGET_PAD_RETURNS
)
22436 ix86_pad_returns ();
22437 #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
22438 if (TARGET_FOUR_JUMP_LIMIT
)
22439 ix86_avoid_jump_mispredicts ();
/* Return nonzero when QImode register that must be represented via REX prefix
   is used.  */
bool
x86_extended_QIreg_mentioned_p (rtx_insn *insn)
{
  int i;
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (GENERAL_REG_P (recog_data.operand[i])
        && !QI_REGNO_P (REGNO (recog_data.operand[i])))
      return true;
  return false;
}

/* Return true when INSN mentions register that must be encoded using REX
   prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, INSN_P (insn) ? PATTERN (insn) : insn, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x)
          && (REX_INT_REGNO_P (REGNO (x)) || REX_SSE_REGNO_P (REGNO (x))))
        return true;
    }
  return false;
}
/* If profitable, negate (without causing overflow) integer constant
   of mode MODE at location LOC.  Return true in this case.  */
bool
x86_maybe_negate_const_int (rtx *loc, machine_mode mode)
{
  HOST_WIDE_INT val;

  if (!CONST_INT_P (*loc))
    return false;

  switch (mode)
    {
    case E_DImode:
      /* DImode x86_64 constants must fit in 32 bits.  */
      gcc_assert (x86_64_immediate_operand (*loc, mode));

      mode = SImode;
      break;

    case E_SImode:
    case E_HImode:
    case E_QImode:
      break;

    default:
      gcc_unreachable ();
    }

  /* Avoid overflows.  */
  if (mode_signbit_p (mode, *loc))
    return false;

  val = INTVAL (*loc);

  /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
     Exceptions: -128 encodes smaller than 128, so swap sign and op.  */
  if ((val < 0 && val != -128)
      || val == 128)
    {
      *loc = GEN_INT (-val);
      return true;
    }

  return false;
}
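
/* Worked example (editorial illustration): a (const_int -4) addend is
   rewritten to 4 so the output becomes "subl $4, %eax" rather than
   "addl $-4, %eax"; -128 is left alone because it already fits in a
   sign-extended 8-bit immediate while +128 would not, and +128 is
   negated to -128 for the same reason.  */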
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
x86_emit_floatuns (rtx operands[2])
{
  rtx_code_label *neglab, *donelab;
  rtx i0, i1, f0, in, out;
  machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);

  expand_float (out, in, 0);

  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
                            1, OPTAB_DIRECT);
  i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);

  expand_float (f0, i0, 0);

  emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
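
/* Worked example (editorial illustration): for a 64-bit unsigned input
   with the sign bit set, say u = 2^63 + 3, the negative branch above
   computes i0 = (u >> 1) | (u & 1) = 2^62 + 1 (the low bit is folded
   back in so that the final rounding is unaffected), converts it as a
   nonnegative signed value, and then doubles the result with f0 + f0,
   yielding the correctly rounded FP value of u.  */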
/* Target hook for scalar_mode_supported_p.  */
static bool
ix86_scalar_mode_supported_p (scalar_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (mode == TFmode)
    return true;
  else if ((mode == HFmode || mode == BFmode) && TARGET_SSE2)
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}

/* Implement TARGET_LIBGCC_FLOATING_POINT_MODE_SUPPORTED_P - return TRUE
   if MODE is HFmode, and punt to the generic implementation otherwise.  */

static bool
ix86_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  /* NB: Always return TRUE for HFmode so that the _Float16 type will
     be defined by the C front-end for AVX512FP16 intrinsics.  We will
     issue an error in ix86_expand_move for HFmode if AVX512FP16 isn't
     enabled.  */
  return (((mode == HFmode || mode == BFmode) && TARGET_SSE2)
          ? true
          : default_libgcc_floating_mode_supported_p (mode));
}

/* Implements target hook vector_mode_supported_p.  */
static bool
ix86_vector_mode_supported_p (machine_mode mode)
{
  /* For ia32, scalar TImode isn't supported and so V1TImode shouldn't be
     either.  */
  if (!TARGET_64BIT && GET_MODE_INNER (mode) == TImode)
    return false;

  if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
    return true;
  if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
    return true;
  if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
    return true;
  if (TARGET_AVX512F && VALID_AVX512F_REG_MODE (mode))
    return true;
  if ((TARGET_MMX || TARGET_MMX_WITH_SSE)
      && VALID_MMX_REG_MODE (mode))
    return true;
  if ((TARGET_3DNOW || TARGET_MMX_WITH_SSE)
      && VALID_MMX_REG_MODE_3DNOW (mode))
    return true;
  if (mode == V2QImode)
    return true;
  return false;
}

/* Target hook for c_mode_for_suffix.  */
static machine_mode
ix86_c_mode_for_suffix (char suffix)
{
  if (suffix == 'q')
    return TFmode;
  if (suffix == 'w')
    return XFmode;

  return VOIDmode;
}
/* Worker function for TARGET_MD_ASM_ADJUST.

   We implement asm flag outputs, and maintain source compatibility
   with the old cc0-based compiler.  */
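
/* Usage sketch (editorial illustration of the documented flag-output
   constraints this hook handles; the variable names are made up):

     bool carry;
     asm ("btl %2, %1" : "=@ccc" (carry) : "r" (word), "r" (bit));

   arrives here with the constraint "=@ccc".  The loop below rewrites the
   first such output to "=Bf" with the flags register as the real output
   and appends a compare of the flags register in the matching CC mode,
   stored into CARRY after the asm.  */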
22636 ix86_md_asm_adjust (vec
<rtx
> &outputs
, vec
<rtx
> & /*inputs*/,
22637 vec
<machine_mode
> & /*input_modes*/,
22638 vec
<const char *> &constraints
, vec
<rtx
> &clobbers
,
22639 HARD_REG_SET
&clobbered_regs
, location_t loc
)
22641 bool saw_asm_flag
= false;
22644 for (unsigned i
= 0, n
= outputs
.length (); i
< n
; ++i
)
22646 const char *con
= constraints
[i
];
22647 if (!startswith (con
, "=@cc"))
22650 if (strchr (con
, ',') != NULL
)
22652 error_at (loc
, "alternatives not allowed in %<asm%> flag output");
22656 bool invert
= false;
22658 invert
= true, con
++;
22660 machine_mode mode
= CCmode
;
22661 rtx_code code
= UNKNOWN
;
22667 mode
= CCAmode
, code
= EQ
;
22668 else if (con
[1] == 'e' && con
[2] == 0)
22669 mode
= CCCmode
, code
= NE
;
22673 mode
= CCCmode
, code
= EQ
;
22674 else if (con
[1] == 'e' && con
[2] == 0)
22675 mode
= CCAmode
, code
= NE
;
22679 mode
= CCCmode
, code
= EQ
;
22683 mode
= CCZmode
, code
= EQ
;
22687 mode
= CCGCmode
, code
= GT
;
22688 else if (con
[1] == 'e' && con
[2] == 0)
22689 mode
= CCGCmode
, code
= GE
;
22693 mode
= CCGCmode
, code
= LT
;
22694 else if (con
[1] == 'e' && con
[2] == 0)
22695 mode
= CCGCmode
, code
= LE
;
22699 mode
= CCOmode
, code
= EQ
;
22703 mode
= CCPmode
, code
= EQ
;
22707 mode
= CCSmode
, code
= EQ
;
22711 mode
= CCZmode
, code
= EQ
;
22714 if (code
== UNKNOWN
)
22716 error_at (loc
, "unknown %<asm%> flag output %qs", constraints
[i
]);
22720 code
= reverse_condition (code
);
22722 rtx dest
= outputs
[i
];
22725 /* This is the first asm flag output. Here we put the flags
22726 register in as the real output and adjust the condition to
22728 constraints
[i
] = "=Bf";
22729 outputs
[i
] = gen_rtx_REG (CCmode
, FLAGS_REG
);
22730 saw_asm_flag
= true;
22734 /* We don't need the flags register as output twice. */
22735 constraints
[i
] = "=X";
22736 outputs
[i
] = gen_rtx_SCRATCH (SImode
);
22739 rtx x
= gen_rtx_REG (mode
, FLAGS_REG
);
22740 x
= gen_rtx_fmt_ee (code
, QImode
, x
, const0_rtx
);
22742 machine_mode dest_mode
= GET_MODE (dest
);
22743 if (!SCALAR_INT_MODE_P (dest_mode
))
22745 error_at (loc
, "invalid type for %<asm%> flag output");
22749 if (dest_mode
== QImode
)
22750 emit_insn (gen_rtx_SET (dest
, x
));
22753 rtx reg
= gen_reg_rtx (QImode
);
22754 emit_insn (gen_rtx_SET (reg
, x
));
22756 reg
= convert_to_mode (dest_mode
, reg
, 1);
22757 emit_move_insn (dest
, reg
);
22761 rtx_insn
*seq
= get_insns ();
22768 /* If we had no asm flag outputs, clobber the flags. */
22769 clobbers
.safe_push (gen_rtx_REG (CCmode
, FLAGS_REG
));
22770 SET_HARD_REG_BIT (clobbered_regs
, FLAGS_REG
);
/* Implements the target hook targetm.asm.encode_section_info.  */

static void ATTRIBUTE_UNUSED
ix86_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (ix86_in_large_data_p (decl))
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
}

/* Worker function for REVERSE_CONDITION.  */

enum rtx_code
ix86_reverse_condition (enum rtx_code code, machine_mode mode)
{
  return (mode == CCFPmode
	  ? reverse_condition_maybe_unordered (code)
	  : reverse_condition (code));
}
/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_387_reg_move (rtx_insn *insn, rtx *operands)
{
  if (REG_P (operands[0]))
    {
      if (REG_P (operands[1])
	  && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	{
	  if (REGNO (operands[0]) == FIRST_STACK_REG)
	    return output_387_ffreep (operands, 0);
	  return "fstp\t%y0";
	}
      if (STACK_TOP_P (operands[0]))
	return "fld%Z1\t%y1";
      return "fst\t%y0";
    }
  else if (MEM_P (operands[0]))
    {
      gcc_assert (REG_P (operands[1]));
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	return "fstp%Z0\t%y0";
      else
	{
	  /* There is no non-popping store to memory for XFmode.
	     So if we need one, follow the store with a load.  */
	  if (GET_MODE (operands[0]) == XFmode)
	    return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
	  else
	    return "fst%Z0\t%y0";
	}
    }
  else
    gcc_unreachable ();
}
#ifdef TARGET_SOLARIS
/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,
				tree decl)
{
  /* With Binutils 2.15, the "@unwind" marker must be specified on
     every occurrence of the ".eh_frame" section, not just the first
     one.  */
  if (TARGET_64BIT
      && strcmp (name, ".eh_frame") == 0)
    {
      fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
	       flags & SECTION_WRITE ? "aw" : "a");
      return;
    }

#ifndef USE_GAS
  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
    {
      solaris_elf_asm_comdat_section (name, flags, decl);
      return;
    }

  /* Solaris/x86 as uses the same syntax for the SHF_EXCLUDE flags as the
     SPARC assembler.  One cannot mix single-letter flags and #exclude, so
     only emit the latter here.  */
  if (flags & SECTION_EXCLUDE)
    {
      fprintf (asm_out_file, "\t.section\t%s,#exclude\n", name);
      return;
    }
#endif

  default_elf_asm_named_section (name, flags, decl);
}
#endif /* TARGET_SOLARIS */
/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ix86_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == float128_type_node || type == float64x_type_node)
    return NULL;

  switch (TYPE_MODE (type))
    {
    case E_BFmode:
      return "DF16b";
    case E_HFmode:
      /* _Float16 is "DF16_".
	 Align with clang's decision in https://reviews.llvm.org/D33719.  */
      return "DF16_";
    case E_TFmode:
      /* __float128 is "g".  */
      return "g";
    case E_XFmode:
      /* "long double" or __float80 is "e".  */
      return "e";
    default:
      return NULL;
    }
}
/* Create C++ tinfo symbols for only conditionally available fundamental
   types.  */

static void
ix86_emit_support_tinfos (emit_support_tinfos_callback callback)
{
  extern tree ix86_float16_type_node;
  extern tree ix86_bf16_type_node;

  if (!TARGET_SSE2)
    {
      gcc_checking_assert (!float16_type_node && !bfloat16_type_node);
      float16_type_node = ix86_float16_type_node;
      bfloat16_type_node = ix86_bf16_type_node;
      callback (float16_type_node);
      callback (bfloat16_type_node);
      float16_type_node = NULL_TREE;
      bfloat16_type_node = NULL_TREE;
    }
}
22926 static GTY(()) tree ix86_tls_stack_chk_guard_decl
;
22929 ix86_stack_protect_guard (void)
22931 if (TARGET_SSP_TLS_GUARD
)
22933 tree type_node
= lang_hooks
.types
.type_for_mode (ptr_mode
, 1);
22934 int qual
= ENCODE_QUAL_ADDR_SPACE (ix86_stack_protector_guard_reg
);
22935 tree type
= build_qualified_type (type_node
, qual
);
22938 if (OPTION_SET_P (ix86_stack_protector_guard_symbol_str
))
22940 t
= ix86_tls_stack_chk_guard_decl
;
22947 (UNKNOWN_LOCATION
, VAR_DECL
,
22948 get_identifier (ix86_stack_protector_guard_symbol_str
),
22950 TREE_STATIC (t
) = 1;
22951 TREE_PUBLIC (t
) = 1;
22952 DECL_EXTERNAL (t
) = 1;
22954 TREE_THIS_VOLATILE (t
) = 1;
22955 DECL_ARTIFICIAL (t
) = 1;
22956 DECL_IGNORED_P (t
) = 1;
22958 /* Do not share RTL as the declaration is visible outside of
22959 current function. */
22961 RTX_FLAG (x
, used
) = 1;
22963 ix86_tls_stack_chk_guard_decl
= t
;
22968 tree asptrtype
= build_pointer_type (type
);
22970 t
= build_int_cst (asptrtype
, ix86_stack_protector_guard_offset
);
22971 t
= build2 (MEM_REF
, asptrtype
, t
,
22972 build_int_cst (asptrtype
, 0));
22973 TREE_THIS_VOLATILE (t
) = 1;
22979 return default_stack_protect_guard ();
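/* Note (illustrative, not from the original sources): with the default
   -mstack-protector-guard=tls the canary is read straight from the thread
   control block, e.g. on glibc

     movq %fs:40, %rax	    64-bit, offset 0x28
     movl %gs:20, %eax	    32-bit, offset 0x14

   and -mstack-protector-guard-reg=/-offset=/-symbol= let the user redirect
   it, which is what the address-space-qualified MEM_REF above models.  */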
/* For 32-bit code we can save PIC register setup by using
   __stack_chk_fail_local hidden function instead of calling
   __stack_chk_fail directly.  64-bit code doesn't need to setup any PIC
   register, so it is better to call __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
ix86_stack_protect_fail (void)
{
  return TARGET_64BIT
	 ? default_external_stack_protect_fail ()
	 : default_hidden_stack_protect_fail ();
}
/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */

int
asm_preferred_eh_data_format (int code, int global)
{
  /* PE-COFF is effectively always -fPIC because of the .reloc section.  */
  if (flag_pic || TARGET_PECOFF || !ix86_direct_extern_access)
    {
      int type = DW_EH_PE_sdata8;
      if (ptr_mode == SImode
	  || ix86_cmodel == CM_SMALL_PIC
	  || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
	type = DW_EH_PE_sdata4;
      return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
    }

  if (ix86_cmodel == CM_SMALL
      || (ix86_cmodel == CM_MEDIUM && code))
    return DW_EH_PE_udata4;

  return DW_EH_PE_absptr;
}
23025 /* Implement targetm.vectorize.builtin_vectorization_cost. */
23027 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost
,
23031 machine_mode mode
= TImode
;
23033 if (vectype
!= NULL
)
23035 fp
= FLOAT_TYPE_P (vectype
);
23036 mode
= TYPE_MODE (vectype
);
23039 switch (type_of_cost
)
23042 return fp
? ix86_cost
->addss
: COSTS_N_INSNS (1);
23045 /* load/store costs are relative to register move which is 2. Recompute
23046 it to COSTS_N_INSNS so everything have same base. */
23047 return COSTS_N_INSNS (fp
? ix86_cost
->sse_load
[0]
23048 : ix86_cost
->int_load
[2]) / 2;
23051 return COSTS_N_INSNS (fp
? ix86_cost
->sse_store
[0]
23052 : ix86_cost
->int_store
[2]) / 2;
23055 return ix86_vec_cost (mode
,
23056 fp
? ix86_cost
->addss
: ix86_cost
->sse_op
);
23059 index
= sse_store_index (mode
);
23060 /* See PR82713 - we may end up being called on non-vector type. */
23063 return COSTS_N_INSNS (ix86_cost
->sse_load
[index
]) / 2;
23066 index
= sse_store_index (mode
);
23067 /* See PR82713 - we may end up being called on non-vector type. */
23070 return COSTS_N_INSNS (ix86_cost
->sse_store
[index
]) / 2;
23072 case vec_to_scalar
:
23073 case scalar_to_vec
:
23074 return ix86_vec_cost (mode
, ix86_cost
->sse_op
);
23076 /* We should have separate costs for unaligned loads and gather/scatter.
23077 Do that incrementally. */
23078 case unaligned_load
:
23079 index
= sse_store_index (mode
);
23080 /* See PR82713 - we may end up being called on non-vector type. */
23083 return COSTS_N_INSNS (ix86_cost
->sse_unaligned_load
[index
]) / 2;
23085 case unaligned_store
:
23086 index
= sse_store_index (mode
);
23087 /* See PR82713 - we may end up being called on non-vector type. */
23090 return COSTS_N_INSNS (ix86_cost
->sse_unaligned_store
[index
]) / 2;
23092 case vector_gather_load
:
23093 return ix86_vec_cost (mode
,
23095 (ix86_cost
->gather_static
23096 + ix86_cost
->gather_per_elt
23097 * TYPE_VECTOR_SUBPARTS (vectype
)) / 2);
23099 case vector_scatter_store
:
23100 return ix86_vec_cost (mode
,
23102 (ix86_cost
->scatter_static
23103 + ix86_cost
->scatter_per_elt
23104 * TYPE_VECTOR_SUBPARTS (vectype
)) / 2);
23106 case cond_branch_taken
:
23107 return ix86_cost
->cond_taken_branch_cost
;
23109 case cond_branch_not_taken
:
23110 return ix86_cost
->cond_not_taken_branch_cost
;
23113 case vec_promote_demote
:
23114 return ix86_vec_cost (mode
, ix86_cost
->sse_op
);
23116 case vec_construct
:
23118 int n
= TYPE_VECTOR_SUBPARTS (vectype
);
23119 /* N - 1 element inserts into an SSE vector, the possible
23120 GPR -> XMM move is accounted for in add_stmt_cost. */
23121 if (GET_MODE_BITSIZE (mode
) <= 128)
23122 return (n
- 1) * ix86_cost
->sse_op
;
23123 /* One vinserti128 for combining two SSE vectors for AVX256. */
23124 else if (GET_MODE_BITSIZE (mode
) == 256)
23125 return ((n
- 2) * ix86_cost
->sse_op
23126 + ix86_vec_cost (mode
, ix86_cost
->addss
));
23127 /* One vinserti64x4 and two vinserti128 for combining SSE
23128 and AVX256 vectors to AVX512. */
23129 else if (GET_MODE_BITSIZE (mode
) == 512)
23130 return ((n
- 4) * ix86_cost
->sse_op
23131 + 3 * ix86_vec_cost (mode
, ix86_cost
->addss
));
23132 gcc_unreachable ();
23136 gcc_unreachable ();
/* This function returns the calling-ABI-specific va_list type node.
   It returns the FNDECL-specific va_list type.  */

static tree
ix86_fn_abi_va_list (tree fndecl)
{
  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);

  if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}

/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */

static tree
ix86_canonical_va_list_type (tree type)
{
  if (TARGET_64BIT)
    {
      if (lookup_attribute ("ms_abi va_list", TYPE_ATTRIBUTES (type)))
	return ms_va_list_type_node;

      if ((TREE_CODE (type) == ARRAY_TYPE
	   && integer_zerop (array_type_nelts (type)))
	  || POINTER_TYPE_P (type))
	{
	  tree elem_type = TREE_TYPE (type);
	  if (TREE_CODE (elem_type) == RECORD_TYPE
	      && lookup_attribute ("sysv_abi va_list",
				   TYPE_ATTRIBUTES (elem_type)))
	    return sysv_va_list_type_node;
	}

      return NULL_TREE;
    }

  return std_canonical_va_list_type (type);
}
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the result type of
   the va_list builtin, and *PNAME to its internal type.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

static int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    default:
      break;

    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      return 1;

    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      return 1;
    }

  return 0;
}
#undef TARGET_SCHED_DISPATCH
#define TARGET_SCHED_DISPATCH ix86_bd_has_dispatch
#undef TARGET_SCHED_DISPATCH_DO
#define TARGET_SCHED_DISPATCH_DO ix86_bd_do_dispatch
#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH ix86_reassociation_width
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ix86_atom_sched_reorder
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY ix86_adjust_priority
#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK \
  ix86_dependencies_evaluation_hook
23233 /* Implementation of reassociation_width target hook used by
23234 reassoc phase to identify parallelism level in reassociated
23235 tree. Statements tree_code is passed in OPC. Arguments type
23236 is passed in MODE. */
23239 ix86_reassociation_width (unsigned int op
, machine_mode mode
)
23243 if (VECTOR_MODE_P (mode
))
23246 if (INTEGRAL_MODE_P (mode
))
23247 width
= ix86_cost
->reassoc_vec_int
;
23248 else if (FLOAT_MODE_P (mode
))
23249 width
= ix86_cost
->reassoc_vec_fp
;
23254 /* Integer vector instructions execute in FP unit
23255 and can execute 3 additions and one multiplication per cycle. */
23256 if ((ix86_tune
== PROCESSOR_ZNVER1
|| ix86_tune
== PROCESSOR_ZNVER2
23257 || ix86_tune
== PROCESSOR_ZNVER3
|| ix86_tune
== PROCESSOR_ZNVER4
)
23258 && INTEGRAL_MODE_P (mode
) && op
!= PLUS
&& op
!= MINUS
)
23261 /* Account for targets that splits wide vectors into multiple parts. */
23262 if (TARGET_AVX512_SPLIT_REGS
&& GET_MODE_BITSIZE (mode
) > 256)
23263 div
= GET_MODE_BITSIZE (mode
) / 256;
23264 else if (TARGET_AVX256_SPLIT_REGS
&& GET_MODE_BITSIZE (mode
) > 128)
23265 div
= GET_MODE_BITSIZE (mode
) / 128;
23266 else if (TARGET_SSE_SPLIT_REGS
&& GET_MODE_BITSIZE (mode
) > 64)
23267 div
= GET_MODE_BITSIZE (mode
) / 64;
23268 width
= (width
+ div
- 1) / div
;
23271 else if (INTEGRAL_MODE_P (mode
))
23272 width
= ix86_cost
->reassoc_int
;
23273 else if (FLOAT_MODE_P (mode
))
23274 width
= ix86_cost
->reassoc_fp
;
23276 /* Avoid using too many registers in 32bit mode. */
23277 if (!TARGET_64BIT
&& width
> 2)
23282 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
23283 place emms and femms instructions. */
23285 static machine_mode
23286 ix86_preferred_simd_mode (scalar_mode mode
)
23294 if (TARGET_AVX512BW
&& !TARGET_PREFER_AVX256
)
23296 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23302 if (TARGET_AVX512BW
&& !TARGET_PREFER_AVX256
)
23304 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23310 if (TARGET_AVX512F
&& !TARGET_PREFER_AVX256
)
23312 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23318 if (TARGET_AVX512F
&& !TARGET_PREFER_AVX256
)
23320 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23326 if (TARGET_AVX512FP16
)
23328 if (TARGET_AVX512VL
)
23330 if (TARGET_PREFER_AVX128
)
23332 else if (TARGET_PREFER_AVX256
)
23340 if (TARGET_AVX512F
&& !TARGET_PREFER_AVX256
)
23342 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23348 if (TARGET_AVX512F
&& !TARGET_PREFER_AVX256
)
23350 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23352 else if (TARGET_SSE2
)
23361 /* If AVX is enabled then try vectorizing with both 256bit and 128bit
23362 vectors. If AVX512F is enabled then try vectorizing with 512bit,
23363 256bit and 128bit vectors. */
23365 static unsigned int
23366 ix86_autovectorize_vector_modes (vector_modes
*modes
, bool all
)
23368 if (TARGET_AVX512F
&& !TARGET_PREFER_AVX256
)
23370 modes
->safe_push (V64QImode
);
23371 modes
->safe_push (V32QImode
);
23372 modes
->safe_push (V16QImode
);
23374 else if (TARGET_AVX512F
&& all
)
23376 modes
->safe_push (V32QImode
);
23377 modes
->safe_push (V16QImode
);
23378 modes
->safe_push (V64QImode
);
23380 else if (TARGET_AVX
&& !TARGET_PREFER_AVX128
)
23382 modes
->safe_push (V32QImode
);
23383 modes
->safe_push (V16QImode
);
23385 else if (TARGET_AVX
&& all
)
23387 modes
->safe_push (V16QImode
);
23388 modes
->safe_push (V32QImode
);
23390 else if (TARGET_SSE2
)
23391 modes
->safe_push (V16QImode
);
23393 if (TARGET_MMX_WITH_SSE
)
23394 modes
->safe_push (V8QImode
);
23397 modes
->safe_push (V4QImode
);
23402 /* Implemenation of targetm.vectorize.get_mask_mode. */
23404 static opt_machine_mode
23405 ix86_get_mask_mode (machine_mode data_mode
)
23407 unsigned vector_size
= GET_MODE_SIZE (data_mode
);
23408 unsigned nunits
= GET_MODE_NUNITS (data_mode
);
23409 unsigned elem_size
= vector_size
/ nunits
;
23411 /* Scalar mask case. */
23412 if ((TARGET_AVX512F
&& vector_size
== 64)
23413 || (TARGET_AVX512VL
&& (vector_size
== 32 || vector_size
== 16)))
23417 || (TARGET_AVX512BW
&& (elem_size
== 1 || elem_size
== 2)))
23418 return smallest_int_mode_for_size (nunits
);
23421 scalar_int_mode elem_mode
23422 = smallest_int_mode_for_size (elem_size
* BITS_PER_UNIT
);
23424 gcc_assert (elem_size
* nunits
== vector_size
);
23426 return mode_for_vector (elem_mode
, nunits
);
23431 /* Return class of registers which could be used for pseudo of MODE
23432 and of class RCLASS for spilling instead of memory. Return NO_REGS
23433 if it is not possible or non-profitable. */
23435 /* Disabled due to PRs 70902, 71453, 71555, 71596 and 71657. */
23438 ix86_spill_class (reg_class_t rclass
, machine_mode mode
)
23440 if (0 && TARGET_GENERAL_REGS_SSE_SPILL
23442 && TARGET_INTER_UNIT_MOVES_TO_VEC
23443 && TARGET_INTER_UNIT_MOVES_FROM_VEC
23444 && (mode
== SImode
|| (TARGET_64BIT
&& mode
== DImode
))
23445 && INTEGER_CLASS_P (rclass
))
23446 return ALL_SSE_REGS
;
23450 /* Implement TARGET_MAX_NOCE_IFCVT_SEQ_COST. Like the default implementation,
23451 but returns a lower bound. */
23453 static unsigned int
23454 ix86_max_noce_ifcvt_seq_cost (edge e
)
23456 bool predictable_p
= predictable_edge_p (e
);
23459 if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost
))
23460 return param_max_rtl_if_conversion_predictable_cost
;
23464 if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost
))
23465 return param_max_rtl_if_conversion_unpredictable_cost
;
23468 return BRANCH_COST (true, predictable_p
) * COSTS_N_INSNS (2);
23471 /* Return true if SEQ is a good candidate as a replacement for the
23472 if-convertible sequence described in IF_INFO. */
23475 ix86_noce_conversion_profitable_p (rtx_insn
*seq
, struct noce_if_info
*if_info
)
23477 if (TARGET_ONE_IF_CONV_INSN
&& if_info
->speed_p
)
23480 /* Punt if SEQ contains more than one CMOV or FCMOV instruction.
23481 Maybe we should allow even more conditional moves as long as they
23482 are used far enough not to stall the CPU, or also consider
23483 IF_INFO->TEST_BB succ edge probabilities. */
23484 for (rtx_insn
*insn
= seq
; insn
; insn
= NEXT_INSN (insn
))
23486 rtx set
= single_set (insn
);
23489 if (GET_CODE (SET_SRC (set
)) != IF_THEN_ELSE
)
23491 rtx src
= SET_SRC (set
);
23492 machine_mode mode
= GET_MODE (src
);
23493 if (GET_MODE_CLASS (mode
) != MODE_INT
23494 && GET_MODE_CLASS (mode
) != MODE_FLOAT
)
23496 if ((!REG_P (XEXP (src
, 1)) && !MEM_P (XEXP (src
, 1)))
23497 || (!REG_P (XEXP (src
, 2)) && !MEM_P (XEXP (src
, 2))))
23499 /* insn is CMOV or FCMOV. */
23500 if (++cmov_cnt
> 1)
23504 return default_noce_conversion_profitable_p (seq
, if_info
);
23507 /* x86-specific vector costs. */
23508 class ix86_vector_costs
: public vector_costs
23510 using vector_costs::vector_costs
;
23512 unsigned int add_stmt_cost (int count
, vect_cost_for_stmt kind
,
23513 stmt_vec_info stmt_info
, slp_tree node
,
23514 tree vectype
, int misalign
,
23515 vect_cost_model_location where
) override
;
23518 /* Implement targetm.vectorize.create_costs. */
23520 static vector_costs
*
23521 ix86_vectorize_create_costs (vec_info
*vinfo
, bool costing_for_scalar
)
23523 return new ix86_vector_costs (vinfo
, costing_for_scalar
);
23527 ix86_vector_costs::add_stmt_cost (int count
, vect_cost_for_stmt kind
,
23528 stmt_vec_info stmt_info
, slp_tree node
,
23529 tree vectype
, int misalign
,
23530 vect_cost_model_location where
)
23532 unsigned retval
= 0;
23534 = (kind
== scalar_stmt
|| kind
== scalar_load
|| kind
== scalar_store
);
23535 int stmt_cost
= - 1;
23538 machine_mode mode
= scalar_p
? SImode
: TImode
;
23540 if (vectype
!= NULL
)
23542 fp
= FLOAT_TYPE_P (vectype
);
23543 mode
= TYPE_MODE (vectype
);
23545 mode
= TYPE_MODE (TREE_TYPE (vectype
));
23548 if ((kind
== vector_stmt
|| kind
== scalar_stmt
)
23550 && stmt_info
->stmt
&& gimple_code (stmt_info
->stmt
) == GIMPLE_ASSIGN
)
23552 tree_code subcode
= gimple_assign_rhs_code (stmt_info
->stmt
);
23553 /*machine_mode inner_mode = mode;
23554 if (VECTOR_MODE_P (mode))
23555 inner_mode = GET_MODE_INNER (mode);*/
23560 case POINTER_PLUS_EXPR
:
23562 if (kind
== scalar_stmt
)
23564 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
23565 stmt_cost
= ix86_cost
->addss
;
23566 else if (X87_FLOAT_MODE_P (mode
))
23567 stmt_cost
= ix86_cost
->fadd
;
23569 stmt_cost
= ix86_cost
->add
;
23572 stmt_cost
= ix86_vec_cost (mode
, fp
? ix86_cost
->addss
23573 : ix86_cost
->sse_op
);
23577 /* For MULT_HIGHPART_EXPR, x86 only supports pmulhw,
23578 take it as MULT_EXPR. */
23579 case MULT_HIGHPART_EXPR
:
23580 stmt_cost
= ix86_multiplication_cost (ix86_cost
, mode
);
23582 /* There's no direct instruction for WIDEN_MULT_EXPR,
23583 take emulation into account. */
23584 case WIDEN_MULT_EXPR
:
23585 stmt_cost
= ix86_widen_mult_cost (ix86_cost
, mode
,
23586 TYPE_UNSIGNED (vectype
));
23590 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
23591 stmt_cost
= ix86_cost
->sse_op
;
23592 else if (X87_FLOAT_MODE_P (mode
))
23593 stmt_cost
= ix86_cost
->fchs
;
23594 else if (VECTOR_MODE_P (mode
))
23595 stmt_cost
= ix86_vec_cost (mode
, ix86_cost
->sse_op
);
23597 stmt_cost
= ix86_cost
->add
;
23599 case TRUNC_DIV_EXPR
:
23600 case CEIL_DIV_EXPR
:
23601 case FLOOR_DIV_EXPR
:
23602 case ROUND_DIV_EXPR
:
23603 case TRUNC_MOD_EXPR
:
23604 case CEIL_MOD_EXPR
:
23605 case FLOOR_MOD_EXPR
:
23607 case ROUND_MOD_EXPR
:
23608 case EXACT_DIV_EXPR
:
23609 stmt_cost
= ix86_division_cost (ix86_cost
, mode
);
23617 tree op1
= gimple_assign_rhs1 (stmt_info
->stmt
);
23618 tree op2
= gimple_assign_rhs2 (stmt_info
->stmt
);
23619 stmt_cost
= ix86_shift_rotate_cost
23621 (subcode
== RSHIFT_EXPR
23622 && !TYPE_UNSIGNED (TREE_TYPE (op1
)))
23623 ? ASHIFTRT
: LSHIFTRT
, mode
,
23624 TREE_CODE (op2
) == INTEGER_CST
,
23625 cst_and_fits_in_hwi (op2
)
23626 ? int_cst_value (op2
) : -1,
23627 false, false, NULL
, NULL
);
23631 /* Only sign-conversions are free. */
23632 if (tree_nop_conversion_p
23633 (TREE_TYPE (gimple_assign_lhs (stmt_info
->stmt
)),
23634 TREE_TYPE (gimple_assign_rhs1 (stmt_info
->stmt
))))
23646 if (SSE_FLOAT_MODE_SSEMATH_OR_HF_P (mode
))
23647 stmt_cost
= ix86_cost
->sse_op
;
23648 else if (VECTOR_MODE_P (mode
))
23649 stmt_cost
= ix86_vec_cost (mode
, ix86_cost
->sse_op
);
23651 stmt_cost
= ix86_cost
->add
;
23659 if ((kind
== vector_stmt
|| kind
== scalar_stmt
)
23662 && (cfn
= gimple_call_combined_fn (stmt_info
->stmt
)) != CFN_LAST
)
23666 stmt_cost
= ix86_vec_cost (mode
,
23667 mode
== SFmode
? ix86_cost
->fmass
23668 : ix86_cost
->fmasd
);
23671 stmt_cost
= ix86_multiplication_cost (ix86_cost
, mode
);
23677 /* If we do elementwise loads into a vector then we are bound by
23678 latency and execution resources for the many scalar loads
23679 (AGU and load ports). Try to account for this by scaling the
23680 construction cost by the number of elements involved. */
23681 if ((kind
== vec_construct
|| kind
== vec_to_scalar
)
23683 && (STMT_VINFO_TYPE (stmt_info
) == load_vec_info_type
23684 || STMT_VINFO_TYPE (stmt_info
) == store_vec_info_type
)
23685 && ((STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) == VMAT_ELEMENTWISE
23686 && (TREE_CODE (DR_STEP (STMT_VINFO_DATA_REF (stmt_info
)))
23688 || STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) == VMAT_GATHER_SCATTER
))
23690 stmt_cost
= ix86_builtin_vectorization_cost (kind
, vectype
, misalign
);
23691 stmt_cost
*= (TYPE_VECTOR_SUBPARTS (vectype
) + 1);
23693 else if (kind
== vec_construct
23695 && SLP_TREE_DEF_TYPE (node
) == vect_external_def
23696 && INTEGRAL_TYPE_P (TREE_TYPE (vectype
)))
23698 stmt_cost
= ix86_builtin_vectorization_cost (kind
, vectype
, misalign
);
23701 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_OPS (node
), i
, op
)
23702 if (TREE_CODE (op
) == SSA_NAME
)
23703 TREE_VISITED (op
) = 0;
23704 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_OPS (node
), i
, op
)
23706 if (TREE_CODE (op
) != SSA_NAME
23707 || TREE_VISITED (op
))
23709 TREE_VISITED (op
) = 1;
23710 gimple
*def
= SSA_NAME_DEF_STMT (op
);
23712 if (is_gimple_assign (def
)
23713 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def
))
23714 && ((tem
= gimple_assign_rhs1 (def
)), true)
23715 && TREE_CODE (tem
) == SSA_NAME
23716 /* A sign-change expands to nothing. */
23717 && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (def
)),
23719 def
= SSA_NAME_DEF_STMT (tem
);
23720 /* When the component is loaded from memory we can directly
23721 move it to a vector register, otherwise we have to go
23722 via a GPR or via vpinsr which involves similar cost.
23723 Likewise with a BIT_FIELD_REF extracting from a vector
23724 register we can hope to avoid using a GPR. */
23725 if (!is_gimple_assign (def
)
23726 || (!gimple_assign_load_p (def
)
23727 && (gimple_assign_rhs_code (def
) != BIT_FIELD_REF
23728 || !VECTOR_TYPE_P (TREE_TYPE
23729 (TREE_OPERAND (gimple_assign_rhs1 (def
), 0))))))
23730 stmt_cost
+= ix86_cost
->sse_to_integer
;
23732 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_OPS (node
), i
, op
)
23733 if (TREE_CODE (op
) == SSA_NAME
)
23734 TREE_VISITED (op
) = 0;
23736 if (stmt_cost
== -1)
23737 stmt_cost
= ix86_builtin_vectorization_cost (kind
, vectype
, misalign
);
23739 /* Penalize DFmode vector operations for Bonnell. */
23740 if (TARGET_CPU_P (BONNELL
) && kind
== vector_stmt
23741 && vectype
&& GET_MODE_INNER (TYPE_MODE (vectype
)) == DFmode
)
23742 stmt_cost
*= 5; /* FIXME: The value here is arbitrary. */
23744 /* Statements in an inner loop relative to the loop being
23745 vectorized are weighted more heavily. The value here is
23746 arbitrary and could potentially be improved with analysis. */
23747 retval
= adjust_cost_for_freq (stmt_info
, where
, count
* stmt_cost
);
23749 /* We need to multiply all vector stmt cost by 1.7 (estimated cost)
23750 for Silvermont as it has out of order integer pipeline and can execute
23751 2 scalar instruction per tick, but has in order SIMD pipeline. */
23752 if ((TARGET_CPU_P (SILVERMONT
) || TARGET_CPU_P (GOLDMONT
)
23753 || TARGET_CPU_P (GOLDMONT_PLUS
) || TARGET_CPU_P (INTEL
))
23754 && stmt_info
&& stmt_info
->stmt
)
23756 tree lhs_op
= gimple_get_lhs (stmt_info
->stmt
);
23757 if (lhs_op
&& TREE_CODE (TREE_TYPE (lhs_op
)) == INTEGER_TYPE
)
23758 retval
= (retval
* 17) / 10;
23761 m_costs
[where
] += retval
;
/* Validate target specific memory model bits in VAL.  */

static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
  enum memmodel model = memmodel_from_int (val);
  bool strong;

  if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
				      |MEMMODEL_MASK)
      || ((val & IX86_HLE_ACQUIRE) && (val & IX86_HLE_RELEASE)))
    {
      warning (OPT_Winvalid_memory_model,
	       "unknown architecture specific memory model");
      return MEMMODEL_SEQ_CST;
    }
  strong = (is_mm_acq_rel (model) || is_mm_seq_cst (model));
  if (val & IX86_HLE_ACQUIRE && !(is_mm_acquire (model) || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "%<HLE_ACQUIRE%> not used with %<ACQUIRE%> or stronger "
	       "memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
    }
  if (val & IX86_HLE_RELEASE && !(is_mm_release (model) || strong))
    {
      warning (OPT_Winvalid_memory_model,
	       "%<HLE_RELEASE%> not used with %<RELEASE%> or stronger "
	       "memory model");
      return MEMMODEL_SEQ_CST | IX86_HLE_RELEASE;
    }
  return val;
}
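/* Example of the target-specific bits checked above (illustrative only):

     while (__atomic_exchange_n (&lock, 1,
				 __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
       ;
     __atomic_store_n (&lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);

   IX86_HLE_ACQUIRE/IX86_HLE_RELEASE correspond to the __ATOMIC_HLE_* flags
   and must be paired with an acquire/release (or stronger) base model.  */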
23800 /* Set CLONEI->vecsize_mangle, CLONEI->mask_mode, CLONEI->vecsize_int,
23801 CLONEI->vecsize_float and if CLONEI->simdlen is 0, also
23802 CLONEI->simdlen. Return 0 if SIMD clones shouldn't be emitted,
23803 or number of vecsize_mangle variants that should be emitted. */
23806 ix86_simd_clone_compute_vecsize_and_simdlen (struct cgraph_node
*node
,
23807 struct cgraph_simd_clone
*clonei
,
23808 tree base_type
, int num
,
23813 if (clonei
->simdlen
23814 && (clonei
->simdlen
< 2
23815 || clonei
->simdlen
> 1024
23816 || (clonei
->simdlen
& (clonei
->simdlen
- 1)) != 0))
23819 warning_at (DECL_SOURCE_LOCATION (node
->decl
), 0,
23820 "unsupported simdlen %wd", clonei
->simdlen
.to_constant ());
23824 tree ret_type
= TREE_TYPE (TREE_TYPE (node
->decl
));
23825 if (TREE_CODE (ret_type
) != VOID_TYPE
)
23826 switch (TYPE_MODE (ret_type
))
23834 /* case E_SCmode: */
23835 /* case E_DCmode: */
23836 if (!AGGREGATE_TYPE_P (ret_type
))
23841 warning_at (DECL_SOURCE_LOCATION (node
->decl
), 0,
23842 "unsupported return type %qT for simd", ret_type
);
23848 tree type_arg_types
= TYPE_ARG_TYPES (TREE_TYPE (node
->decl
));
23849 bool decl_arg_p
= (node
->definition
|| type_arg_types
== NULL_TREE
);
23851 for (t
= (decl_arg_p
? DECL_ARGUMENTS (node
->decl
) : type_arg_types
), i
= 0;
23852 t
&& t
!= void_list_node
; t
= TREE_CHAIN (t
), i
++)
23854 tree arg_type
= decl_arg_p
? TREE_TYPE (t
) : TREE_VALUE (t
);
23855 switch (TYPE_MODE (arg_type
))
23863 /* case E_SCmode: */
23864 /* case E_DCmode: */
23865 if (!AGGREGATE_TYPE_P (arg_type
))
23869 if (clonei
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
)
23872 warning_at (DECL_SOURCE_LOCATION (node
->decl
), 0,
23873 "unsupported argument type %qT for simd", arg_type
);
23878 if (!TREE_PUBLIC (node
->decl
) || !explicit_p
)
23880 /* If the function isn't exported, we can pick up just one ISA
23882 if (TARGET_AVX512F
)
23883 clonei
->vecsize_mangle
= 'e';
23884 else if (TARGET_AVX2
)
23885 clonei
->vecsize_mangle
= 'd';
23886 else if (TARGET_AVX
)
23887 clonei
->vecsize_mangle
= 'c';
23889 clonei
->vecsize_mangle
= 'b';
23894 clonei
->vecsize_mangle
= "bcde"[num
];
23897 clonei
->mask_mode
= VOIDmode
;
23898 switch (clonei
->vecsize_mangle
)
23901 clonei
->vecsize_int
= 128;
23902 clonei
->vecsize_float
= 128;
23905 clonei
->vecsize_int
= 128;
23906 clonei
->vecsize_float
= 256;
23909 clonei
->vecsize_int
= 256;
23910 clonei
->vecsize_float
= 256;
23913 clonei
->vecsize_int
= 512;
23914 clonei
->vecsize_float
= 512;
23915 if (TYPE_MODE (base_type
) == QImode
)
23916 clonei
->mask_mode
= DImode
;
23918 clonei
->mask_mode
= SImode
;
23921 if (clonei
->simdlen
== 0)
23923 if (SCALAR_INT_MODE_P (TYPE_MODE (base_type
)))
23924 clonei
->simdlen
= clonei
->vecsize_int
;
23926 clonei
->simdlen
= clonei
->vecsize_float
;
23927 clonei
->simdlen
= clonei
->simdlen
23928 / GET_MODE_BITSIZE (TYPE_MODE (base_type
));
23930 else if (clonei
->simdlen
> 16)
23932 /* For compatibility with ICC, use the same upper bounds
23933 for simdlen. In particular, for CTYPE below, use the return type,
23934 unless the function returns void, in that case use the characteristic
23935 type. If it is possible for given SIMDLEN to pass CTYPE value
23936 in registers (8 [XYZ]MM* regs for 32-bit code, 16 [XYZ]MM* regs
23937 for 64-bit code), accept that SIMDLEN, otherwise warn and don't
23938 emit corresponding clone. */
23939 tree ctype
= ret_type
;
23940 if (VOID_TYPE_P (ret_type
))
23942 int cnt
= GET_MODE_BITSIZE (TYPE_MODE (ctype
)) * clonei
->simdlen
;
23943 if (SCALAR_INT_MODE_P (TYPE_MODE (ctype
)))
23944 cnt
/= clonei
->vecsize_int
;
23946 cnt
/= clonei
->vecsize_float
;
23947 if (cnt
> (TARGET_64BIT
? 16 : 8))
23950 warning_at (DECL_SOURCE_LOCATION (node
->decl
), 0,
23951 "unsupported simdlen %wd",
23952 clonei
->simdlen
.to_constant ());
23959 /* If SIMD clone NODE can't be used in a vectorized loop
23960 in current function, return -1, otherwise return a badness of using it
23961 (0 if it is most desirable from vecsize_mangle point of view, 1
23962 slightly less desirable, etc.). */
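/* Reference for the vecsize_mangle letters used here (illustrative, not
   from the original sources): the x86 vector function ABI encodes the ISA
   in the clone's name, e.g. for "#pragma omp declare simd" on float f (float)

     _ZGVbN4v_f     'b'  SSE2,    128-bit vectors
     _ZGVcN8v_f     'c'  AVX,     128-bit int / 256-bit float
     _ZGVdN8v_f     'd'  AVX2,    256-bit vectors
     _ZGVeN16v_f    'e'  AVX512F, 512-bit vectors

   ix86_simd_clone_usable ranks these letters against the ISA actually
   enabled in the current function.  */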
23965 ix86_simd_clone_usable (struct cgraph_node
*node
)
23967 switch (node
->simdclone
->vecsize_mangle
)
23974 return TARGET_AVX512F
? 3 : TARGET_AVX2
? 2 : 1;
23978 return TARGET_AVX512F
? 2 : TARGET_AVX2
? 1 : 0;
23982 return TARGET_AVX512F
? 1 : 0;
23984 if (!TARGET_AVX512F
)
23988 gcc_unreachable ();
23992 /* This function adjusts the unroll factor based on
23993 the hardware capabilities. For ex, bdver3 has
23994 a loop buffer which makes unrolling of smaller
23995 loops less important. This function decides the
23996 unroll factor using number of memory references
23997 (value 32 is used) as a heuristic. */
24000 ix86_loop_unroll_adjust (unsigned nunroll
, class loop
*loop
)
24005 unsigned mem_count
= 0;
24007 /* Unroll small size loop when unroll factor is not explicitly
24009 if (ix86_unroll_only_small_loops
&& !loop
->unroll
)
24011 if (loop
->ninsns
<= ix86_cost
->small_unroll_ninsns
)
24012 return MIN (nunroll
, ix86_cost
->small_unroll_factor
);
24017 if (!TARGET_ADJUST_UNROLL
)
24020 /* Count the number of memory references within the loop body.
24021 This value determines the unrolling factor for bdver3 and bdver4
24023 subrtx_iterator::array_type array
;
24024 bbs
= get_loop_body (loop
);
24025 for (i
= 0; i
< loop
->num_nodes
; i
++)
24026 FOR_BB_INSNS (bbs
[i
], insn
)
24027 if (NONDEBUG_INSN_P (insn
))
24028 FOR_EACH_SUBRTX (iter
, array
, PATTERN (insn
), NONCONST
)
24029 if (const_rtx x
= *iter
)
24032 machine_mode mode
= GET_MODE (x
);
24033 unsigned int n_words
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
24041 if (mem_count
&& mem_count
<=32)
24042 return MIN (nunroll
, 32 / mem_count
);
/* Implement TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P.  */

static bool
ix86_float_exceptions_rounding_supported_p (void)
{
  /* For x87 floating point with standard excess precision handling,
     there is no adddf3 pattern (since x87 floating point only has
     XFmode operations) so the default hook implementation gets this
     wrong.  */
  return TARGET_80387 || (TARGET_SSE && TARGET_SSE_MATH);
}

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */
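/* Rough shape of the sequences built below (illustrative, not from the
   original sources), in pseudo-C:

     hold:    fnstenv (&fenv); fnclex ();            save x87 env, clear flags
              mxcsr = stmxcsr ();
              ldmxcsr ((mxcsr | 0x1f80) & 0xffffffc0);
     clear:   fnclex (); ldmxcsr (mxcsr_mod);
     update:  exc  = fnstsw ();                      collect x87 exceptions
              exc |= stmxcsr ();                     collect SSE exceptions
              fldenv (&fenv); ldmxcsr (mxcsr);       restore both environments
              __atomic_feraiseexcept (exc);

   Which halves are emitted depends on TARGET_80387 and TARGET_SSE_MATH.  */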
24063 ix86_atomic_assign_expand_fenv (tree
*hold
, tree
*clear
, tree
*update
)
24065 if (!TARGET_80387
&& !(TARGET_SSE
&& TARGET_SSE_MATH
))
24067 tree exceptions_var
= create_tmp_var_raw (integer_type_node
);
24070 tree fenv_index_type
= build_index_type (size_int (6));
24071 tree fenv_type
= build_array_type (unsigned_type_node
, fenv_index_type
);
24072 tree fenv_var
= create_tmp_var_raw (fenv_type
);
24073 TREE_ADDRESSABLE (fenv_var
) = 1;
24074 tree fenv_ptr
= build_pointer_type (fenv_type
);
24075 tree fenv_addr
= build1 (ADDR_EXPR
, fenv_ptr
, fenv_var
);
24076 fenv_addr
= fold_convert (ptr_type_node
, fenv_addr
);
24077 tree fnstenv
= get_ix86_builtin (IX86_BUILTIN_FNSTENV
);
24078 tree fldenv
= get_ix86_builtin (IX86_BUILTIN_FLDENV
);
24079 tree fnstsw
= get_ix86_builtin (IX86_BUILTIN_FNSTSW
);
24080 tree fnclex
= get_ix86_builtin (IX86_BUILTIN_FNCLEX
);
24081 tree hold_fnstenv
= build_call_expr (fnstenv
, 1, fenv_addr
);
24082 tree hold_fnclex
= build_call_expr (fnclex
, 0);
24083 fenv_var
= build4 (TARGET_EXPR
, fenv_type
, fenv_var
, hold_fnstenv
,
24084 NULL_TREE
, NULL_TREE
);
24085 *hold
= build2 (COMPOUND_EXPR
, void_type_node
, fenv_var
,
24087 *clear
= build_call_expr (fnclex
, 0);
24088 tree sw_var
= create_tmp_var_raw (short_unsigned_type_node
);
24089 tree fnstsw_call
= build_call_expr (fnstsw
, 0);
24090 tree sw_mod
= build4 (TARGET_EXPR
, short_unsigned_type_node
, sw_var
,
24091 fnstsw_call
, NULL_TREE
, NULL_TREE
);
24092 tree exceptions_x87
= fold_convert (integer_type_node
, sw_var
);
24093 tree update_mod
= build4 (TARGET_EXPR
, integer_type_node
,
24094 exceptions_var
, exceptions_x87
,
24095 NULL_TREE
, NULL_TREE
);
24096 *update
= build2 (COMPOUND_EXPR
, integer_type_node
,
24097 sw_mod
, update_mod
);
24098 tree update_fldenv
= build_call_expr (fldenv
, 1, fenv_addr
);
24099 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
, update_fldenv
);
24101 if (TARGET_SSE
&& TARGET_SSE_MATH
)
24103 tree mxcsr_orig_var
= create_tmp_var_raw (unsigned_type_node
);
24104 tree mxcsr_mod_var
= create_tmp_var_raw (unsigned_type_node
);
24105 tree stmxcsr
= get_ix86_builtin (IX86_BUILTIN_STMXCSR
);
24106 tree ldmxcsr
= get_ix86_builtin (IX86_BUILTIN_LDMXCSR
);
24107 tree stmxcsr_hold_call
= build_call_expr (stmxcsr
, 0);
24108 tree hold_assign_orig
= build4 (TARGET_EXPR
, unsigned_type_node
,
24109 mxcsr_orig_var
, stmxcsr_hold_call
,
24110 NULL_TREE
, NULL_TREE
);
24111 tree hold_mod_val
= build2 (BIT_IOR_EXPR
, unsigned_type_node
,
24113 build_int_cst (unsigned_type_node
, 0x1f80));
24114 hold_mod_val
= build2 (BIT_AND_EXPR
, unsigned_type_node
, hold_mod_val
,
24115 build_int_cst (unsigned_type_node
, 0xffffffc0));
24116 tree hold_assign_mod
= build4 (TARGET_EXPR
, unsigned_type_node
,
24117 mxcsr_mod_var
, hold_mod_val
,
24118 NULL_TREE
, NULL_TREE
);
24119 tree ldmxcsr_hold_call
= build_call_expr (ldmxcsr
, 1, mxcsr_mod_var
);
24120 tree hold_all
= build2 (COMPOUND_EXPR
, unsigned_type_node
,
24121 hold_assign_orig
, hold_assign_mod
);
24122 hold_all
= build2 (COMPOUND_EXPR
, void_type_node
, hold_all
,
24123 ldmxcsr_hold_call
);
24125 *hold
= build2 (COMPOUND_EXPR
, void_type_node
, *hold
, hold_all
);
24128 tree ldmxcsr_clear_call
= build_call_expr (ldmxcsr
, 1, mxcsr_mod_var
);
24130 *clear
= build2 (COMPOUND_EXPR
, void_type_node
, *clear
,
24131 ldmxcsr_clear_call
);
24133 *clear
= ldmxcsr_clear_call
;
24134 tree stxmcsr_update_call
= build_call_expr (stmxcsr
, 0);
24135 tree exceptions_sse
= fold_convert (integer_type_node
,
24136 stxmcsr_update_call
);
24139 tree exceptions_mod
= build2 (BIT_IOR_EXPR
, integer_type_node
,
24140 exceptions_var
, exceptions_sse
);
24141 tree exceptions_assign
= build2 (MODIFY_EXPR
, integer_type_node
,
24142 exceptions_var
, exceptions_mod
);
24143 *update
= build2 (COMPOUND_EXPR
, integer_type_node
, *update
,
24144 exceptions_assign
);
24147 *update
= build4 (TARGET_EXPR
, integer_type_node
, exceptions_var
,
24148 exceptions_sse
, NULL_TREE
, NULL_TREE
);
24149 tree ldmxcsr_update_call
= build_call_expr (ldmxcsr
, 1, mxcsr_orig_var
);
24150 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
24151 ldmxcsr_update_call
);
24153 tree atomic_feraiseexcept
24154 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT
);
24155 tree atomic_feraiseexcept_call
= build_call_expr (atomic_feraiseexcept
,
24156 1, exceptions_var
);
24157 *update
= build2 (COMPOUND_EXPR
, void_type_node
, *update
,
24158 atomic_feraiseexcept_call
);
#if !TARGET_MACHO && !TARGET_DLLIMPORT_DECL_ATTRIBUTES
/* For i386, common symbol is local only for non-PIE binaries.  For
   x86-64, common symbol is local only for non-PIE binaries or linker
   supports copy reloc in PIE binaries.  */

static bool
ix86_binds_local_p (const_tree exp)
{
  bool direct_extern_access
    = (ix86_direct_extern_access
       && !(VAR_OR_FUNCTION_DECL_P (exp)
	    && lookup_attribute ("nodirect_extern_access",
				 DECL_ATTRIBUTES (exp))));
  if (!direct_extern_access)
    ix86_has_no_direct_extern_access = true;
  return default_binds_local_p_3 (exp, flag_shlib != 0, true,
				  direct_extern_access,
				  (direct_extern_access
				   && (!flag_pic
				       || (TARGET_64BIT
					   && HAVE_LD_PIE_COPYRELOC != 0))));
}
#endif
/* If flag_pic or ix86_direct_extern_access is false, then neither
   local nor global relocs should be placed in readonly memory.  */

static int
ix86_reloc_rw_mask (void)
{
  return (flag_pic || !ix86_direct_extern_access) ? 3 : 0;
}
/* If MEM is in the form of [base+offset], extract the two parts
   of address and set to BASE and OFFSET, otherwise return false.  */

static bool
extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
{
  rtx addr;

  gcc_assert (MEM_P (mem));

  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (REG_P (addr) || GET_CODE (addr) == SYMBOL_REF)
    {
      *base = addr;
      *offset = const0_rtx;
      return true;
    }

  if (GET_CODE (addr) == PLUS
      && (REG_P (XEXP (addr, 0))
	  || GET_CODE (XEXP (addr, 0)) == SYMBOL_REF)
      && CONST_INT_P (XEXP (addr, 1)))
    {
      *base = XEXP (addr, 0);
      *offset = XEXP (addr, 1);
      return true;
    }

  return false;
}
/* Given OPERANDS of consecutive load/store, check if we can merge
   them into move multiple.  LOAD is true if they are load instructions.
   MODE is the mode of memory operands.  */

bool
ix86_operands_ok_for_move_multiple (rtx *operands, bool load,
				    machine_mode mode)
{
  HOST_WIDE_INT offval_1, offval_2, msize;
  rtx mem_1, mem_2, reg_1, reg_2, base_1, base_2, offset_1, offset_2;

  if (load)
    {
      mem_1 = operands[1];
      mem_2 = operands[3];
      reg_1 = operands[0];
      reg_2 = operands[2];
    }
  else
    {
      mem_1 = operands[0];
      mem_2 = operands[2];
      reg_1 = operands[1];
      reg_2 = operands[3];
    }

  gcc_assert (REG_P (reg_1) && REG_P (reg_2));

  if (REGNO (reg_1) != REGNO (reg_2))
    return false;

  /* Check if the addresses are in the form of [base+offset].  */
  if (!extract_base_offset_in_addr (mem_1, &base_1, &offset_1))
    return false;
  if (!extract_base_offset_in_addr (mem_2, &base_2, &offset_2))
    return false;

  /* Check if the bases are the same.  */
  if (!rtx_equal_p (base_1, base_2))
    return false;

  offval_1 = INTVAL (offset_1);
  offval_2 = INTVAL (offset_2);
  msize = GET_MODE_SIZE (mode);
  /* Check if mem_1 is adjacent to mem_2 and mem_1 has lower address.  */
  if (offval_1 + msize != offval_2)
    return false;

  return true;
}
24280 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
24283 ix86_optab_supported_p (int op
, machine_mode mode1
, machine_mode
,
24284 optimization_type opt_type
)
24299 return opt_type
== OPTIMIZE_FOR_SPEED
;
24302 if (SSE_FLOAT_MODE_P (mode1
)
24304 && !flag_trapping_math
24306 && mode1
!= HFmode
)
24307 return opt_type
== OPTIMIZE_FOR_SPEED
;
24313 if (((SSE_FLOAT_MODE_P (mode1
)
24316 || mode1
== HFmode
)
24317 && !flag_trapping_math
)
24319 return opt_type
== OPTIMIZE_FOR_SPEED
;
24322 return opt_type
== OPTIMIZE_FOR_SPEED
&& use_rsqrt_p (mode1
);
/* Address space support.

   This is not "far pointers" in the 16-bit sense, but an easy way
   to use %fs and %gs segment prefixes.  Therefore:

    (a) All address spaces have the same modes,
    (b) All address spaces have the same address forms,
    (c) While %fs and %gs are technically subsets of the generic
	address space, they are probably not subsets of each other.
    (d) Since we have no access to the segment base register values
	without resorting to a system call, we cannot convert a
	non-default address space to a default address space.
	Therefore we do not claim %fs or %gs are subsets of generic.

   Therefore we can (mostly) use the default hooks.  */
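/* Illustrative usage (not from the original sources): the C-level view of
   these address spaces is the __seg_fs / __seg_gs qualifiers, e.g.

     int __seg_gs *p = (int __seg_gs *) 16;
     int v = *p;		   load from %gs:16

   which is also why address 0 must stay valid in them (see below).  */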
/* All use of segmentation is assumed to make address 0 valid.  */

static bool
ix86_addr_space_zero_address_valid (addr_space_t as)
{
  return as != ADDR_SPACE_GENERIC;
}

static void
ix86_init_libfuncs (void)
{
  if (TARGET_64BIT)
    {
      set_optab_libfunc (sdivmod_optab, TImode, "__divmodti4");
      set_optab_libfunc (udivmod_optab, TImode, "__udivmodti4");
    }
  else
    {
      set_optab_libfunc (sdivmod_optab, DImode, "__divmoddi4");
      set_optab_libfunc (udivmod_optab, DImode, "__udivmoddi4");
    }

#if TARGET_MACHO
  darwin_rename_builtins ();
#endif
}
24372 /* Set the value of FLT_EVAL_METHOD in float.h. When using only the
24373 FPU, assume that the fpcw is set to extended precision; when using
24374 only SSE, rounding is correct; when using both SSE and the FPU,
24375 the rounding precision is indeterminate, since either may be chosen
24376 apparently at random. */
24378 static enum flt_eval_method
24379 ix86_get_excess_precision (enum excess_precision_type type
)
24383 case EXCESS_PRECISION_TYPE_FAST
:
24384 /* The fastest type to promote to will always be the native type,
24385 whether that occurs with implicit excess precision or
24387 return TARGET_AVX512FP16
24388 ? FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16
24389 : FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
24390 case EXCESS_PRECISION_TYPE_STANDARD
:
24391 case EXCESS_PRECISION_TYPE_IMPLICIT
:
24392 /* Otherwise, the excess precision we want when we are
24393 in a standards compliant mode, and the implicit precision we
24394 provide would be identical were it not for the unpredictable
24396 if (TARGET_AVX512FP16
&& TARGET_SSE_MATH
)
24397 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16
;
24398 else if (!TARGET_80387
)
24399 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
24400 else if (!TARGET_MIX_SSE_I387
)
24402 if (!(TARGET_SSE
&& TARGET_SSE_MATH
))
24403 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE
;
24404 else if (TARGET_SSE2
)
24405 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
24408 /* If we are in standards compliant mode, but we know we will
24409 calculate in unpredictable precision, return
24410 FLT_EVAL_METHOD_FLOAT. There is no reason to introduce explicit
24411 excess precision if the target can't guarantee it will honor
24413 return (type
== EXCESS_PRECISION_TYPE_STANDARD
24414 ? FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
24415 : FLT_EVAL_METHOD_UNPREDICTABLE
);
24416 case EXCESS_PRECISION_TYPE_FLOAT16
:
24418 && !(TARGET_SSE_MATH
&& TARGET_SSE
))
24419 error ("%<-fexcess-precision=16%> is not compatible with %<-mfpmath=387%>");
24420 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16
;
24422 gcc_unreachable ();
24425 return FLT_EVAL_METHOD_UNPREDICTABLE
;
/* Implement PUSH_ROUNDING.  On 386, we have pushw instruction that
   decrements by exactly 2 no matter what the position was, there is no pushb.

   But as CIE data alignment factor on this arch is -4 for 32bit targets
   and -8 for 64bit targets, we need to make sure all stack pointer adjustments
   are in multiple of 4 for 32bit targets and 8 for 64bit targets.  */

poly_int64
ix86_push_rounding (poly_int64 bytes)
{
  return ROUND_UP (bytes, UNITS_PER_WORD);
}
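/* Example (illustrative): pushing a QImode or HImode value is still rounded
   up to UNITS_PER_WORD, so the stack pointer moves by 4 bytes on ia32 and
   8 bytes on x86-64, matching the CIE data alignment factor above.  */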
/* Use 8 bits of metadata starting at bit 48 for LAM_U48,
   6 bits starting at bit 57 for LAM_U57.  */
#define IX86_HWASAN_SHIFT (ix86_lam_type == lam_u48		\
			   ? 48					\
			   : (ix86_lam_type == lam_u57 ? 57 : 0))
#define IX86_HWASAN_TAG_SIZE (ix86_lam_type == lam_u48		\
			      ? 8				\
			      : (ix86_lam_type == lam_u57 ? 6 : 0))

/* Implement TARGET_MEMTAG_CAN_TAG_ADDRESSES.  */
bool
ix86_memtag_can_tag_addresses ()
{
  return ix86_lam_type != lam_none && TARGET_LP64;
}
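/* Tag layout implied by the definitions above (illustrative only):

     LAM_U57:  bit 63 | bits 62..57 (6-bit tag) | bits 56..0 address
     LAM_U48:           bits 55..48 (8-bit tag) | bits 47..0 address

   Bit 63 is left alone, which is why the set/extract/untag helpers below
   mask it separately when LAM_U57 is in use.  */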
24457 /* Implement TARGET_MEMTAG_TAG_SIZE. */
24459 ix86_memtag_tag_size ()
24461 return IX86_HWASAN_TAG_SIZE
;
24464 /* Implement TARGET_MEMTAG_SET_TAG. */
24466 ix86_memtag_set_tag (rtx untagged
, rtx tag
, rtx target
)
24468 /* default_memtag_insert_random_tag may
24469 generate tag with value more than 6 bits. */
24470 if (ix86_lam_type
== lam_u57
)
24472 unsigned HOST_WIDE_INT and_imm
24473 = (HOST_WIDE_INT_1U
<< IX86_HWASAN_TAG_SIZE
) - 1;
24475 emit_insn (gen_andqi3 (tag
, tag
, GEN_INT (and_imm
)));
24477 tag
= expand_simple_binop (Pmode
, ASHIFT
, tag
,
24478 GEN_INT (IX86_HWASAN_SHIFT
), NULL_RTX
,
24479 /* unsignedp = */1, OPTAB_WIDEN
);
24480 rtx ret
= expand_simple_binop (Pmode
, IOR
, untagged
, tag
, target
,
24481 /* unsignedp = */1, OPTAB_DIRECT
);
24485 /* Implement TARGET_MEMTAG_EXTRACT_TAG. */
24487 ix86_memtag_extract_tag (rtx tagged_pointer
, rtx target
)
24489 rtx tag
= expand_simple_binop (Pmode
, LSHIFTRT
, tagged_pointer
,
24490 GEN_INT (IX86_HWASAN_SHIFT
), target
,
24491 /* unsignedp = */0,
24493 rtx ret
= gen_reg_rtx (QImode
);
24494 /* Mask off bit63 when LAM_U57. */
24495 if (ix86_lam_type
== lam_u57
)
24497 unsigned HOST_WIDE_INT and_imm
24498 = (HOST_WIDE_INT_1U
<< IX86_HWASAN_TAG_SIZE
) - 1;
24499 emit_insn (gen_andqi3 (ret
, gen_lowpart (QImode
, tag
),
24500 gen_int_mode (and_imm
, QImode
)));
24503 emit_move_insn (ret
, gen_lowpart (QImode
, tag
));
24507 /* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER. */
24509 ix86_memtag_untagged_pointer (rtx tagged_pointer
, rtx target
)
24511 /* Leave bit63 alone. */
24512 rtx tag_mask
= gen_int_mode (((HOST_WIDE_INT_1U
<< IX86_HWASAN_SHIFT
)
24513 + (HOST_WIDE_INT_1U
<< 63) - 1),
24515 rtx untagged_base
= expand_simple_binop (Pmode
, AND
, tagged_pointer
,
24516 tag_mask
, target
, true,
24518 gcc_assert (untagged_base
);
24519 return untagged_base
;
24522 /* Implement TARGET_MEMTAG_ADD_TAG. */
24524 ix86_memtag_add_tag (rtx base
, poly_int64 offset
, unsigned char tag_offset
)
24526 rtx base_tag
= gen_reg_rtx (QImode
);
24527 rtx base_addr
= gen_reg_rtx (Pmode
);
24528 rtx tagged_addr
= gen_reg_rtx (Pmode
);
24529 rtx new_tag
= gen_reg_rtx (QImode
);
24530 unsigned HOST_WIDE_INT and_imm
24531 = (HOST_WIDE_INT_1U
<< IX86_HWASAN_SHIFT
) - 1;
24533 /* When there's "overflow" in tag adding,
24534 need to mask the most significant bit off. */
24535 emit_move_insn (base_tag
, ix86_memtag_extract_tag (base
, NULL_RTX
));
24536 emit_move_insn (base_addr
,
24537 ix86_memtag_untagged_pointer (base
, NULL_RTX
));
24538 emit_insn (gen_add2_insn (base_tag
, gen_int_mode (tag_offset
, QImode
)));
24539 emit_move_insn (new_tag
, base_tag
);
24540 emit_insn (gen_andqi3 (new_tag
, new_tag
, gen_int_mode (and_imm
, QImode
)));
24541 emit_move_insn (tagged_addr
,
24542 ix86_memtag_set_tag (base_addr
, new_tag
, NULL_RTX
));
24543 return plus_constant (Pmode
, tagged_addr
, offset
);
24546 /* Target-specific selftests. */
24550 namespace selftest
{
24552 /* Verify that hard regs are dumped as expected (in compact mode). */
24555 ix86_test_dumping_hard_regs ()
24557 ASSERT_RTL_DUMP_EQ ("(reg:SI ax)", gen_raw_REG (SImode
, 0));
24558 ASSERT_RTL_DUMP_EQ ("(reg:SI dx)", gen_raw_REG (SImode
, 1));
24561 /* Test dumping an insn with repeated references to the same SCRATCH,
24562 to verify the rtx_reuse code. */
24565 ix86_test_dumping_memory_blockage ()
24567 set_new_first_and_last_insn (NULL
, NULL
);
24569 rtx pat
= gen_memory_blockage ();
24570 rtx_reuse_manager r
;
24571 r
.preprocess (pat
);
24573 /* Verify that the repeated references to the SCRATCH show use
24574 reuse IDS. The first should be prefixed with a reuse ID,
24575 and the second should be dumped as a "reuse_rtx" of that ID.
24576 The expected string assumes Pmode == DImode. */
24577 if (Pmode
== DImode
)
24578 ASSERT_RTL_DUMP_EQ_WITH_REUSE
24579 ("(cinsn 1 (set (mem/v:BLK (0|scratch:DI) [0 A8])\n"
24581 " (mem/v:BLK (reuse_rtx 0) [0 A8])\n"
24582 " ] UNSPEC_MEMORY_BLOCKAGE)))\n", pat
, &r
);
24585 /* Verify loading an RTL dump; specifically a dump of copying
24586 a param on x86_64 from a hard reg into the frame.
24587 This test is target-specific since the dump contains target-specific
24591 ix86_test_loading_dump_fragment_1 ()
24593 rtl_dump_test
t (SELFTEST_LOCATION
,
24594 locate_file ("x86_64/copy-hard-reg-into-frame.rtl"));
24596 rtx_insn
*insn
= get_insn_by_uid (1);
24598 /* The block structure and indentation here is purely for
24599 readability; it mirrors the structure of the rtx. */
24602 rtx pat
= PATTERN (insn
);
24603 ASSERT_EQ (SET
, GET_CODE (pat
));
24605 rtx dest
= SET_DEST (pat
);
24606 ASSERT_EQ (MEM
, GET_CODE (dest
));
24607 /* Verify the "/c" was parsed. */
24608 ASSERT_TRUE (RTX_FLAG (dest
, call
));
24609 ASSERT_EQ (SImode
, GET_MODE (dest
));
24611 rtx addr
= XEXP (dest
, 0);
24612 ASSERT_EQ (PLUS
, GET_CODE (addr
));
24613 ASSERT_EQ (DImode
, GET_MODE (addr
));
24615 rtx lhs
= XEXP (addr
, 0);
24616 /* Verify that the "frame" REG was consolidated. */
24617 ASSERT_RTX_PTR_EQ (frame_pointer_rtx
, lhs
);
24620 rtx rhs
= XEXP (addr
, 1);
24621 ASSERT_EQ (CONST_INT
, GET_CODE (rhs
));
24622 ASSERT_EQ (-4, INTVAL (rhs
));
24625 /* Verify the "[1 i+0 S4 A32]" was parsed. */
24626 ASSERT_EQ (1, MEM_ALIAS_SET (dest
));
24627 /* "i" should have been handled by synthesizing a global int
24628 variable named "i". */
24629 mem_expr
= MEM_EXPR (dest
);
24630 ASSERT_NE (mem_expr
, NULL
);
24631 ASSERT_EQ (VAR_DECL
, TREE_CODE (mem_expr
));
24632 ASSERT_EQ (integer_type_node
, TREE_TYPE (mem_expr
));
24633 ASSERT_EQ (IDENTIFIER_NODE
, TREE_CODE (DECL_NAME (mem_expr
)));
24634 ASSERT_STREQ ("i", IDENTIFIER_POINTER (DECL_NAME (mem_expr
)));
24636 ASSERT_TRUE (MEM_OFFSET_KNOWN_P (dest
));
24637 ASSERT_EQ (0, MEM_OFFSET (dest
));
24639 ASSERT_EQ (4, MEM_SIZE (dest
));
24641 ASSERT_EQ (32, MEM_ALIGN (dest
));
24644 rtx src
= SET_SRC (pat
);
24645 ASSERT_EQ (REG
, GET_CODE (src
));
24646 ASSERT_EQ (SImode
, GET_MODE (src
));
24647 ASSERT_EQ (5, REGNO (src
));
24648 tree reg_expr
= REG_EXPR (src
);
24649 /* "i" here should point to the same var as for the MEM_EXPR. */
24650 ASSERT_EQ (reg_expr
, mem_expr
);
24655 /* Verify that the RTL loader copes with a call_insn dump.
24656 This test is target-specific since the dump contains a target-specific
24660 ix86_test_loading_call_insn ()
24662 /* The test dump includes register "xmm0", where requires TARGET_SSE
24667 rtl_dump_test
t (SELFTEST_LOCATION
, locate_file ("x86_64/call-insn.rtl"));
24669 rtx_insn
*insn
= get_insns ();
24670 ASSERT_EQ (CALL_INSN
, GET_CODE (insn
));
24673 ASSERT_TRUE (RTX_FLAG (insn
, jump
));
24675 rtx pat
= PATTERN (insn
);
24676 ASSERT_EQ (CALL
, GET_CODE (SET_SRC (pat
)));
24678 /* Verify REG_NOTES. */
24680 /* "(expr_list:REG_CALL_DECL". */
24681 ASSERT_EQ (EXPR_LIST
, GET_CODE (REG_NOTES (insn
)));
24682 rtx_expr_list
*note0
= as_a
<rtx_expr_list
*> (REG_NOTES (insn
));
24683 ASSERT_EQ (REG_CALL_DECL
, REG_NOTE_KIND (note0
));
24685 /* "(expr_list:REG_EH_REGION (const_int 0 [0])". */
24686 rtx_expr_list
*note1
= note0
->next ();
24687 ASSERT_EQ (REG_EH_REGION
, REG_NOTE_KIND (note1
));
24689 ASSERT_EQ (NULL
, note1
->next ());
24692 /* Verify CALL_INSN_FUNCTION_USAGE. */
24694 /* "(expr_list:DF (use (reg:DF 21 xmm0))". */
24695 rtx_expr_list
*usage
24696 = as_a
<rtx_expr_list
*> (CALL_INSN_FUNCTION_USAGE (insn
));
24697 ASSERT_EQ (EXPR_LIST
, GET_CODE (usage
));
24698 ASSERT_EQ (DFmode
, GET_MODE (usage
));
24699 ASSERT_EQ (USE
, GET_CODE (usage
->element ()));
24700 ASSERT_EQ (NULL
, usage
->next ());
24704 /* Verify that the RTL loader copes a dump from print_rtx_function.
24705 This test is target-specific since the dump contains target-specific
24709 ix86_test_loading_full_dump ()
24711 rtl_dump_test
t (SELFTEST_LOCATION
, locate_file ("x86_64/times-two.rtl"));
24713 ASSERT_STREQ ("times_two", IDENTIFIER_POINTER (DECL_NAME (cfun
->decl
)));
24715 rtx_insn
*insn_1
= get_insn_by_uid (1);
24716 ASSERT_EQ (NOTE
, GET_CODE (insn_1
));
24718 rtx_insn
*insn_7
= get_insn_by_uid (7);
24719 ASSERT_EQ (INSN
, GET_CODE (insn_7
));
24720 ASSERT_EQ (PARALLEL
, GET_CODE (PATTERN (insn_7
)));
24722 rtx_insn
*insn_15
= get_insn_by_uid (15);
24723 ASSERT_EQ (INSN
, GET_CODE (insn_15
));
24724 ASSERT_EQ (USE
, GET_CODE (PATTERN (insn_15
)));
24726 /* Verify crtl->return_rtx. */
24727 ASSERT_EQ (REG
, GET_CODE (crtl
->return_rtx
));
24728 ASSERT_EQ (0, REGNO (crtl
->return_rtx
));
24729 ASSERT_EQ (SImode
, GET_MODE (crtl
->return_rtx
));
/* Verify that the RTL loader copes with UNSPEC and UNSPEC_VOLATILE insns.
   In particular, verify that it correctly loads the 2nd operand.
   This test is target-specific since these are machine-specific
   operands (and enums).  */

static void
ix86_test_loading_unspec ()
{
  rtl_dump_test t (SELFTEST_LOCATION, locate_file ("x86_64/unspec.rtl"));

  ASSERT_STREQ ("test_unspec", IDENTIFIER_POINTER (DECL_NAME (cfun->decl)));

  ASSERT_TRUE (cfun);

  /* Test of an UNSPEC.  */
  rtx_insn *insn = get_insns ();
  ASSERT_EQ (INSN, GET_CODE (insn));
  rtx set = single_set (insn);
  ASSERT_NE (NULL, set);
  rtx dst = SET_DEST (set);
  ASSERT_EQ (MEM, GET_CODE (dst));
  rtx src = SET_SRC (set);
  ASSERT_EQ (UNSPEC, GET_CODE (src));
  ASSERT_EQ (BLKmode, GET_MODE (src));
  ASSERT_EQ (UNSPEC_MEMORY_BLOCKAGE, XINT (src, 1));

  rtx v0 = XVECEXP (src, 0, 0);

  /* Verify that the two uses of the first SCRATCH have pointer
     equality.  */
  rtx scratch_a = XEXP (dst, 0);
  ASSERT_EQ (SCRATCH, GET_CODE (scratch_a));

  rtx scratch_b = XEXP (v0, 0);
  ASSERT_EQ (SCRATCH, GET_CODE (scratch_b));

  ASSERT_EQ (scratch_a, scratch_b);

  /* Verify that the two mems are thus treated as equal.  */
  ASSERT_TRUE (rtx_equal_p (dst, v0));

  /* Verify that the insn is recognized.  */
  ASSERT_NE (-1, recog_memoized (insn));

  /* Test of an UNSPEC_VOLATILE, which has its own enum values.  */
  insn = NEXT_INSN (insn);
  ASSERT_EQ (INSN, GET_CODE (insn));

  set = single_set (insn);
  ASSERT_NE (NULL, set);

  src = SET_SRC (set);
  ASSERT_EQ (UNSPEC_VOLATILE, GET_CODE (src));
  ASSERT_EQ (UNSPECV_RDTSCP, XINT (src, 1));
}
/* Run all target-specific selftests.  */

static void
ix86_run_selftests (void)
{
  ix86_test_dumping_hard_regs ();
  ix86_test_dumping_memory_blockage ();

  /* Various tests of loading RTL dumps, here because they contain
     ix86-isms (e.g. names of hard regs).  */
  ix86_test_loading_dump_fragment_1 ();
  ix86_test_loading_call_insn ();
  ix86_test_loading_full_dump ();
  ix86_test_loading_unspec ();
}

} // namespace selftest

#endif /* CHECKING_P */
24808 /* Initialize the GCC target structure. */
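/* Each #undef/#define pair below overrides one hook's default from
   target-def.h; the TARGET_INITIALIZER macro (brought in by
   "target-def.h" above) then collects every TARGET_* macro into the
   single "targetm" structure defined at the end of this file.  For
   example, the first pair below makes targetm.calls.return_in_memory
   point at ix86_return_in_memory.  */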
24809 #undef TARGET_RETURN_IN_MEMORY
24810 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
24812 #undef TARGET_LEGITIMIZE_ADDRESS
24813 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
24815 #undef TARGET_ATTRIBUTE_TABLE
24816 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
24817 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
24818 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
24819 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
24820 # undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif
24824 #undef TARGET_COMP_TYPE_ATTRIBUTES
24825 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
24827 #undef TARGET_INIT_BUILTINS
24828 #define TARGET_INIT_BUILTINS ix86_init_builtins
24829 #undef TARGET_BUILTIN_DECL
24830 #define TARGET_BUILTIN_DECL ix86_builtin_decl
24831 #undef TARGET_EXPAND_BUILTIN
24832 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
24834 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
24835 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
24836 ix86_builtin_vectorized_function
24838 #undef TARGET_VECTORIZE_BUILTIN_GATHER
24839 #define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather
24841 #undef TARGET_VECTORIZE_BUILTIN_SCATTER
24842 #define TARGET_VECTORIZE_BUILTIN_SCATTER ix86_vectorize_builtin_scatter
24844 #undef TARGET_BUILTIN_RECIPROCAL
24845 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
24847 #undef TARGET_ASM_FUNCTION_EPILOGUE
24848 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
24850 #undef TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY
24851 #define TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY \
24852 ix86_print_patchable_function_entry
24854 #undef TARGET_ENCODE_SECTION_INFO
24855 #ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif
24861 #undef TARGET_ASM_OPEN_PAREN
24862 #define TARGET_ASM_OPEN_PAREN ""
24863 #undef TARGET_ASM_CLOSE_PAREN
24864 #define TARGET_ASM_CLOSE_PAREN ""
24866 #undef TARGET_ASM_BYTE_OP
24867 #define TARGET_ASM_BYTE_OP ASM_BYTE
24869 #undef TARGET_ASM_ALIGNED_HI_OP
24870 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
24871 #undef TARGET_ASM_ALIGNED_SI_OP
24872 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
24874 #undef TARGET_ASM_ALIGNED_DI_OP
24875 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
24878 #undef TARGET_PROFILE_BEFORE_PROLOGUE
24879 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
24881 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
24882 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME ix86_mangle_decl_assembler_name
24884 #undef TARGET_ASM_UNALIGNED_HI_OP
24885 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
24886 #undef TARGET_ASM_UNALIGNED_SI_OP
24887 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
24888 #undef TARGET_ASM_UNALIGNED_DI_OP
24889 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
24891 #undef TARGET_PRINT_OPERAND
24892 #define TARGET_PRINT_OPERAND ix86_print_operand
24893 #undef TARGET_PRINT_OPERAND_ADDRESS
24894 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
24895 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
24896 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
24897 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
24898 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
24900 #undef TARGET_SCHED_INIT_GLOBAL
24901 #define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
24902 #undef TARGET_SCHED_ADJUST_COST
24903 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
24904 #undef TARGET_SCHED_ISSUE_RATE
24905 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
24906 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
24907 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
24908 ia32_multipass_dfa_lookahead
24909 #undef TARGET_SCHED_MACRO_FUSION_P
24910 #define TARGET_SCHED_MACRO_FUSION_P ix86_macro_fusion_p
24911 #undef TARGET_SCHED_MACRO_FUSION_PAIR_P
24912 #define TARGET_SCHED_MACRO_FUSION_PAIR_P ix86_macro_fusion_pair_p
24914 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
24915 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
24917 #undef TARGET_MEMMODEL_CHECK
24918 #define TARGET_MEMMODEL_CHECK ix86_memmodel_check
24920 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
24921 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV ix86_atomic_assign_expand_fenv
24924 #undef TARGET_HAVE_TLS
24925 #define TARGET_HAVE_TLS true
24927 #undef TARGET_CANNOT_FORCE_CONST_MEM
24928 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
24929 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
24930 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
24932 #undef TARGET_DELEGITIMIZE_ADDRESS
24933 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
24935 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
24936 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P ix86_const_not_ok_for_debug_p
24938 #undef TARGET_MS_BITFIELD_LAYOUT_P
24939 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#else
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P ix86_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif
24953 #undef TARGET_ASM_OUTPUT_MI_THUNK
24954 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
24955 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
24956 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
24958 #undef TARGET_ASM_FILE_START
24959 #define TARGET_ASM_FILE_START x86_file_start
24961 #undef TARGET_OPTION_OVERRIDE
24962 #define TARGET_OPTION_OVERRIDE ix86_option_override
24964 #undef TARGET_REGISTER_MOVE_COST
24965 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
24966 #undef TARGET_MEMORY_MOVE_COST
24967 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
24968 #undef TARGET_RTX_COSTS
24969 #define TARGET_RTX_COSTS ix86_rtx_costs
24970 #undef TARGET_ADDRESS_COST
24971 #define TARGET_ADDRESS_COST ix86_address_cost
24973 #undef TARGET_OVERLAP_OP_BY_PIECES_P
24974 #define TARGET_OVERLAP_OP_BY_PIECES_P hook_bool_void_true
24976 #undef TARGET_FLAGS_REGNUM
24977 #define TARGET_FLAGS_REGNUM FLAGS_REG
24978 #undef TARGET_FIXED_CONDITION_CODE_REGS
24979 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
24980 #undef TARGET_CC_MODES_COMPATIBLE
24981 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
24983 #undef TARGET_MACHINE_DEPENDENT_REORG
24984 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
24986 #undef TARGET_BUILD_BUILTIN_VA_LIST
24987 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
24989 #undef TARGET_FOLD_BUILTIN
24990 #define TARGET_FOLD_BUILTIN ix86_fold_builtin
24992 #undef TARGET_GIMPLE_FOLD_BUILTIN
24993 #define TARGET_GIMPLE_FOLD_BUILTIN ix86_gimple_fold_builtin
24995 #undef TARGET_COMPARE_VERSION_PRIORITY
24996 #define TARGET_COMPARE_VERSION_PRIORITY ix86_compare_version_priority
24998 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
24999 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
25000 ix86_generate_version_dispatcher_body
25002 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
25003 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
25004 ix86_get_function_versions_dispatcher
25006 #undef TARGET_ENUM_VA_LIST_P
25007 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
25009 #undef TARGET_FN_ABI_VA_LIST
25010 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
25012 #undef TARGET_CANONICAL_VA_LIST_TYPE
25013 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
25015 #undef TARGET_EXPAND_BUILTIN_VA_START
25016 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
25018 #undef TARGET_MD_ASM_ADJUST
25019 #define TARGET_MD_ASM_ADJUST ix86_md_asm_adjust
25021 #undef TARGET_C_EXCESS_PRECISION
25022 #define TARGET_C_EXCESS_PRECISION ix86_get_excess_precision
25023 #undef TARGET_PROMOTE_PROTOTYPES
25024 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
25025 #undef TARGET_PUSH_ARGUMENT
25026 #define TARGET_PUSH_ARGUMENT ix86_push_argument
25027 #undef TARGET_SETUP_INCOMING_VARARGS
25028 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
25029 #undef TARGET_MUST_PASS_IN_STACK
25030 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
25031 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
25032 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS ix86_allocate_stack_slots_for_args
25033 #undef TARGET_FUNCTION_ARG_ADVANCE
25034 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
25035 #undef TARGET_FUNCTION_ARG
25036 #define TARGET_FUNCTION_ARG ix86_function_arg
25037 #undef TARGET_INIT_PIC_REG
25038 #define TARGET_INIT_PIC_REG ix86_init_pic_reg
25039 #undef TARGET_USE_PSEUDO_PIC_REG
25040 #define TARGET_USE_PSEUDO_PIC_REG ix86_use_pseudo_pic_reg
25041 #undef TARGET_FUNCTION_ARG_BOUNDARY
25042 #define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
25043 #undef TARGET_PASS_BY_REFERENCE
25044 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
25045 #undef TARGET_INTERNAL_ARG_POINTER
25046 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
25047 #undef TARGET_UPDATE_STACK_BOUNDARY
25048 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
25049 #undef TARGET_GET_DRAP_RTX
25050 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
25051 #undef TARGET_STRICT_ARGUMENT_NAMING
25052 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
25053 #undef TARGET_STATIC_CHAIN
25054 #define TARGET_STATIC_CHAIN ix86_static_chain
25055 #undef TARGET_TRAMPOLINE_INIT
25056 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
25057 #undef TARGET_RETURN_POPS_ARGS
25058 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
25060 #undef TARGET_WARN_FUNC_RETURN
25061 #define TARGET_WARN_FUNC_RETURN ix86_warn_func_return
25063 #undef TARGET_LEGITIMATE_COMBINED_INSN
25064 #define TARGET_LEGITIMATE_COMBINED_INSN ix86_legitimate_combined_insn
25066 #undef TARGET_ASAN_SHADOW_OFFSET
25067 #define TARGET_ASAN_SHADOW_OFFSET ix86_asan_shadow_offset
25069 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
25070 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
25072 #undef TARGET_SCALAR_MODE_SUPPORTED_P
25073 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
25075 #undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
25076 #define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \
25077 ix86_libgcc_floating_mode_supported_p
25079 #undef TARGET_VECTOR_MODE_SUPPORTED_P
25080 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
25082 #undef TARGET_C_MODE_FOR_SUFFIX
25083 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif
25090 #ifdef SUBTARGET_INSERT_ATTRIBUTES
25091 #undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
25095 #undef TARGET_MANGLE_TYPE
25096 #define TARGET_MANGLE_TYPE ix86_mangle_type
25098 #undef TARGET_EMIT_SUPPORT_TINFOS
25099 #define TARGET_EMIT_SUPPORT_TINFOS ix86_emit_support_tinfos
25101 #undef TARGET_STACK_PROTECT_GUARD
25102 #define TARGET_STACK_PROTECT_GUARD ix86_stack_protect_guard
25105 #undef TARGET_STACK_PROTECT_FAIL
25106 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
25109 #undef TARGET_FUNCTION_VALUE
25110 #define TARGET_FUNCTION_VALUE ix86_function_value
25112 #undef TARGET_FUNCTION_VALUE_REGNO_P
25113 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
25115 #undef TARGET_ZERO_CALL_USED_REGS
25116 #define TARGET_ZERO_CALL_USED_REGS ix86_zero_call_used_regs
25118 #undef TARGET_PROMOTE_FUNCTION_MODE
25119 #define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode
25121 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
25122 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ix86_override_options_after_change
25124 #undef TARGET_MEMBER_TYPE_FORCES_BLK
25125 #define TARGET_MEMBER_TYPE_FORCES_BLK ix86_member_type_forces_blk
25127 #undef TARGET_INSTANTIATE_DECLS
25128 #define TARGET_INSTANTIATE_DECLS ix86_instantiate_decls
25130 #undef TARGET_SECONDARY_RELOAD
25131 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
25132 #undef TARGET_SECONDARY_MEMORY_NEEDED
25133 #define TARGET_SECONDARY_MEMORY_NEEDED ix86_secondary_memory_needed
25134 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
25135 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE ix86_secondary_memory_needed_mode
25137 #undef TARGET_CLASS_MAX_NREGS
25138 #define TARGET_CLASS_MAX_NREGS ix86_class_max_nregs
25140 #undef TARGET_PREFERRED_RELOAD_CLASS
25141 #define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
25142 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
25143 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
25144 #undef TARGET_CLASS_LIKELY_SPILLED_P
25145 #define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
25147 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
25148 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
25149 ix86_builtin_vectorization_cost
25150 #undef TARGET_VECTORIZE_VEC_PERM_CONST
25151 #define TARGET_VECTORIZE_VEC_PERM_CONST ix86_vectorize_vec_perm_const
25152 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
25153 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
25154 ix86_preferred_simd_mode
25155 #undef TARGET_VECTORIZE_SPLIT_REDUCTION
25156 #define TARGET_VECTORIZE_SPLIT_REDUCTION \
25157 ix86_split_reduction
25158 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES
25159 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES \
25160 ix86_autovectorize_vector_modes
25161 #undef TARGET_VECTORIZE_GET_MASK_MODE
25162 #define TARGET_VECTORIZE_GET_MASK_MODE ix86_get_mask_mode
25163 #undef TARGET_VECTORIZE_CREATE_COSTS
25164 #define TARGET_VECTORIZE_CREATE_COSTS ix86_vectorize_create_costs
25166 #undef TARGET_SET_CURRENT_FUNCTION
25167 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
25169 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
25170 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
25172 #undef TARGET_OPTION_SAVE
25173 #define TARGET_OPTION_SAVE ix86_function_specific_save
25175 #undef TARGET_OPTION_RESTORE
25176 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
25178 #undef TARGET_OPTION_POST_STREAM_IN
25179 #define TARGET_OPTION_POST_STREAM_IN ix86_function_specific_post_stream_in
25181 #undef TARGET_OPTION_PRINT
25182 #define TARGET_OPTION_PRINT ix86_function_specific_print
25184 #undef TARGET_OPTION_FUNCTION_VERSIONS
25185 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
25187 #undef TARGET_CAN_INLINE_P
25188 #define TARGET_CAN_INLINE_P ix86_can_inline_p
25190 #undef TARGET_LEGITIMATE_ADDRESS_P
25191 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
25193 #undef TARGET_REGISTER_PRIORITY
25194 #define TARGET_REGISTER_PRIORITY ix86_register_priority
25196 #undef TARGET_REGISTER_USAGE_LEVELING_P
25197 #define TARGET_REGISTER_USAGE_LEVELING_P hook_bool_void_true
25199 #undef TARGET_LEGITIMATE_CONSTANT_P
25200 #define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p
25202 #undef TARGET_COMPUTE_FRAME_LAYOUT
25203 #define TARGET_COMPUTE_FRAME_LAYOUT ix86_compute_frame_layout
25205 #undef TARGET_FRAME_POINTER_REQUIRED
25206 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
25208 #undef TARGET_CAN_ELIMINATE
25209 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
25211 #undef TARGET_EXTRA_LIVE_ON_ENTRY
25212 #define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry
25214 #undef TARGET_ASM_CODE_END
25215 #define TARGET_ASM_CODE_END ix86_code_end
25217 #undef TARGET_CONDITIONAL_REGISTER_USAGE
25218 #define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
25220 #undef TARGET_CANONICALIZE_COMPARISON
25221 #define TARGET_CANONICALIZE_COMPARISON ix86_canonicalize_comparison
25223 #undef TARGET_LOOP_UNROLL_ADJUST
25224 #define TARGET_LOOP_UNROLL_ADJUST ix86_loop_unroll_adjust
25226 /* Disabled due to PRs 70902, 71453, 71555, 71596 and 71657. */
25227 #undef TARGET_SPILL_CLASS
25228 #define TARGET_SPILL_CLASS ix86_spill_class
25230 #undef TARGET_SIMD_CLONE_COMPUTE_VECSIZE_AND_SIMDLEN
25231 #define TARGET_SIMD_CLONE_COMPUTE_VECSIZE_AND_SIMDLEN \
25232 ix86_simd_clone_compute_vecsize_and_simdlen
25234 #undef TARGET_SIMD_CLONE_ADJUST
25235 #define TARGET_SIMD_CLONE_ADJUST ix86_simd_clone_adjust
25237 #undef TARGET_SIMD_CLONE_USABLE
25238 #define TARGET_SIMD_CLONE_USABLE ix86_simd_clone_usable
25240 #undef TARGET_OMP_DEVICE_KIND_ARCH_ISA
25241 #define TARGET_OMP_DEVICE_KIND_ARCH_ISA ix86_omp_device_kind_arch_isa
25243 #undef TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P
25244 #define TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P \
25245 ix86_float_exceptions_rounding_supported_p
25247 #undef TARGET_MODE_EMIT
25248 #define TARGET_MODE_EMIT ix86_emit_mode_set
25250 #undef TARGET_MODE_NEEDED
25251 #define TARGET_MODE_NEEDED ix86_mode_needed
25253 #undef TARGET_MODE_AFTER
25254 #define TARGET_MODE_AFTER ix86_mode_after
25256 #undef TARGET_MODE_ENTRY
25257 #define TARGET_MODE_ENTRY ix86_mode_entry
25259 #undef TARGET_MODE_EXIT
25260 #define TARGET_MODE_EXIT ix86_mode_exit
25262 #undef TARGET_MODE_PRIORITY
25263 #define TARGET_MODE_PRIORITY ix86_mode_priority
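/* The mode-switching hooks above drive the optimize-mode-switching pass,
   which on this target manages things such as vzeroupper insertion and
   the x87 control-word changes needed for different rounding modes.  */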
25265 #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
25266 #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
25268 #undef TARGET_OFFLOAD_OPTIONS
25269 #define TARGET_OFFLOAD_OPTIONS \
25270 ix86_offload_options
25272 #undef TARGET_ABSOLUTE_BIGGEST_ALIGNMENT
25273 #define TARGET_ABSOLUTE_BIGGEST_ALIGNMENT 512
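/* 512 bits corresponds to the 64-byte ZMM registers of AVX-512, the
   widest vector registers this target provides.  */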
25275 #undef TARGET_OPTAB_SUPPORTED_P
25276 #define TARGET_OPTAB_SUPPORTED_P ix86_optab_supported_p
25278 #undef TARGET_HARD_REGNO_SCRATCH_OK
25279 #define TARGET_HARD_REGNO_SCRATCH_OK ix86_hard_regno_scratch_ok
25281 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
25282 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
25284 #undef TARGET_ADDR_SPACE_ZERO_ADDRESS_VALID
25285 #define TARGET_ADDR_SPACE_ZERO_ADDRESS_VALID ix86_addr_space_zero_address_valid
25287 #undef TARGET_INIT_LIBFUNCS
25288 #define TARGET_INIT_LIBFUNCS ix86_init_libfuncs
25290 #undef TARGET_EXPAND_DIVMOD_LIBFUNC
25291 #define TARGET_EXPAND_DIVMOD_LIBFUNC ix86_expand_divmod_libfunc
25293 #undef TARGET_MAX_NOCE_IFCVT_SEQ_COST
25294 #define TARGET_MAX_NOCE_IFCVT_SEQ_COST ix86_max_noce_ifcvt_seq_cost
25296 #undef TARGET_NOCE_CONVERSION_PROFITABLE_P
25297 #define TARGET_NOCE_CONVERSION_PROFITABLE_P ix86_noce_conversion_profitable_p
25299 #undef TARGET_HARD_REGNO_NREGS
25300 #define TARGET_HARD_REGNO_NREGS ix86_hard_regno_nregs
25301 #undef TARGET_HARD_REGNO_MODE_OK
25302 #define TARGET_HARD_REGNO_MODE_OK ix86_hard_regno_mode_ok
25304 #undef TARGET_MODES_TIEABLE_P
25305 #define TARGET_MODES_TIEABLE_P ix86_modes_tieable_p
25307 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
25308 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
25309 ix86_hard_regno_call_part_clobbered
25311 #undef TARGET_INSN_CALLEE_ABI
25312 #define TARGET_INSN_CALLEE_ABI ix86_insn_callee_abi
25314 #undef TARGET_CAN_CHANGE_MODE_CLASS
25315 #define TARGET_CAN_CHANGE_MODE_CLASS ix86_can_change_mode_class
25317 #undef TARGET_LOWER_LOCAL_DECL_ALIGNMENT
25318 #define TARGET_LOWER_LOCAL_DECL_ALIGNMENT ix86_lower_local_decl_alignment
25320 #undef TARGET_STATIC_RTX_ALIGNMENT
25321 #define TARGET_STATIC_RTX_ALIGNMENT ix86_static_rtx_alignment
25322 #undef TARGET_CONSTANT_ALIGNMENT
25323 #define TARGET_CONSTANT_ALIGNMENT ix86_constant_alignment
25325 #undef TARGET_EMPTY_RECORD_P
25326 #define TARGET_EMPTY_RECORD_P ix86_is_empty_record
25328 #undef TARGET_WARN_PARAMETER_PASSING_ABI
25329 #define TARGET_WARN_PARAMETER_PASSING_ABI ix86_warn_parameter_passing_abi
25331 #undef TARGET_GET_MULTILIB_ABI_NAME
25332 #define TARGET_GET_MULTILIB_ABI_NAME \
25333 ix86_get_multilib_abi_name
25335 #undef TARGET_IFUNC_REF_LOCAL_OK
25336 #define TARGET_IFUNC_REF_LOCAL_OK ix86_ifunc_ref_local_ok
25338 #if !TARGET_MACHO && !TARGET_DLLIMPORT_DECL_ATTRIBUTES
25339 # undef TARGET_ASM_RELOC_RW_MASK
# define TARGET_ASM_RELOC_RW_MASK ix86_reloc_rw_mask
#endif
25343 #undef TARGET_MEMTAG_CAN_TAG_ADDRESSES
25344 #define TARGET_MEMTAG_CAN_TAG_ADDRESSES ix86_memtag_can_tag_addresses
25346 #undef TARGET_MEMTAG_ADD_TAG
25347 #define TARGET_MEMTAG_ADD_TAG ix86_memtag_add_tag
25349 #undef TARGET_MEMTAG_SET_TAG
25350 #define TARGET_MEMTAG_SET_TAG ix86_memtag_set_tag
25352 #undef TARGET_MEMTAG_EXTRACT_TAG
25353 #define TARGET_MEMTAG_EXTRACT_TAG ix86_memtag_extract_tag
25355 #undef TARGET_MEMTAG_UNTAGGED_POINTER
25356 #define TARGET_MEMTAG_UNTAGGED_POINTER ix86_memtag_untagged_pointer
25358 #undef TARGET_MEMTAG_TAG_SIZE
25359 #define TARGET_MEMTAG_TAG_SIZE ix86_memtag_tag_size
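/* The memtag hooks above supply the pointer-tagging primitives used by
   the hardware-assisted address sanitizer (-fsanitize=hwaddress).  */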
static bool
ix86_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
#ifdef OPTION_GLIBC
  if (OPTION_GLIBC)
    return (built_in_function) fcode == BUILT_IN_MEMPCPY;
  else
    return false;
#else
  return false;
#endif
}
25374 #undef TARGET_LIBC_HAS_FAST_FUNCTION
25375 #define TARGET_LIBC_HAS_FAST_FUNCTION ix86_libc_has_fast_function
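/* The middle end queries this hook to decide whether a libc routine is
   fast enough to be worth calling (e.g. whether to emit a real mempcpy
   call); only glibc's mempcpy is reported as fast here.  */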
static unsigned
ix86_libm_function_max_error (unsigned cfn, machine_mode mode,
			      bool boundary_p)
{
#ifdef OPTION_GLIBC
  bool glibc_p = OPTION_GLIBC;
#else
  bool glibc_p = false;
#endif
  if (glibc_p)
    {
      /* If __FAST_MATH__ is defined, glibc provides libmvec.  */
      unsigned int libmvec_ret = 0;
      if (!flag_trapping_math
	  && flag_unsafe_math_optimizations
	  && flag_finite_math_only
	  && !flag_signed_zeros
	  && !flag_errno_math)
	switch (cfn)
	  {
	  CASE_CFN_COS:
	  CASE_CFN_COS_FN:
	  CASE_CFN_SIN:
	  CASE_CFN_SIN_FN:
	    if (!boundary_p)
	      {
		/* With non-default rounding modes, libmvec provides
		   complete garbage in results.  E.g.
		   _ZGVcN8v_sinf for 1.40129846e-45f in FE_UPWARD
		   returns 0.00333309174f rather than 1.40129846e-45f.  */
		if (flag_rounding_math)
		  break;
		/* https://www.gnu.org/software/libc/manual/html_node/Errors-in-Math-Functions.html
		   claims libmvec maximum error is 4ulps.
		   My own random testing indicates 2ulps for SFmode and
		   0.5ulps for DFmode, but let's go with the 4ulps.  */
		libmvec_ret = 4;
	      }
	    break;
	  default:
	    break;
	  }
      unsigned int ret = glibc_linux_libm_function_max_error (cfn, mode,
							      boundary_p);
      return MAX (ret, libmvec_ret);
    }
  return default_libm_function_max_error (cfn, mode, boundary_p);
}
25426 #undef TARGET_LIBM_FUNCTION_MAX_ERROR
25427 #define TARGET_LIBM_FUNCTION_MAX_ERROR ix86_libm_function_max_error
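/* When GCC is configured with checking enabled, the selftests defined
   above are registered here and executed by the -fself-test machinery.  */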
#if CHECKING_P
#undef TARGET_RUN_TARGET_SELFTESTS
#define TARGET_RUN_TARGET_SELFTESTS selftest::ix86_run_selftests
#endif /* #if CHECKING_P */
struct gcc_target targetm = TARGET_INITIALIZER;
25436 #include "gt-i386.h"