/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	    | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	      | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
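
/* Illustrative sketch only (not GCC's actual option handling, and compiled
   out): given one token from -mrecip=<opt>[,<opt>...], look it up in
   recip_options above and accumulate the corresponding mask bits.  The helper
   name and the plain strcmp matching are assumptions for illustration; the
   real option code also supports negating entries, which is omitted here.  */
#if 0
static unsigned int
recip_mask_for_token_example (const char *token, unsigned int current_mask)
{
  for (size_t i = 0;
       i < sizeof (recip_options) / sizeof (recip_options[0]); i++)
    if (strcmp (token, recip_options[i].string) == 0)
      return current_mask | recip_options[i].mask;

  /* Unknown token: leave the mask unchanged; real option handling would
     issue a diagnostic instead.  */
  return current_mask;
}
#endif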
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
  { "power9",		PPC_PLATFORM_POWER9 },
  { "power8",		PPC_PLATFORM_POWER8 },
  { "power7",		PPC_PLATFORM_POWER7 },
  { "power6x",		PPC_PLATFORM_POWER6X },
  { "power6",		PPC_PLATFORM_POWER6 },
  { "power5+",		PPC_PLATFORM_POWER5_PLUS },
  { "power5",		PPC_PLATFORM_POWER5 },
  { "ppc970",		PPC_PLATFORM_PPC970 },
  { "power4",		PPC_PLATFORM_POWER4 },
  { "ppca2",		PPC_PLATFORM_PPCA2 },
  { "ppc476",		PPC_PLATFORM_PPC476 },
  { "ppc464",		PPC_PLATFORM_PPC464 },
  { "ppc440",		PPC_PLATFORM_PPC440 },
  { "ppc405",		PPC_PLATFORM_PPC405 },
  { "ppc-cell-be",	PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
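
/* Illustrative sketch only (compiled out): how a feature string such as
   "htm" maps through cpu_supports_info above to a feature mask plus the
   HWCAP word it lives in.  The helper and the field names used here (name,
   mask, id) are assumptions, since the struct declaration is not shown at
   this point; the real expansion of __builtin_cpu_supports tests the mask
   against the AT_HWCAP/AT_HWCAP2 values the libc caches in the TCB.  */
#if 0
static bool
cpu_supports_lookup_example (const char *feature, unsigned int *mask_out,
			     unsigned int *hwcap_word_out)
{
  for (size_t i = 0;
       i < sizeof (cpu_supports_info) / sizeof (cpu_supports_info[0]); i++)
    if (strcmp (feature, cpu_supports_info[i].name) == 0)
      {
	*mask_out = cpu_supports_info[i].mask;	   /* PPC_FEATURE* bit.  */
	*hwcap_word_out = cpu_supports_info[i].id; /* 0 = HWCAP, 1 = HWCAP2.  */
	return true;
      }
  return false;
}
#endif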
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },				/* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
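
/* Worked example (illustrative values only, compiled out): a mode that is
   valid in a register class and allows both reg+reg and reg+offset
   addressing would carry the mask below; the mode_supports_* helpers later
   in this file simply test individual bits of addr_mask[RELOAD_REG_ANY].  */
#if 0
static const addr_mask_type example_addr_mask
  = RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET;	/* 0x0d */
#endif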
/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)

  in_set = single_set (in_insn);

      if (MEM_P (SET_DEST (in_set)))

	  out_set = single_set (out_insn);

	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)

		  for (i = 0; i < XVECLEN (out_pat, 0); i++)

		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))

		      else if (GET_CODE (out_exp) != SET)

      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)

      for (i = 0; i < XVECLEN (in_pat, 0); i++)

	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))

	  else if (GET_CODE (in_exp) != SET)

	  if (MEM_P (SET_DEST (in_exp)))

	      out_set = single_set (out_insn);

		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)

		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))

		      else if (GET_CODE (out_exp) != SET)

  return store_data_bypass_p (out_insn, in_insn);
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
660 /* Processor costs (relative to an add) */
662 /* Instruction size costs on 32bit processors. */
664 struct processor_costs size32_cost
= {
665 COSTS_N_INSNS (1), /* mulsi */
666 COSTS_N_INSNS (1), /* mulsi_const */
667 COSTS_N_INSNS (1), /* mulsi_const9 */
668 COSTS_N_INSNS (1), /* muldi */
669 COSTS_N_INSNS (1), /* divsi */
670 COSTS_N_INSNS (1), /* divdi */
671 COSTS_N_INSNS (1), /* fp */
672 COSTS_N_INSNS (1), /* dmul */
673 COSTS_N_INSNS (1), /* sdiv */
674 COSTS_N_INSNS (1), /* ddiv */
675 32, /* cache line size */
679 0, /* SF->DF convert */
682 /* Instruction size costs on 64bit processors. */
684 struct processor_costs size64_cost
= {
685 COSTS_N_INSNS (1), /* mulsi */
686 COSTS_N_INSNS (1), /* mulsi_const */
687 COSTS_N_INSNS (1), /* mulsi_const9 */
688 COSTS_N_INSNS (1), /* muldi */
689 COSTS_N_INSNS (1), /* divsi */
690 COSTS_N_INSNS (1), /* divdi */
691 COSTS_N_INSNS (1), /* fp */
692 COSTS_N_INSNS (1), /* dmul */
693 COSTS_N_INSNS (1), /* sdiv */
694 COSTS_N_INSNS (1), /* ddiv */
695 128, /* cache line size */
699 0, /* SF->DF convert */
702 /* Instruction costs on RS64A processors. */
704 struct processor_costs rs64a_cost
= {
705 COSTS_N_INSNS (20), /* mulsi */
706 COSTS_N_INSNS (12), /* mulsi_const */
707 COSTS_N_INSNS (8), /* mulsi_const9 */
708 COSTS_N_INSNS (34), /* muldi */
709 COSTS_N_INSNS (65), /* divsi */
710 COSTS_N_INSNS (67), /* divdi */
711 COSTS_N_INSNS (4), /* fp */
712 COSTS_N_INSNS (4), /* dmul */
713 COSTS_N_INSNS (31), /* sdiv */
714 COSTS_N_INSNS (31), /* ddiv */
715 128, /* cache line size */
719 0, /* SF->DF convert */
722 /* Instruction costs on MPCCORE processors. */
724 struct processor_costs mpccore_cost
= {
725 COSTS_N_INSNS (2), /* mulsi */
726 COSTS_N_INSNS (2), /* mulsi_const */
727 COSTS_N_INSNS (2), /* mulsi_const9 */
728 COSTS_N_INSNS (2), /* muldi */
729 COSTS_N_INSNS (6), /* divsi */
730 COSTS_N_INSNS (6), /* divdi */
731 COSTS_N_INSNS (4), /* fp */
732 COSTS_N_INSNS (5), /* dmul */
733 COSTS_N_INSNS (10), /* sdiv */
734 COSTS_N_INSNS (17), /* ddiv */
735 32, /* cache line size */
739 0, /* SF->DF convert */
742 /* Instruction costs on PPC403 processors. */
744 struct processor_costs ppc403_cost
= {
745 COSTS_N_INSNS (4), /* mulsi */
746 COSTS_N_INSNS (4), /* mulsi_const */
747 COSTS_N_INSNS (4), /* mulsi_const9 */
748 COSTS_N_INSNS (4), /* muldi */
749 COSTS_N_INSNS (33), /* divsi */
750 COSTS_N_INSNS (33), /* divdi */
751 COSTS_N_INSNS (11), /* fp */
752 COSTS_N_INSNS (11), /* dmul */
753 COSTS_N_INSNS (11), /* sdiv */
754 COSTS_N_INSNS (11), /* ddiv */
755 32, /* cache line size */
759 0, /* SF->DF convert */
762 /* Instruction costs on PPC405 processors. */
764 struct processor_costs ppc405_cost
= {
765 COSTS_N_INSNS (5), /* mulsi */
766 COSTS_N_INSNS (4), /* mulsi_const */
767 COSTS_N_INSNS (3), /* mulsi_const9 */
768 COSTS_N_INSNS (5), /* muldi */
769 COSTS_N_INSNS (35), /* divsi */
770 COSTS_N_INSNS (35), /* divdi */
771 COSTS_N_INSNS (11), /* fp */
772 COSTS_N_INSNS (11), /* dmul */
773 COSTS_N_INSNS (11), /* sdiv */
774 COSTS_N_INSNS (11), /* ddiv */
775 32, /* cache line size */
779 0, /* SF->DF convert */
782 /* Instruction costs on PPC440 processors. */
784 struct processor_costs ppc440_cost
= {
785 COSTS_N_INSNS (3), /* mulsi */
786 COSTS_N_INSNS (2), /* mulsi_const */
787 COSTS_N_INSNS (2), /* mulsi_const9 */
788 COSTS_N_INSNS (3), /* muldi */
789 COSTS_N_INSNS (34), /* divsi */
790 COSTS_N_INSNS (34), /* divdi */
791 COSTS_N_INSNS (5), /* fp */
792 COSTS_N_INSNS (5), /* dmul */
793 COSTS_N_INSNS (19), /* sdiv */
794 COSTS_N_INSNS (33), /* ddiv */
795 32, /* cache line size */
799 0, /* SF->DF convert */
802 /* Instruction costs on PPC476 processors. */
804 struct processor_costs ppc476_cost
= {
805 COSTS_N_INSNS (4), /* mulsi */
806 COSTS_N_INSNS (4), /* mulsi_const */
807 COSTS_N_INSNS (4), /* mulsi_const9 */
808 COSTS_N_INSNS (4), /* muldi */
809 COSTS_N_INSNS (11), /* divsi */
810 COSTS_N_INSNS (11), /* divdi */
811 COSTS_N_INSNS (6), /* fp */
812 COSTS_N_INSNS (6), /* dmul */
813 COSTS_N_INSNS (19), /* sdiv */
814 COSTS_N_INSNS (33), /* ddiv */
815 32, /* l1 cache line size */
819 0, /* SF->DF convert */
822 /* Instruction costs on PPC601 processors. */
824 struct processor_costs ppc601_cost
= {
825 COSTS_N_INSNS (5), /* mulsi */
826 COSTS_N_INSNS (5), /* mulsi_const */
827 COSTS_N_INSNS (5), /* mulsi_const9 */
828 COSTS_N_INSNS (5), /* muldi */
829 COSTS_N_INSNS (36), /* divsi */
830 COSTS_N_INSNS (36), /* divdi */
831 COSTS_N_INSNS (4), /* fp */
832 COSTS_N_INSNS (5), /* dmul */
833 COSTS_N_INSNS (17), /* sdiv */
834 COSTS_N_INSNS (31), /* ddiv */
835 32, /* cache line size */
839 0, /* SF->DF convert */
842 /* Instruction costs on PPC603 processors. */
844 struct processor_costs ppc603_cost
= {
845 COSTS_N_INSNS (5), /* mulsi */
846 COSTS_N_INSNS (3), /* mulsi_const */
847 COSTS_N_INSNS (2), /* mulsi_const9 */
848 COSTS_N_INSNS (5), /* muldi */
849 COSTS_N_INSNS (37), /* divsi */
850 COSTS_N_INSNS (37), /* divdi */
851 COSTS_N_INSNS (3), /* fp */
852 COSTS_N_INSNS (4), /* dmul */
853 COSTS_N_INSNS (18), /* sdiv */
854 COSTS_N_INSNS (33), /* ddiv */
855 32, /* cache line size */
859 0, /* SF->DF convert */
862 /* Instruction costs on PPC604 processors. */
864 struct processor_costs ppc604_cost
= {
865 COSTS_N_INSNS (4), /* mulsi */
866 COSTS_N_INSNS (4), /* mulsi_const */
867 COSTS_N_INSNS (4), /* mulsi_const9 */
868 COSTS_N_INSNS (4), /* muldi */
869 COSTS_N_INSNS (20), /* divsi */
870 COSTS_N_INSNS (20), /* divdi */
871 COSTS_N_INSNS (3), /* fp */
872 COSTS_N_INSNS (3), /* dmul */
873 COSTS_N_INSNS (18), /* sdiv */
874 COSTS_N_INSNS (32), /* ddiv */
875 32, /* cache line size */
879 0, /* SF->DF convert */
882 /* Instruction costs on PPC604e processors. */
884 struct processor_costs ppc604e_cost
= {
885 COSTS_N_INSNS (2), /* mulsi */
886 COSTS_N_INSNS (2), /* mulsi_const */
887 COSTS_N_INSNS (2), /* mulsi_const9 */
888 COSTS_N_INSNS (2), /* muldi */
889 COSTS_N_INSNS (20), /* divsi */
890 COSTS_N_INSNS (20), /* divdi */
891 COSTS_N_INSNS (3), /* fp */
892 COSTS_N_INSNS (3), /* dmul */
893 COSTS_N_INSNS (18), /* sdiv */
894 COSTS_N_INSNS (32), /* ddiv */
895 32, /* cache line size */
899 0, /* SF->DF convert */
902 /* Instruction costs on PPC620 processors. */
904 struct processor_costs ppc620_cost
= {
905 COSTS_N_INSNS (5), /* mulsi */
906 COSTS_N_INSNS (4), /* mulsi_const */
907 COSTS_N_INSNS (3), /* mulsi_const9 */
908 COSTS_N_INSNS (7), /* muldi */
909 COSTS_N_INSNS (21), /* divsi */
910 COSTS_N_INSNS (37), /* divdi */
911 COSTS_N_INSNS (3), /* fp */
912 COSTS_N_INSNS (3), /* dmul */
913 COSTS_N_INSNS (18), /* sdiv */
914 COSTS_N_INSNS (32), /* ddiv */
915 128, /* cache line size */
919 0, /* SF->DF convert */
922 /* Instruction costs on PPC630 processors. */
924 struct processor_costs ppc630_cost
= {
925 COSTS_N_INSNS (5), /* mulsi */
926 COSTS_N_INSNS (4), /* mulsi_const */
927 COSTS_N_INSNS (3), /* mulsi_const9 */
928 COSTS_N_INSNS (7), /* muldi */
929 COSTS_N_INSNS (21), /* divsi */
930 COSTS_N_INSNS (37), /* divdi */
931 COSTS_N_INSNS (3), /* fp */
932 COSTS_N_INSNS (3), /* dmul */
933 COSTS_N_INSNS (17), /* sdiv */
934 COSTS_N_INSNS (21), /* ddiv */
935 128, /* cache line size */
939 0, /* SF->DF convert */
942 /* Instruction costs on Cell processor. */
943 /* COSTS_N_INSNS (1) ~ one add. */
945 struct processor_costs ppccell_cost
= {
946 COSTS_N_INSNS (9/2)+2, /* mulsi */
947 COSTS_N_INSNS (6/2), /* mulsi_const */
948 COSTS_N_INSNS (6/2), /* mulsi_const9 */
949 COSTS_N_INSNS (15/2)+2, /* muldi */
950 COSTS_N_INSNS (38/2), /* divsi */
951 COSTS_N_INSNS (70/2), /* divdi */
952 COSTS_N_INSNS (10/2), /* fp */
953 COSTS_N_INSNS (10/2), /* dmul */
954 COSTS_N_INSNS (74/2), /* sdiv */
955 COSTS_N_INSNS (74/2), /* ddiv */
956 128, /* cache line size */
960 0, /* SF->DF convert */
963 /* Instruction costs on PPC750 and PPC7400 processors. */
965 struct processor_costs ppc750_cost
= {
966 COSTS_N_INSNS (5), /* mulsi */
967 COSTS_N_INSNS (3), /* mulsi_const */
968 COSTS_N_INSNS (2), /* mulsi_const9 */
969 COSTS_N_INSNS (5), /* muldi */
970 COSTS_N_INSNS (17), /* divsi */
971 COSTS_N_INSNS (17), /* divdi */
972 COSTS_N_INSNS (3), /* fp */
973 COSTS_N_INSNS (3), /* dmul */
974 COSTS_N_INSNS (17), /* sdiv */
975 COSTS_N_INSNS (31), /* ddiv */
976 32, /* cache line size */
980 0, /* SF->DF convert */
983 /* Instruction costs on PPC7450 processors. */
985 struct processor_costs ppc7450_cost
= {
986 COSTS_N_INSNS (4), /* mulsi */
987 COSTS_N_INSNS (3), /* mulsi_const */
988 COSTS_N_INSNS (3), /* mulsi_const9 */
989 COSTS_N_INSNS (4), /* muldi */
990 COSTS_N_INSNS (23), /* divsi */
991 COSTS_N_INSNS (23), /* divdi */
992 COSTS_N_INSNS (5), /* fp */
993 COSTS_N_INSNS (5), /* dmul */
994 COSTS_N_INSNS (21), /* sdiv */
995 COSTS_N_INSNS (35), /* ddiv */
996 32, /* cache line size */
1000 0, /* SF->DF convert */
1003 /* Instruction costs on PPC8540 processors. */
1005 struct processor_costs ppc8540_cost
= {
1006 COSTS_N_INSNS (4), /* mulsi */
1007 COSTS_N_INSNS (4), /* mulsi_const */
1008 COSTS_N_INSNS (4), /* mulsi_const9 */
1009 COSTS_N_INSNS (4), /* muldi */
1010 COSTS_N_INSNS (19), /* divsi */
1011 COSTS_N_INSNS (19), /* divdi */
1012 COSTS_N_INSNS (4), /* fp */
1013 COSTS_N_INSNS (4), /* dmul */
1014 COSTS_N_INSNS (29), /* sdiv */
1015 COSTS_N_INSNS (29), /* ddiv */
1016 32, /* cache line size */
  1,			/* prefetch streams */
1020 0, /* SF->DF convert */
1023 /* Instruction costs on E300C2 and E300C3 cores. */
1025 struct processor_costs ppce300c2c3_cost
= {
1026 COSTS_N_INSNS (4), /* mulsi */
1027 COSTS_N_INSNS (4), /* mulsi_const */
1028 COSTS_N_INSNS (4), /* mulsi_const9 */
1029 COSTS_N_INSNS (4), /* muldi */
1030 COSTS_N_INSNS (19), /* divsi */
1031 COSTS_N_INSNS (19), /* divdi */
1032 COSTS_N_INSNS (3), /* fp */
1033 COSTS_N_INSNS (4), /* dmul */
1034 COSTS_N_INSNS (18), /* sdiv */
1035 COSTS_N_INSNS (33), /* ddiv */
  1,			/* prefetch streams */
1040 0, /* SF->DF convert */
1043 /* Instruction costs on PPCE500MC processors. */
1045 struct processor_costs ppce500mc_cost
= {
1046 COSTS_N_INSNS (4), /* mulsi */
1047 COSTS_N_INSNS (4), /* mulsi_const */
1048 COSTS_N_INSNS (4), /* mulsi_const9 */
1049 COSTS_N_INSNS (4), /* muldi */
1050 COSTS_N_INSNS (14), /* divsi */
1051 COSTS_N_INSNS (14), /* divdi */
1052 COSTS_N_INSNS (8), /* fp */
1053 COSTS_N_INSNS (10), /* dmul */
1054 COSTS_N_INSNS (36), /* sdiv */
1055 COSTS_N_INSNS (66), /* ddiv */
1056 64, /* cache line size */
  1,			/* prefetch streams */
1060 0, /* SF->DF convert */
1063 /* Instruction costs on PPCE500MC64 processors. */
1065 struct processor_costs ppce500mc64_cost
= {
1066 COSTS_N_INSNS (4), /* mulsi */
1067 COSTS_N_INSNS (4), /* mulsi_const */
1068 COSTS_N_INSNS (4), /* mulsi_const9 */
1069 COSTS_N_INSNS (4), /* muldi */
1070 COSTS_N_INSNS (14), /* divsi */
1071 COSTS_N_INSNS (14), /* divdi */
1072 COSTS_N_INSNS (4), /* fp */
1073 COSTS_N_INSNS (10), /* dmul */
1074 COSTS_N_INSNS (36), /* sdiv */
1075 COSTS_N_INSNS (66), /* ddiv */
1076 64, /* cache line size */
  1,			/* prefetch streams */
1080 0, /* SF->DF convert */
1083 /* Instruction costs on PPCE5500 processors. */
1085 struct processor_costs ppce5500_cost
= {
1086 COSTS_N_INSNS (5), /* mulsi */
1087 COSTS_N_INSNS (5), /* mulsi_const */
1088 COSTS_N_INSNS (4), /* mulsi_const9 */
1089 COSTS_N_INSNS (5), /* muldi */
1090 COSTS_N_INSNS (14), /* divsi */
1091 COSTS_N_INSNS (14), /* divdi */
1092 COSTS_N_INSNS (7), /* fp */
1093 COSTS_N_INSNS (10), /* dmul */
1094 COSTS_N_INSNS (36), /* sdiv */
1095 COSTS_N_INSNS (66), /* ddiv */
1096 64, /* cache line size */
  1,			/* prefetch streams */
1100 0, /* SF->DF convert */
1103 /* Instruction costs on PPCE6500 processors. */
1105 struct processor_costs ppce6500_cost
= {
1106 COSTS_N_INSNS (5), /* mulsi */
1107 COSTS_N_INSNS (5), /* mulsi_const */
1108 COSTS_N_INSNS (4), /* mulsi_const9 */
1109 COSTS_N_INSNS (5), /* muldi */
1110 COSTS_N_INSNS (14), /* divsi */
1111 COSTS_N_INSNS (14), /* divdi */
1112 COSTS_N_INSNS (7), /* fp */
1113 COSTS_N_INSNS (10), /* dmul */
1114 COSTS_N_INSNS (36), /* sdiv */
1115 COSTS_N_INSNS (66), /* ddiv */
1116 64, /* cache line size */
  1,			/* prefetch streams */
1120 0, /* SF->DF convert */
1123 /* Instruction costs on AppliedMicro Titan processors. */
1125 struct processor_costs titan_cost
= {
1126 COSTS_N_INSNS (5), /* mulsi */
1127 COSTS_N_INSNS (5), /* mulsi_const */
1128 COSTS_N_INSNS (5), /* mulsi_const9 */
1129 COSTS_N_INSNS (5), /* muldi */
1130 COSTS_N_INSNS (18), /* divsi */
1131 COSTS_N_INSNS (18), /* divdi */
1132 COSTS_N_INSNS (10), /* fp */
1133 COSTS_N_INSNS (10), /* dmul */
1134 COSTS_N_INSNS (46), /* sdiv */
1135 COSTS_N_INSNS (72), /* ddiv */
1136 32, /* cache line size */
  1,			/* prefetch streams */
1140 0, /* SF->DF convert */
1143 /* Instruction costs on POWER4 and POWER5 processors. */
1145 struct processor_costs power4_cost
= {
1146 COSTS_N_INSNS (3), /* mulsi */
1147 COSTS_N_INSNS (2), /* mulsi_const */
1148 COSTS_N_INSNS (2), /* mulsi_const9 */
1149 COSTS_N_INSNS (4), /* muldi */
1150 COSTS_N_INSNS (18), /* divsi */
1151 COSTS_N_INSNS (34), /* divdi */
1152 COSTS_N_INSNS (3), /* fp */
1153 COSTS_N_INSNS (3), /* dmul */
1154 COSTS_N_INSNS (17), /* sdiv */
1155 COSTS_N_INSNS (17), /* ddiv */
1156 128, /* cache line size */
1158 1024, /* l2 cache */
  8,			/* prefetch streams */
1160 0, /* SF->DF convert */
1163 /* Instruction costs on POWER6 processors. */
1165 struct processor_costs power6_cost
= {
1166 COSTS_N_INSNS (8), /* mulsi */
1167 COSTS_N_INSNS (8), /* mulsi_const */
1168 COSTS_N_INSNS (8), /* mulsi_const9 */
1169 COSTS_N_INSNS (8), /* muldi */
1170 COSTS_N_INSNS (22), /* divsi */
1171 COSTS_N_INSNS (28), /* divdi */
1172 COSTS_N_INSNS (3), /* fp */
1173 COSTS_N_INSNS (3), /* dmul */
1174 COSTS_N_INSNS (13), /* sdiv */
1175 COSTS_N_INSNS (16), /* ddiv */
1176 128, /* cache line size */
1178 2048, /* l2 cache */
1179 16, /* prefetch streams */
1180 0, /* SF->DF convert */
1183 /* Instruction costs on POWER7 processors. */
1185 struct processor_costs power7_cost
= {
1186 COSTS_N_INSNS (2), /* mulsi */
1187 COSTS_N_INSNS (2), /* mulsi_const */
1188 COSTS_N_INSNS (2), /* mulsi_const9 */
1189 COSTS_N_INSNS (2), /* muldi */
1190 COSTS_N_INSNS (18), /* divsi */
1191 COSTS_N_INSNS (34), /* divdi */
1192 COSTS_N_INSNS (3), /* fp */
1193 COSTS_N_INSNS (3), /* dmul */
1194 COSTS_N_INSNS (13), /* sdiv */
1195 COSTS_N_INSNS (16), /* ddiv */
1196 128, /* cache line size */
1199 12, /* prefetch streams */
1200 COSTS_N_INSNS (3), /* SF->DF convert */
1203 /* Instruction costs on POWER8 processors. */
1205 struct processor_costs power8_cost
= {
1206 COSTS_N_INSNS (3), /* mulsi */
1207 COSTS_N_INSNS (3), /* mulsi_const */
1208 COSTS_N_INSNS (3), /* mulsi_const9 */
1209 COSTS_N_INSNS (3), /* muldi */
1210 COSTS_N_INSNS (19), /* divsi */
1211 COSTS_N_INSNS (35), /* divdi */
1212 COSTS_N_INSNS (3), /* fp */
1213 COSTS_N_INSNS (3), /* dmul */
1214 COSTS_N_INSNS (14), /* sdiv */
1215 COSTS_N_INSNS (17), /* ddiv */
1216 128, /* cache line size */
1219 12, /* prefetch streams */
1220 COSTS_N_INSNS (3), /* SF->DF convert */
1223 /* Instruction costs on POWER9 processors. */
1225 struct processor_costs power9_cost
= {
1226 COSTS_N_INSNS (3), /* mulsi */
1227 COSTS_N_INSNS (3), /* mulsi_const */
1228 COSTS_N_INSNS (3), /* mulsi_const9 */
1229 COSTS_N_INSNS (3), /* muldi */
1230 COSTS_N_INSNS (8), /* divsi */
1231 COSTS_N_INSNS (12), /* divdi */
1232 COSTS_N_INSNS (3), /* fp */
1233 COSTS_N_INSNS (3), /* dmul */
1234 COSTS_N_INSNS (13), /* sdiv */
1235 COSTS_N_INSNS (18), /* ddiv */
1236 128, /* cache line size */
1239 8, /* prefetch streams */
1240 COSTS_N_INSNS (3), /* SF->DF convert */
1243 /* Instruction costs on POWER A2 processors. */
1245 struct processor_costs ppca2_cost
= {
1246 COSTS_N_INSNS (16), /* mulsi */
1247 COSTS_N_INSNS (16), /* mulsi_const */
1248 COSTS_N_INSNS (16), /* mulsi_const9 */
1249 COSTS_N_INSNS (16), /* muldi */
1250 COSTS_N_INSNS (22), /* divsi */
1251 COSTS_N_INSNS (28), /* divdi */
1252 COSTS_N_INSNS (3), /* fp */
1253 COSTS_N_INSNS (3), /* dmul */
1254 COSTS_N_INSNS (59), /* sdiv */
1255 COSTS_N_INSNS (72), /* ddiv */
1258 2048, /* l2 cache */
1259 16, /* prefetch streams */
1260 0, /* SF->DF convert */
1264 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
1265 #undef RS6000_BUILTIN_0
1266 #undef RS6000_BUILTIN_1
1267 #undef RS6000_BUILTIN_2
1268 #undef RS6000_BUILTIN_3
1269 #undef RS6000_BUILTIN_A
1270 #undef RS6000_BUILTIN_D
1271 #undef RS6000_BUILTIN_H
1272 #undef RS6000_BUILTIN_P
1273 #undef RS6000_BUILTIN_Q
1274 #undef RS6000_BUILTIN_X
1276 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
1277 { NAME, ICODE, MASK, ATTR },
1279 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
1280 { NAME, ICODE, MASK, ATTR },
1282 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
1283 { NAME, ICODE, MASK, ATTR },
1285 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
1286 { NAME, ICODE, MASK, ATTR },
1288 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
1289 { NAME, ICODE, MASK, ATTR },
1291 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
1292 { NAME, ICODE, MASK, ATTR },
1294 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
1295 { NAME, ICODE, MASK, ATTR },
1297 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
1298 { NAME, ICODE, MASK, ATTR },
1300 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
1301 { NAME, ICODE, MASK, ATTR },
1303 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
1304 { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
1318 #undef RS6000_BUILTIN_0
1319 #undef RS6000_BUILTIN_1
1320 #undef RS6000_BUILTIN_2
1321 #undef RS6000_BUILTIN_3
1322 #undef RS6000_BUILTIN_A
1323 #undef RS6000_BUILTIN_D
1324 #undef RS6000_BUILTIN_H
1325 #undef RS6000_BUILTIN_P
1326 #undef RS6000_BUILTIN_Q
1327 #undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
1333 static bool rs6000_debug_legitimate_address_p (machine_mode
, rtx
, bool);
1334 static struct machine_function
* rs6000_init_machine_status (void);
1335 static int rs6000_ra_ever_killed (void);
1336 static tree
rs6000_handle_longcall_attribute (tree
*, tree
, tree
, int, bool *);
1337 static tree
rs6000_handle_altivec_attribute (tree
*, tree
, tree
, int, bool *);
1338 static tree
rs6000_handle_struct_attribute (tree
*, tree
, tree
, int, bool *);
1339 static tree
rs6000_builtin_vectorized_libmass (combined_fn
, tree
, tree
);
1340 static void rs6000_emit_set_long_const (rtx
, HOST_WIDE_INT
);
1341 static int rs6000_memory_move_cost (machine_mode
, reg_class_t
, bool);
1342 static bool rs6000_debug_rtx_costs (rtx
, machine_mode
, int, int, int *, bool);
1343 static int rs6000_debug_address_cost (rtx
, machine_mode
, addr_space_t
,
1345 static int rs6000_debug_adjust_cost (rtx_insn
*, int, rtx_insn
*, int,
1347 static bool is_microcoded_insn (rtx_insn
*);
1348 static bool is_nonpipeline_insn (rtx_insn
*);
1349 static bool is_cracked_insn (rtx_insn
*);
1350 static bool is_load_insn (rtx
, rtx
*);
1351 static bool is_store_insn (rtx
, rtx
*);
1352 static bool set_to_load_agen (rtx_insn
*,rtx_insn
*);
1353 static bool insn_terminates_group_p (rtx_insn
*, enum group_termination
);
1354 static bool insn_must_be_first_in_group (rtx_insn
*);
1355 static bool insn_must_be_last_in_group (rtx_insn
*);
1356 static void altivec_init_builtins (void);
1357 static tree
builtin_function_type (machine_mode
, machine_mode
,
1358 machine_mode
, machine_mode
,
1359 enum rs6000_builtins
, const char *name
);
1360 static void rs6000_common_init_builtins (void);
1361 static void paired_init_builtins (void);
1362 static rtx
paired_expand_predicate_builtin (enum insn_code
, tree
, rtx
);
1363 static void htm_init_builtins (void);
1364 static int rs6000_emit_int_cmove (rtx
, rtx
, rtx
, rtx
);
1365 static rs6000_stack_t
*rs6000_stack_info (void);
1366 static void is_altivec_return_reg (rtx
, void *);
1367 int easy_vector_constant (rtx
, machine_mode
);
1368 static rtx
rs6000_debug_legitimize_address (rtx
, rtx
, machine_mode
);
1369 static rtx
rs6000_legitimize_tls_address (rtx
, enum tls_model
);
1370 static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS
*, const_tree
,
1373 static void macho_branch_islands (void);
1375 static rtx
rs6000_legitimize_reload_address (rtx
, machine_mode
, int, int,
1377 static rtx
rs6000_debug_legitimize_reload_address (rtx
, machine_mode
, int,
1379 static bool rs6000_mode_dependent_address (const_rtx
);
1380 static bool rs6000_debug_mode_dependent_address (const_rtx
);
1381 static enum reg_class
rs6000_secondary_reload_class (enum reg_class
,
1383 static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class
,
1386 static enum reg_class
rs6000_preferred_reload_class (rtx
, enum reg_class
);
1387 static enum reg_class
rs6000_debug_preferred_reload_class (rtx
,
1389 static bool rs6000_secondary_memory_needed (enum reg_class
, enum reg_class
,
1391 static bool rs6000_debug_secondary_memory_needed (enum reg_class
,
1394 static bool rs6000_cannot_change_mode_class (machine_mode
,
1397 static bool rs6000_debug_cannot_change_mode_class (machine_mode
,
1400 static bool rs6000_save_toc_in_prologue_p (void);
1401 static rtx
rs6000_internal_arg_pointer (void);
1403 rtx (*rs6000_legitimize_reload_address_ptr
) (rtx
, machine_mode
, int, int,
1405 = rs6000_legitimize_reload_address
;
1407 static bool (*rs6000_mode_dependent_address_ptr
) (const_rtx
)
1408 = rs6000_mode_dependent_address
;
1410 enum reg_class (*rs6000_secondary_reload_class_ptr
) (enum reg_class
,
1412 = rs6000_secondary_reload_class
;
1414 enum reg_class (*rs6000_preferred_reload_class_ptr
) (rtx
, enum reg_class
)
1415 = rs6000_preferred_reload_class
;
1417 bool (*rs6000_secondary_memory_needed_ptr
) (enum reg_class
, enum reg_class
,
1419 = rs6000_secondary_memory_needed
;
1421 bool (*rs6000_cannot_change_mode_class_ptr
) (machine_mode
,
1424 = rs6000_cannot_change_mode_class
;
1426 const int INSN_NOT_AVAILABLE
= -1;
1428 static void rs6000_print_isa_options (FILE *, int, const char *,
1430 static void rs6000_print_builtin_options (FILE *, int, const char *,
1432 static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void);
1434 static enum rs6000_reg_type
register_to_reg_type (rtx
, bool *);
1435 static bool rs6000_secondary_reload_move (enum rs6000_reg_type
,
1436 enum rs6000_reg_type
,
1438 secondary_reload_info
*,
1440 rtl_opt_pass
*make_pass_analyze_swaps (gcc::context
*);
1441 static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused
));
1442 static tree
rs6000_fold_builtin (tree
, int, tree
*, bool);
1444 /* Hash table stuff for keeping track of TOC entries. */
1446 struct GTY((for_user
)) toc_hash_struct
1448 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1449 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1451 machine_mode key_mode
;
1455 struct toc_hasher
: ggc_ptr_hash
<toc_hash_struct
>
1457 static hashval_t
hash (toc_hash_struct
*);
1458 static bool equal (toc_hash_struct
*, toc_hash_struct
*);
1461 static GTY (()) hash_table
<toc_hasher
> *toc_hash_table
;
1463 /* Hash table to keep track of the argument types for builtin functions. */
1465 struct GTY((for_user
)) builtin_hash_struct
1468 machine_mode mode
[4]; /* return value + 3 arguments. */
1469 unsigned char uns_p
[4]; /* and whether the types are unsigned. */
1472 struct builtin_hasher
: ggc_ptr_hash
<builtin_hash_struct
>
1474 static hashval_t
hash (builtin_hash_struct
*);
1475 static bool equal (builtin_hash_struct
*, builtin_hash_struct
*);
1478 static GTY (()) hash_table
<builtin_hasher
> *builtin_hash_table
;
1481 /* Default register names. */
1482 char rs6000_reg_names
[][8] =
1484 "0", "1", "2", "3", "4", "5", "6", "7",
1485 "8", "9", "10", "11", "12", "13", "14", "15",
1486 "16", "17", "18", "19", "20", "21", "22", "23",
1487 "24", "25", "26", "27", "28", "29", "30", "31",
1488 "0", "1", "2", "3", "4", "5", "6", "7",
1489 "8", "9", "10", "11", "12", "13", "14", "15",
1490 "16", "17", "18", "19", "20", "21", "22", "23",
1491 "24", "25", "26", "27", "28", "29", "30", "31",
1492 "mq", "lr", "ctr","ap",
1493 "0", "1", "2", "3", "4", "5", "6", "7",
1495 /* AltiVec registers. */
1496 "0", "1", "2", "3", "4", "5", "6", "7",
1497 "8", "9", "10", "11", "12", "13", "14", "15",
1498 "16", "17", "18", "19", "20", "21", "22", "23",
1499 "24", "25", "26", "27", "28", "29", "30", "31",
1501 /* Soft frame pointer. */
1503 /* HTM SPR registers. */
1504 "tfhar", "tfiar", "texasr"
1507 #ifdef TARGET_REGNAMES
1508 static const char alt_reg_names
[][8] =
1510 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1511 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1512 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1513 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1514 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1515 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1516 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1517 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1518 "mq", "lr", "ctr", "ap",
1519 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1521 /* AltiVec registers. */
1522 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1523 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1524 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1525 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1527 /* Soft frame pointer. */
1529 /* HTM SPR registers. */
1530 "tfhar", "tfiar", "texasr"
1534 /* Table of valid machine attributes. */
1536 static const struct attribute_spec rs6000_attribute_table
[] =
1538 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
1539 affects_type_identity } */
1540 { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute
,
1542 { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute
,
1544 { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute
,
1546 { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute
,
1548 { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute
,
1550 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1551 SUBTARGET_ATTRIBUTE_TABLE
,
1553 { NULL
, 0, 0, false, false, false, NULL
, false }
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
1560 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1561 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
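
/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 >> 0
   == 0x80000000 (the most significant bit, for %v0), while
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x80000000 >> 31 == 0x1
   (for %v31), matching the VRSAVE layout described above.  */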
1563 /* Initialize the GCC target structure. */
1564 #undef TARGET_ATTRIBUTE_TABLE
1565 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1566 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1567 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1568 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1569 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1571 #undef TARGET_ASM_ALIGNED_DI_OP
1572 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1574 /* Default unaligned ops are only provided for ELF. Find the ops needed
1575 for non-ELF systems. */
1576 #ifndef OBJECT_FORMAT_ELF
1578 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1580 #undef TARGET_ASM_UNALIGNED_HI_OP
1581 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1582 #undef TARGET_ASM_UNALIGNED_SI_OP
1583 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1584 #undef TARGET_ASM_UNALIGNED_DI_OP
1585 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1588 #undef TARGET_ASM_UNALIGNED_HI_OP
1589 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1590 #undef TARGET_ASM_UNALIGNED_SI_OP
1591 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1592 #undef TARGET_ASM_UNALIGNED_DI_OP
1593 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1594 #undef TARGET_ASM_ALIGNED_DI_OP
1595 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1599 /* This hook deals with fixups for relocatable code and DI-mode objects
1601 #undef TARGET_ASM_INTEGER
1602 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1604 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1605 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1606 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1609 #undef TARGET_SET_UP_BY_PROLOGUE
1610 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1612 #undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
1613 #define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
1614 #undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
1615 #define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
1616 #undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
1617 #define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
1618 #undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
1619 #define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
1620 #undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
1621 #define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
1622 #undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
1623 #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components
1625 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1626 #define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry
1628 #undef TARGET_INTERNAL_ARG_POINTER
1629 #define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer
1631 #undef TARGET_HAVE_TLS
1632 #define TARGET_HAVE_TLS HAVE_AS_TLS
1634 #undef TARGET_CANNOT_FORCE_CONST_MEM
1635 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1637 #undef TARGET_DELEGITIMIZE_ADDRESS
1638 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1640 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1641 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1643 #undef TARGET_LEGITIMATE_COMBINED_INSN
1644 #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
1646 #undef TARGET_ASM_FUNCTION_PROLOGUE
1647 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1648 #undef TARGET_ASM_FUNCTION_EPILOGUE
1649 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1651 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1652 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1654 #undef TARGET_LEGITIMIZE_ADDRESS
1655 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1657 #undef TARGET_SCHED_VARIABLE_ISSUE
1658 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1660 #undef TARGET_SCHED_ISSUE_RATE
1661 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1662 #undef TARGET_SCHED_ADJUST_COST
1663 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1664 #undef TARGET_SCHED_ADJUST_PRIORITY
1665 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1666 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1667 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1668 #undef TARGET_SCHED_INIT
1669 #define TARGET_SCHED_INIT rs6000_sched_init
1670 #undef TARGET_SCHED_FINISH
1671 #define TARGET_SCHED_FINISH rs6000_sched_finish
1672 #undef TARGET_SCHED_REORDER
1673 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1674 #undef TARGET_SCHED_REORDER2
1675 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1677 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1678 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1680 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1681 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1683 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1684 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1685 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1686 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1687 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1688 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1689 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1690 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1692 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1693 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1695 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1696 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1697 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1698 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1699 rs6000_builtin_support_vector_misalignment
1700 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1701 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1702 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1703 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1704 rs6000_builtin_vectorization_cost
1705 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1706 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1707 rs6000_preferred_simd_mode
1708 #undef TARGET_VECTORIZE_INIT_COST
1709 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1710 #undef TARGET_VECTORIZE_ADD_STMT_COST
1711 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1712 #undef TARGET_VECTORIZE_FINISH_COST
1713 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1714 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1715 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1717 #undef TARGET_INIT_BUILTINS
1718 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1719 #undef TARGET_BUILTIN_DECL
1720 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1722 #undef TARGET_FOLD_BUILTIN
1723 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1724 #undef TARGET_GIMPLE_FOLD_BUILTIN
1725 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1727 #undef TARGET_EXPAND_BUILTIN
1728 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1730 #undef TARGET_MANGLE_TYPE
1731 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1733 #undef TARGET_INIT_LIBFUNCS
1734 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1737 #undef TARGET_BINDS_LOCAL_P
1738 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1741 #undef TARGET_MS_BITFIELD_LAYOUT_P
1742 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1744 #undef TARGET_ASM_OUTPUT_MI_THUNK
1745 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1747 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1748 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1750 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1751 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1753 #undef TARGET_REGISTER_MOVE_COST
1754 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1755 #undef TARGET_MEMORY_MOVE_COST
1756 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1757 #undef TARGET_CANNOT_COPY_INSN_P
1758 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1759 #undef TARGET_RTX_COSTS
1760 #define TARGET_RTX_COSTS rs6000_rtx_costs
1761 #undef TARGET_ADDRESS_COST
1762 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1764 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1765 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1767 #undef TARGET_PROMOTE_FUNCTION_MODE
1768 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1770 #undef TARGET_RETURN_IN_MEMORY
1771 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1773 #undef TARGET_RETURN_IN_MSB
1774 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1776 #undef TARGET_SETUP_INCOMING_VARARGS
1777 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1779 /* Always strict argument naming on rs6000. */
1780 #undef TARGET_STRICT_ARGUMENT_NAMING
1781 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1782 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1783 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1784 #undef TARGET_SPLIT_COMPLEX_ARG
1785 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1786 #undef TARGET_MUST_PASS_IN_STACK
1787 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1788 #undef TARGET_PASS_BY_REFERENCE
1789 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1790 #undef TARGET_ARG_PARTIAL_BYTES
1791 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1792 #undef TARGET_FUNCTION_ARG_ADVANCE
1793 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1794 #undef TARGET_FUNCTION_ARG
1795 #define TARGET_FUNCTION_ARG rs6000_function_arg
1796 #undef TARGET_FUNCTION_ARG_BOUNDARY
1797 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1799 #undef TARGET_BUILD_BUILTIN_VA_LIST
1800 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1802 #undef TARGET_EXPAND_BUILTIN_VA_START
1803 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1805 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1806 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1808 #undef TARGET_EH_RETURN_FILTER_MODE
1809 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1811 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1812 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1814 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1815 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1817 #undef TARGET_FLOATN_MODE
1818 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1820 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1821 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1823 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1824 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1826 #undef TARGET_MD_ASM_ADJUST
1827 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1829 #undef TARGET_OPTION_OVERRIDE
1830 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1832 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1833 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1834 rs6000_builtin_vectorized_function
1836 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1837 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1838 rs6000_builtin_md_vectorized_function
1840 #undef TARGET_STACK_PROTECT_GUARD
1841 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1844 #undef TARGET_STACK_PROTECT_FAIL
1845 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1849 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1850 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
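/* Added illustrative note (not in the original): this is the usual
   high-adjusted/low split, i.e. for a 32-bit displacement D the sequence
   uses high = (D + 0x8000) >> 16 and low = D - (high << 16), keeping low in
   the signed 16-bit range; every access whose D produces the same adjusted
   high half can therefore reuse the same tmp anchor.  */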
1860 #undef TARGET_MIN_ANCHOR_OFFSET
1861 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1862 #undef TARGET_MAX_ANCHOR_OFFSET
1863 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1864 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1865 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1866 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1867 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1869 #undef TARGET_BUILTIN_RECIPROCAL
1870 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1872 #undef TARGET_SECONDARY_RELOAD
1873 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1875 #undef TARGET_LEGITIMATE_ADDRESS_P
1876 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1878 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1879 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1881 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1882 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1884 #undef TARGET_CAN_ELIMINATE
1885 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1887 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1888 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1890 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1891 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1893 #undef TARGET_TRAMPOLINE_INIT
1894 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1896 #undef TARGET_FUNCTION_VALUE
1897 #define TARGET_FUNCTION_VALUE rs6000_function_value
1899 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1900 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1902 #undef TARGET_OPTION_SAVE
1903 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1905 #undef TARGET_OPTION_RESTORE
1906 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1908 #undef TARGET_OPTION_PRINT
1909 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1911 #undef TARGET_CAN_INLINE_P
1912 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1914 #undef TARGET_SET_CURRENT_FUNCTION
1915 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1917 #undef TARGET_LEGITIMATE_CONSTANT_P
1918 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1920 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1921 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1923 #undef TARGET_CAN_USE_DOLOOP_P
1924 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1926 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1927 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1929 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1930 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1931 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1932 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1933 #undef TARGET_UNWIND_WORD_MODE
1934 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1936 #undef TARGET_OFFLOAD_OPTIONS
1937 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1939 #undef TARGET_C_MODE_FOR_SUFFIX
1940 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1942 #undef TARGET_INVALID_BINARY_OP
1943 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1945 #undef TARGET_OPTAB_SUPPORTED_P
1946 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1948 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1949 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1951 #undef TARGET_COMPARE_VERSION_PRIORITY
1952 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1954 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1955 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1956 rs6000_generate_version_dispatcher_body
1958 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1959 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1960 rs6000_get_function_versions_dispatcher
1962 #undef TARGET_OPTION_FUNCTION_VERSIONS
1963 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1965 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1966 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1967 rs6000_hard_regno_call_part_clobbered
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
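/* Added, purely illustrative note: each RS6000_CPU line in rs6000-cpus.def
   maps one -mcpu=/-mtune= name onto a processor enum and an ISA flag mask,
   so an entry shaped roughly like

     RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER)

   (the exact flag macros are defined in rs6000-cpus.def and may differ)
   expands to one { name, processor, target_enable } element of
   processor_target_table above.  */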
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
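/* Added worked example (not in the original source): a 16-byte V2DFmode
   value in an FPR uses UNITS_PER_VSX_WORD (16 bytes) when VSX addressing is
   available, so (16 + 16 - 1) / 16 = 1 register; without VSX the divisor is
   UNITS_PER_FP_WORD (8 bytes) and the same value needs
   (16 + 8 - 1) / 8 = 2 registers.  */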
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || FLOAT128_VECTOR_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
	return false;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	{
	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	    return 1;

	  if (TARGET_P8_VECTOR && (mode == SImode))
	    return 1;

	  if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
	    return 1;
	}

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except general register
     and it must be able to fit within the register set.  */
  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
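/* Added, illustrative sketch only: the predicate above is normally consumed
   through the precomputed rs6000_hard_regno_mode_ok_p table that
   rs6000_init_hard_regno_mode_ok fills in later, e.g.:  */
#if 0
static bool
example_df_fits_in_first_fpr (void)
{
  /* True when a DFmode value may live in the first floating point reg.  */
  return rs6000_hard_regno_mode_ok_p[DFmode][FIRST_FPR_REGNO];
}
#endif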
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  */

static bool
rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
  if (TARGET_32BIT
      && TARGET_POWERPC64
      && GET_MODE_SIZE (mode) > 4
      && INT_REGNO_P (regno))
    return true;

  if (TARGET_VSX
      && FP_REGNO_P (regno)
      && GET_MODE_SIZE (mode) > 8
      && !FLOAT128_2REG_P (mode))
    return true;

  return false;
}
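/* Added explanatory note (not in the original): the two cases above cover
   the configurations where only part of a register survives a call -- with
   -m32 on a 64-bit-capable processor only the low 32 bits of the GPRs are
   preserved, and with VSX only the low 64-bit (FPR) half of a wider value
   held in a floating point register is preserved.  */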
2150 /* Print interesting facts about registers. */
2152 rs6000_debug_reg_print (int first_regno
, int last_regno
, const char *reg_name
)
2156 for (r
= first_regno
; r
<= last_regno
; ++r
)
2158 const char *comma
= "";
2161 if (first_regno
== last_regno
)
2162 fprintf (stderr
, "%s:\t", reg_name
);
2164 fprintf (stderr
, "%s%d:\t", reg_name
, r
- first_regno
);
2167 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2168 if (rs6000_hard_regno_mode_ok_p
[m
][r
] && rs6000_hard_regno_nregs
[m
][r
])
2172 fprintf (stderr
, ",\n\t");
2177 if (rs6000_hard_regno_nregs
[m
][r
] > 1)
2178 len
+= fprintf (stderr
, "%s%s/%d", comma
, GET_MODE_NAME (m
),
2179 rs6000_hard_regno_nregs
[m
][r
]);
2181 len
+= fprintf (stderr
, "%s%s", comma
, GET_MODE_NAME (m
));
2186 if (call_used_regs
[r
])
2190 fprintf (stderr
, ",\n\t");
2195 len
+= fprintf (stderr
, "%s%s", comma
, "call-used");
2203 fprintf (stderr
, ",\n\t");
2208 len
+= fprintf (stderr
, "%s%s", comma
, "fixed");
2214 fprintf (stderr
, ",\n\t");
2218 len
+= fprintf (stderr
, "%sreg-class = %s", comma
,
2219 reg_class_names
[(int)rs6000_regno_regclass
[r
]]);
2224 fprintf (stderr
, ",\n\t");
2228 fprintf (stderr
, "%sregno = %d\n", comma
, r
);
2233 rs6000_debug_vector_unit (enum rs6000_vector v
)
2239 case VECTOR_NONE
: ret
= "none"; break;
2240 case VECTOR_ALTIVEC
: ret
= "altivec"; break;
2241 case VECTOR_VSX
: ret
= "vsx"; break;
2242 case VECTOR_P8_VECTOR
: ret
= "p8_vector"; break;
2243 case VECTOR_PAIRED
: ret
= "paired"; break;
2244 case VECTOR_OTHER
: ret
= "other"; break;
2245 default: ret
= "unknown"; break;
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2259 if ((mask
& RELOAD_REG_VALID
) != 0)
2261 else if (keep_spaces
)
2264 if ((mask
& RELOAD_REG_MULTIPLE
) != 0)
2266 else if (keep_spaces
)
2269 if ((mask
& RELOAD_REG_INDEXED
) != 0)
2271 else if (keep_spaces
)
2274 if ((mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
2276 else if ((mask
& RELOAD_REG_OFFSET
) != 0)
2278 else if (keep_spaces
)
2281 if ((mask
& RELOAD_REG_PRE_INCDEC
) != 0)
2283 else if (keep_spaces
)
2286 if ((mask
& RELOAD_REG_PRE_MODIFY
) != 0)
2288 else if (keep_spaces
)
2291 if ((mask
& RELOAD_REG_AND_M16
) != 0)
2293 else if (keep_spaces
)
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
2309 fprintf (stderr
, "Mode: %-5s", GET_MODE_NAME (m
));
2310 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2311 fprintf (stderr
, " %s: %s", reload_reg_map
[rc
].name
,
2312 rs6000_debug_addr_mask (reg_addr
[m
].addr_mask
[rc
], true));
2314 if ((reg_addr
[m
].reload_store
!= CODE_FOR_nothing
)
2315 || (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
))
2316 fprintf (stderr
, " Reload=%c%c",
2317 (reg_addr
[m
].reload_store
!= CODE_FOR_nothing
) ? 's' : '*',
2318 (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
) ? 'l' : '*');
2320 spaces
+= sizeof (" Reload=sl") - 1;
2322 if (reg_addr
[m
].scalar_in_vmx_p
)
2324 fprintf (stderr
, "%*s Upper=y", spaces
, "");
2328 spaces
+= sizeof (" Upper=y") - 1;
2330 fuse_extra_p
= ((reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2331 || reg_addr
[m
].fused_toc
);
2334 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2336 if (rc
!= RELOAD_REG_ANY
)
2338 if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2339 || reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2340 || reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
2341 || reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
2342 || reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2344 fuse_extra_p
= true;
2353 fprintf (stderr
, "%*s Fuse:", spaces
, "");
2356 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2358 if (rc
!= RELOAD_REG_ANY
)
2362 if (reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
)
2364 else if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
)
2369 if (reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2371 else if (reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
)
2376 if (load
== '-' && store
== '-')
2380 fprintf (stderr
, "%*s%c=%c%c", (spaces
+ 1), "",
2381 reload_reg_map
[rc
].name
[0], load
, store
);
2387 if (reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2389 fprintf (stderr
, "%*sP8gpr", (spaces
+ 1), "");
2393 spaces
+= sizeof (" P8gpr") - 1;
2395 if (reg_addr
[m
].fused_toc
)
2397 fprintf (stderr
, "%*sToc", (spaces
+ 1), "");
2401 spaces
+= sizeof (" Toc") - 1;
2404 spaces
+= sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2406 if (rs6000_vector_unit
[m
] != VECTOR_NONE
2407 || rs6000_vector_mem
[m
] != VECTOR_NONE
)
2409 fprintf (stderr
, "%*s vector: arith=%-10s mem=%s",
2411 rs6000_debug_vector_unit (rs6000_vector_unit
[m
]),
2412 rs6000_debug_vector_unit (rs6000_vector_mem
[m
]));
2415 fputs ("\n", stderr
);
2418 #define DEBUG_FMT_ID "%-32s= "
2419 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2420 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2421 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2423 /* Print various interesting information with -mdebug=reg. */
2425 rs6000_debug_reg_global (void)
2427 static const char *const tf
[2] = { "false", "true" };
2428 const char *nl
= (const char *)0;
2431 char costly_num
[20];
2433 char flags_buffer
[40];
2434 const char *costly_str
;
2435 const char *nop_str
;
2436 const char *trace_str
;
2437 const char *abi_str
;
2438 const char *cmodel_str
;
2439 struct cl_target_option cl_opts
;
2441 /* Modes we want tieable information on. */
2442 static const machine_mode print_tieable_modes
[] = {
2478 /* Virtual regs we are interested in. */
2479 const static struct {
2480 int regno
; /* register number. */
2481 const char *name
; /* register name. */
2482 } virtual_regs
[] = {
2483 { STACK_POINTER_REGNUM
, "stack pointer:" },
2484 { TOC_REGNUM
, "toc: " },
2485 { STATIC_CHAIN_REGNUM
, "static chain: " },
2486 { RS6000_PIC_OFFSET_TABLE_REGNUM
, "pic offset: " },
2487 { HARD_FRAME_POINTER_REGNUM
, "hard frame: " },
2488 { ARG_POINTER_REGNUM
, "arg pointer: " },
2489 { FRAME_POINTER_REGNUM
, "frame pointer:" },
2490 { FIRST_PSEUDO_REGISTER
, "first pseudo: " },
2491 { FIRST_VIRTUAL_REGISTER
, "first virtual:" },
2492 { VIRTUAL_INCOMING_ARGS_REGNUM
, "incoming_args:" },
2493 { VIRTUAL_STACK_VARS_REGNUM
, "stack_vars: " },
2494 { VIRTUAL_STACK_DYNAMIC_REGNUM
, "stack_dynamic:" },
2495 { VIRTUAL_OUTGOING_ARGS_REGNUM
, "outgoing_args:" },
2496 { VIRTUAL_CFA_REGNUM
, "cfa (frame): " },
  { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2498 { LAST_VIRTUAL_REGISTER
, "last virtual: " },
2501 fputs ("\nHard register information:\n", stderr
);
2502 rs6000_debug_reg_print (FIRST_GPR_REGNO
, LAST_GPR_REGNO
, "gr");
2503 rs6000_debug_reg_print (FIRST_FPR_REGNO
, LAST_FPR_REGNO
, "fp");
2504 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO
,
2507 rs6000_debug_reg_print (LR_REGNO
, LR_REGNO
, "lr");
2508 rs6000_debug_reg_print (CTR_REGNO
, CTR_REGNO
, "ctr");
2509 rs6000_debug_reg_print (CR0_REGNO
, CR7_REGNO
, "cr");
2510 rs6000_debug_reg_print (CA_REGNO
, CA_REGNO
, "ca");
2511 rs6000_debug_reg_print (VRSAVE_REGNO
, VRSAVE_REGNO
, "vrsave");
2512 rs6000_debug_reg_print (VSCR_REGNO
, VSCR_REGNO
, "vscr");
2514 fputs ("\nVirtual/stack/frame registers:\n", stderr
);
2515 for (v
= 0; v
< ARRAY_SIZE (virtual_regs
); v
++)
2516 fprintf (stderr
, "%s regno = %3d\n", virtual_regs
[v
].name
, virtual_regs
[v
].regno
);
2520 "d reg_class = %s\n"
2521 "f reg_class = %s\n"
2522 "v reg_class = %s\n"
2523 "wa reg_class = %s\n"
2524 "wb reg_class = %s\n"
2525 "wd reg_class = %s\n"
2526 "we reg_class = %s\n"
2527 "wf reg_class = %s\n"
2528 "wg reg_class = %s\n"
2529 "wh reg_class = %s\n"
2530 "wi reg_class = %s\n"
2531 "wj reg_class = %s\n"
2532 "wk reg_class = %s\n"
2533 "wl reg_class = %s\n"
2534 "wm reg_class = %s\n"
2535 "wo reg_class = %s\n"
2536 "wp reg_class = %s\n"
2537 "wq reg_class = %s\n"
2538 "wr reg_class = %s\n"
2539 "ws reg_class = %s\n"
2540 "wt reg_class = %s\n"
2541 "wu reg_class = %s\n"
2542 "wv reg_class = %s\n"
2543 "ww reg_class = %s\n"
2544 "wx reg_class = %s\n"
2545 "wy reg_class = %s\n"
2546 "wz reg_class = %s\n"
2547 "wA reg_class = %s\n"
2548 "wH reg_class = %s\n"
2549 "wI reg_class = %s\n"
2550 "wJ reg_class = %s\n"
2551 "wK reg_class = %s\n"
2553 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_d
]],
2554 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_f
]],
2555 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_v
]],
2556 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wa
]],
2557 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wb
]],
2558 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wd
]],
2559 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_we
]],
2560 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wf
]],
2561 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wg
]],
2562 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wh
]],
2563 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wi
]],
2564 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wj
]],
2565 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wk
]],
2566 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wl
]],
2567 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wm
]],
2568 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wo
]],
2569 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wp
]],
2570 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wq
]],
2571 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wr
]],
2572 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ws
]],
2573 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wt
]],
2574 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wu
]],
2575 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wv
]],
2576 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ww
]],
2577 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wx
]],
2578 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wy
]],
2579 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wz
]],
2580 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wA
]],
2581 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wH
]],
2582 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wI
]],
2583 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wJ
]],
2584 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wK
]]);
2587 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2588 rs6000_debug_print_mode (m
);
2590 fputs ("\n", stderr
);
2592 for (m1
= 0; m1
< ARRAY_SIZE (print_tieable_modes
); m1
++)
2594 machine_mode mode1
= print_tieable_modes
[m1
];
2595 bool first_time
= true;
2597 nl
= (const char *)0;
2598 for (m2
= 0; m2
< ARRAY_SIZE (print_tieable_modes
); m2
++)
2600 machine_mode mode2
= print_tieable_modes
[m2
];
2601 if (mode1
!= mode2
&& MODES_TIEABLE_P (mode1
, mode2
))
2605 fprintf (stderr
, "Tieable modes %s:", GET_MODE_NAME (mode1
));
2610 fprintf (stderr
, " %s", GET_MODE_NAME (mode2
));
2615 fputs ("\n", stderr
);
2621 if (rs6000_recip_control
)
2623 fprintf (stderr
, "\nReciprocal mask = 0x%x\n", rs6000_recip_control
);
2625 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2626 if (rs6000_recip_bits
[m
])
2629 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2631 (RS6000_RECIP_AUTO_RE_P (m
)
2633 : (RS6000_RECIP_HAVE_RE_P (m
) ? "have" : "none")),
2634 (RS6000_RECIP_AUTO_RSQRTE_P (m
)
2636 : (RS6000_RECIP_HAVE_RSQRTE_P (m
) ? "have" : "none")));
2639 fputs ("\n", stderr
);
2642 if (rs6000_cpu_index
>= 0)
2644 const char *name
= processor_target_table
[rs6000_cpu_index
].name
;
2646 = processor_target_table
[rs6000_cpu_index
].target_enable
;
2648 sprintf (flags_buffer
, "-mcpu=%s flags", name
);
2649 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2652 fprintf (stderr
, DEBUG_FMT_S
, "cpu", "<none>");
2654 if (rs6000_tune_index
>= 0)
2656 const char *name
= processor_target_table
[rs6000_tune_index
].name
;
2658 = processor_target_table
[rs6000_tune_index
].target_enable
;
2660 sprintf (flags_buffer
, "-mtune=%s flags", name
);
2661 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2664 fprintf (stderr
, DEBUG_FMT_S
, "tune", "<none>");
2666 cl_target_option_save (&cl_opts
, &global_options
);
2667 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags",
2670 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags_explicit",
2671 rs6000_isa_flags_explicit
);
2673 rs6000_print_builtin_options (stderr
, 0, "rs6000_builtin_mask",
2674 rs6000_builtin_mask
);
2676 rs6000_print_isa_options (stderr
, 0, "TARGET_DEFAULT", TARGET_DEFAULT
);
2678 fprintf (stderr
, DEBUG_FMT_S
, "--with-cpu default",
2679 OPTION_TARGET_CPU_DEFAULT
? OPTION_TARGET_CPU_DEFAULT
: "<none>");
2681 switch (rs6000_sched_costly_dep
)
2683 case max_dep_latency
:
2684 costly_str
= "max_dep_latency";
2688 costly_str
= "no_dep_costly";
2691 case all_deps_costly
:
2692 costly_str
= "all_deps_costly";
2695 case true_store_to_load_dep_costly
:
2696 costly_str
= "true_store_to_load_dep_costly";
2699 case store_to_load_dep_costly
:
2700 costly_str
= "store_to_load_dep_costly";
2704 costly_str
= costly_num
;
2705 sprintf (costly_num
, "%d", (int)rs6000_sched_costly_dep
);
2709 fprintf (stderr
, DEBUG_FMT_S
, "sched_costly_dep", costly_str
);
2711 switch (rs6000_sched_insert_nops
)
2713 case sched_finish_regroup_exact
:
2714 nop_str
= "sched_finish_regroup_exact";
2717 case sched_finish_pad_groups
:
2718 nop_str
= "sched_finish_pad_groups";
2721 case sched_finish_none
:
2722 nop_str
= "sched_finish_none";
2727 sprintf (nop_num
, "%d", (int)rs6000_sched_insert_nops
);
2731 fprintf (stderr
, DEBUG_FMT_S
, "sched_insert_nops", nop_str
);
2733 switch (rs6000_sdata
)
2740 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "data");
2744 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "sysv");
2748 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "eabi");
2753 switch (rs6000_traceback
)
2755 case traceback_default
: trace_str
= "default"; break;
2756 case traceback_none
: trace_str
= "none"; break;
2757 case traceback_part
: trace_str
= "part"; break;
2758 case traceback_full
: trace_str
= "full"; break;
2759 default: trace_str
= "unknown"; break;
2762 fprintf (stderr
, DEBUG_FMT_S
, "traceback", trace_str
);
2764 switch (rs6000_current_cmodel
)
2766 case CMODEL_SMALL
: cmodel_str
= "small"; break;
2767 case CMODEL_MEDIUM
: cmodel_str
= "medium"; break;
2768 case CMODEL_LARGE
: cmodel_str
= "large"; break;
2769 default: cmodel_str
= "unknown"; break;
2772 fprintf (stderr
, DEBUG_FMT_S
, "cmodel", cmodel_str
);
2774 switch (rs6000_current_abi
)
2776 case ABI_NONE
: abi_str
= "none"; break;
2777 case ABI_AIX
: abi_str
= "aix"; break;
2778 case ABI_ELFv2
: abi_str
= "ELFv2"; break;
2779 case ABI_V4
: abi_str
= "V4"; break;
2780 case ABI_DARWIN
: abi_str
= "darwin"; break;
2781 default: abi_str
= "unknown"; break;
2784 fprintf (stderr
, DEBUG_FMT_S
, "abi", abi_str
);
2786 if (rs6000_altivec_abi
)
2787 fprintf (stderr
, DEBUG_FMT_S
, "altivec_abi", "true");
2789 if (rs6000_darwin64_abi
)
2790 fprintf (stderr
, DEBUG_FMT_S
, "darwin64_abi", "true");
2792 fprintf (stderr
, DEBUG_FMT_S
, "single_float",
2793 (TARGET_SINGLE_FLOAT
? "true" : "false"));
2795 fprintf (stderr
, DEBUG_FMT_S
, "double_float",
2796 (TARGET_DOUBLE_FLOAT
? "true" : "false"));
2798 fprintf (stderr
, DEBUG_FMT_S
, "soft_float",
2799 (TARGET_SOFT_FLOAT
? "true" : "false"));
2801 if (TARGET_LINK_STACK
)
2802 fprintf (stderr
, DEBUG_FMT_S
, "link_stack", "true");
2804 if (TARGET_P8_FUSION
)
2808 strcpy (options
, (TARGET_P9_FUSION
) ? "power9" : "power8");
2809 if (TARGET_TOC_FUSION
)
2810 strcat (options
, ", toc");
2812 if (TARGET_P8_FUSION_SIGN
)
2813 strcat (options
, ", sign");
2815 fprintf (stderr
, DEBUG_FMT_S
, "fusion", options
);
2818 fprintf (stderr
, DEBUG_FMT_S
, "plt-format",
2819 TARGET_SECURE_PLT
? "secure" : "bss");
2820 fprintf (stderr
, DEBUG_FMT_S
, "struct-return",
2821 aix_struct_return
? "aix" : "sysv");
2822 fprintf (stderr
, DEBUG_FMT_S
, "always_hint", tf
[!!rs6000_always_hint
]);
2823 fprintf (stderr
, DEBUG_FMT_S
, "sched_groups", tf
[!!rs6000_sched_groups
]);
2824 fprintf (stderr
, DEBUG_FMT_S
, "align_branch",
2825 tf
[!!rs6000_align_branch_targets
]);
2826 fprintf (stderr
, DEBUG_FMT_D
, "tls_size", rs6000_tls_size
);
2827 fprintf (stderr
, DEBUG_FMT_D
, "long_double_size",
2828 rs6000_long_double_type_size
);
2829 fprintf (stderr
, DEBUG_FMT_D
, "sched_restricted_insns_priority",
2830 (int)rs6000_sched_restricted_insns_priority
);
2831 fprintf (stderr
, DEBUG_FMT_D
, "Number of standard builtins",
2833 fprintf (stderr
, DEBUG_FMT_D
, "Number of rs6000 builtins",
2834 (int)RS6000_BUILTIN_COUNT
);
2836 fprintf (stderr
, DEBUG_FMT_D
, "Enable float128 on VSX",
2837 (int)TARGET_FLOAT128_ENABLE_TYPE
);
2840 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit scalar element",
2841 (int)VECTOR_ELEMENT_SCALAR_64BIT
);
2843 if (TARGET_DIRECT_MOVE_128
)
2844 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit mfvsrld element",
2845 (int)VECTOR_ELEMENT_MFVSRLD_64BIT
);
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
2856 ssize_t rc
, reg
, m
, nregs
;
2857 addr_mask_type any_addr_mask
, addr_mask
;
2859 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2861 machine_mode m2
= (machine_mode
) m
;
2862 bool complex_p
= false;
2863 bool small_int_p
= (m2
== QImode
|| m2
== HImode
|| m2
== SImode
);
2866 if (COMPLEX_MODE_P (m2
))
2869 m2
= GET_MODE_INNER (m2
);
2872 msize
= GET_MODE_SIZE (m2
);
2874 /* SDmode is special in that we want to access it only via REG+REG
2875 addressing on power7 and above, since we want to use the LFIWZX and
2876 STFIWZX instructions to load it. */
2877 bool indexed_only_p
= (m
== SDmode
&& TARGET_NO_SDMODE_STACK
);
2880 for (rc
= FIRST_RELOAD_REG_CLASS
; rc
<= LAST_RELOAD_REG_CLASS
; rc
++)
2883 reg
= reload_reg_map
[rc
].reg
;
2885 /* Can mode values go in the GPR/FPR/Altivec registers? */
2886 if (reg
>= 0 && rs6000_hard_regno_mode_ok_p
[m
][reg
])
2888 bool small_int_vsx_p
= (small_int_p
2889 && (rc
== RELOAD_REG_FPR
2890 || rc
== RELOAD_REG_VMX
));
2892 nregs
= rs6000_hard_regno_nregs
[m
][reg
];
2893 addr_mask
|= RELOAD_REG_VALID
;
2895 /* Indicate if the mode takes more than 1 physical register. If
2896 it takes a single register, indicate it can do REG+REG
2897 addressing. Small integers in VSX registers can only do
2898 REG+REG addressing. */
2899 if (small_int_vsx_p
)
2900 addr_mask
|= RELOAD_REG_INDEXED
;
2901 else if (nregs
> 1 || m
== BLKmode
|| complex_p
)
2902 addr_mask
|= RELOAD_REG_MULTIPLE
;
2904 addr_mask
|= RELOAD_REG_INDEXED
;
2906 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2907 addressing. If we allow scalars into Altivec registers,
2908 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2911 && (rc
== RELOAD_REG_GPR
|| rc
== RELOAD_REG_FPR
)
2913 && !VECTOR_MODE_P (m2
)
2914 && !FLOAT128_VECTOR_P (m2
)
2916 && !small_int_vsx_p
)
2918 addr_mask
|= RELOAD_REG_PRE_INCDEC
;
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2926 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2930 if (TARGET_POWERPC64
)
2931 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2937 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2946 if ((addr_mask
!= 0) && !indexed_only_p
2948 && (rc
== RELOAD_REG_GPR
2949 || ((msize
== 8 || m2
== SFmode
)
2950 && (rc
== RELOAD_REG_FPR
2951 || (rc
== RELOAD_REG_VMX
&& TARGET_P9_VECTOR
)))))
2952 addr_mask
|= RELOAD_REG_OFFSET
;
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled. The offset for 128-bit VSX registers is
2956 only 12-bits. While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask
!= 0) && !indexed_only_p
2959 && msize
== 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2
)
2961 || (m2
== TImode
&& TARGET_VSX
)))
2963 addr_mask
|= RELOAD_REG_OFFSET
;
2964 if (rc
== RELOAD_REG_FPR
|| rc
== RELOAD_REG_VMX
)
2965 addr_mask
|= RELOAD_REG_QUAD_OFFSET
;
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc
== RELOAD_REG_VMX
&& msize
== 16
2971 && (addr_mask
& RELOAD_REG_VALID
) != 0)
2972 addr_mask
|= RELOAD_REG_AND_M16
;
2974 reg_addr
[m
].addr_mask
[rc
] = addr_mask
;
2975 any_addr_mask
|= addr_mask
;
2978 reg_addr
[m
].addr_mask
[RELOAD_REG_ANY
] = any_addr_mask
;
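/* Added illustrative example (not part of the original): on a typical 64-bit
   VSX target the loop above would leave DFmode in the GPR class with
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET plus the
   pre-increment/pre-modify bits, while a 16-byte vector mode in the VMX
   class usually ends up with RELOAD_REG_VALID | RELOAD_REG_INDEXED |
   RELOAD_REG_AND_M16 (and RELOAD_REG_OFFSET only once ISA 3.0 D-form vector
   addressing is enabled).  */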
2983 /* Initialize the various global tables that are based on register size. */
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p
)
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass
[0] = GENERAL_REGS
;
2993 for (r
= 1; r
< 32; ++r
)
2994 rs6000_regno_regclass
[r
] = BASE_REGS
;
2996 for (r
= 32; r
< 64; ++r
)
2997 rs6000_regno_regclass
[r
] = FLOAT_REGS
;
2999 for (r
= 64; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3000 rs6000_regno_regclass
[r
] = NO_REGS
;
3002 for (r
= FIRST_ALTIVEC_REGNO
; r
<= LAST_ALTIVEC_REGNO
; ++r
)
3003 rs6000_regno_regclass
[r
] = ALTIVEC_REGS
;
3005 rs6000_regno_regclass
[CR0_REGNO
] = CR0_REGS
;
3006 for (r
= CR1_REGNO
; r
<= CR7_REGNO
; ++r
)
3007 rs6000_regno_regclass
[r
] = CR_REGS
;
3009 rs6000_regno_regclass
[LR_REGNO
] = LINK_REGS
;
3010 rs6000_regno_regclass
[CTR_REGNO
] = CTR_REGS
;
3011 rs6000_regno_regclass
[CA_REGNO
] = NO_REGS
;
3012 rs6000_regno_regclass
[VRSAVE_REGNO
] = VRSAVE_REGS
;
3013 rs6000_regno_regclass
[VSCR_REGNO
] = VRSAVE_REGS
;
3014 rs6000_regno_regclass
[TFHAR_REGNO
] = SPR_REGS
;
3015 rs6000_regno_regclass
[TFIAR_REGNO
] = SPR_REGS
;
3016 rs6000_regno_regclass
[TEXASR_REGNO
] = SPR_REGS
;
3017 rs6000_regno_regclass
[ARG_POINTER_REGNUM
] = BASE_REGS
;
3018 rs6000_regno_regclass
[FRAME_POINTER_REGNUM
] = BASE_REGS
;
3020 /* Precalculate register class to simpler reload register class. We don't
3021 need all of the register classes that are combinations of different
3022 classes, just the simple ones that have constraint letters. */
3023 for (c
= 0; c
< N_REG_CLASSES
; c
++)
3024 reg_class_to_reg_type
[c
] = NO_REG_TYPE
;
3026 reg_class_to_reg_type
[(int)GENERAL_REGS
] = GPR_REG_TYPE
;
3027 reg_class_to_reg_type
[(int)BASE_REGS
] = GPR_REG_TYPE
;
3028 reg_class_to_reg_type
[(int)VSX_REGS
] = VSX_REG_TYPE
;
3029 reg_class_to_reg_type
[(int)VRSAVE_REGS
] = SPR_REG_TYPE
;
3030 reg_class_to_reg_type
[(int)VSCR_REGS
] = SPR_REG_TYPE
;
3031 reg_class_to_reg_type
[(int)LINK_REGS
] = SPR_REG_TYPE
;
3032 reg_class_to_reg_type
[(int)CTR_REGS
] = SPR_REG_TYPE
;
3033 reg_class_to_reg_type
[(int)LINK_OR_CTR_REGS
] = SPR_REG_TYPE
;
3034 reg_class_to_reg_type
[(int)CR_REGS
] = CR_REG_TYPE
;
3035 reg_class_to_reg_type
[(int)CR0_REGS
] = CR_REG_TYPE
;
3039 reg_class_to_reg_type
[(int)FLOAT_REGS
] = VSX_REG_TYPE
;
3040 reg_class_to_reg_type
[(int)ALTIVEC_REGS
] = VSX_REG_TYPE
;
3044 reg_class_to_reg_type
[(int)FLOAT_REGS
] = FPR_REG_TYPE
;
3045 reg_class_to_reg_type
[(int)ALTIVEC_REGS
] = ALTIVEC_REG_TYPE
;
3048 /* Precalculate the valid memory formats as well as the vector information,
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3051 gcc_assert ((int)VECTOR_NONE
== 0);
3052 memset ((void *) &rs6000_vector_unit
[0], '\0', sizeof (rs6000_vector_unit
));
3053 memset ((void *) &rs6000_vector_mem
[0], '\0', sizeof (rs6000_vector_unit
));
3055 gcc_assert ((int)CODE_FOR_nothing
== 0);
3056 memset ((void *) ®_addr
[0], '\0', sizeof (reg_addr
));
3058 gcc_assert ((int)NO_REGS
== 0);
3059 memset ((void *) &rs6000_constraints
[0], '\0', sizeof (rs6000_constraints
));
3061 /* The VSX hardware allows native alignment for vectors, but control whether the compiler
3062 believes it can use native alignment or still uses 128-bit alignment. */
3063 if (TARGET_VSX
&& !TARGET_VSX_ALIGN_128
)
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE
)
3078 rs6000_vector_mem
[KFmode
] = VECTOR_VSX
;
3079 rs6000_vector_align
[KFmode
] = 128;
3081 if (FLOAT128_IEEE_P (TFmode
))
3083 rs6000_vector_mem
[TFmode
] = VECTOR_VSX
;
3084 rs6000_vector_align
[TFmode
] = 128;
3088 /* V2DF mode, VSX only. */
3091 rs6000_vector_unit
[V2DFmode
] = VECTOR_VSX
;
3092 rs6000_vector_mem
[V2DFmode
] = VECTOR_VSX
;
3093 rs6000_vector_align
[V2DFmode
] = align64
;
3096 /* V4SF mode, either VSX or Altivec. */
3099 rs6000_vector_unit
[V4SFmode
] = VECTOR_VSX
;
3100 rs6000_vector_mem
[V4SFmode
] = VECTOR_VSX
;
3101 rs6000_vector_align
[V4SFmode
] = align32
;
3103 else if (TARGET_ALTIVEC
)
3105 rs6000_vector_unit
[V4SFmode
] = VECTOR_ALTIVEC
;
3106 rs6000_vector_mem
[V4SFmode
] = VECTOR_ALTIVEC
;
3107 rs6000_vector_align
[V4SFmode
] = align32
;
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3114 rs6000_vector_unit
[V4SImode
] = VECTOR_ALTIVEC
;
3115 rs6000_vector_unit
[V8HImode
] = VECTOR_ALTIVEC
;
3116 rs6000_vector_unit
[V16QImode
] = VECTOR_ALTIVEC
;
3117 rs6000_vector_align
[V4SImode
] = align32
;
3118 rs6000_vector_align
[V8HImode
] = align32
;
3119 rs6000_vector_align
[V16QImode
] = align32
;
3123 rs6000_vector_mem
[V4SImode
] = VECTOR_VSX
;
3124 rs6000_vector_mem
[V8HImode
] = VECTOR_VSX
;
3125 rs6000_vector_mem
[V16QImode
] = VECTOR_VSX
;
3129 rs6000_vector_mem
[V4SImode
] = VECTOR_ALTIVEC
;
3130 rs6000_vector_mem
[V8HImode
] = VECTOR_ALTIVEC
;
3131 rs6000_vector_mem
[V16QImode
] = VECTOR_ALTIVEC
;
3135 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3136 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3139 rs6000_vector_mem
[V2DImode
] = VECTOR_VSX
;
3140 rs6000_vector_unit
[V2DImode
]
3141 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3142 rs6000_vector_align
[V2DImode
] = align64
;
3144 rs6000_vector_mem
[V1TImode
] = VECTOR_VSX
;
3145 rs6000_vector_unit
[V1TImode
]
3146 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3147 rs6000_vector_align
[V1TImode
] = 128;
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3154 rs6000_vector_unit
[DFmode
] = VECTOR_VSX
;
3155 rs6000_vector_align
[DFmode
] = 64;
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR
)
3161 rs6000_vector_unit
[SFmode
] = VECTOR_VSX
;
3162 rs6000_vector_align
[SFmode
] = 32;
3165 /* Allow TImode in VSX register and set the VSX memory macros. */
3168 rs6000_vector_mem
[TImode
] = VECTOR_VSX
;
3169 rs6000_vector_align
[TImode
] = align64
;
3172 /* TODO add paired floating point vector support. */
3174 /* Register class constraints for the constraints that depend on compile
3175 switches. When the VSX code was added, different constraints were added
3176 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3177 of the VSX registers are used. The register classes for scalar floating
3178 point types are set, based on whether we allow that type into the upper
3179 (Altivec) registers. GCC has register classes to target the Altivec
3180 registers for load/store operations, to select using a VSX memory
3181 operation instead of the traditional floating point operation. The
3184 d - Register class to use with traditional DFmode instructions.
3185 f - Register class to use with traditional SFmode instructions.
3186 v - Altivec register.
3187 wa - Any VSX register.
3188 wc - Reserved to represent individual CR bits (used in LLVM).
3189 wd - Preferred register class for V2DFmode.
3190 wf - Preferred register class for V4SFmode.
3191 wg - Float register for power6x move insns.
3192 wh - FP register for direct move instructions.
3193 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3194 wj - FP or VSX register to hold 64-bit integers for direct moves.
3195 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3196 wl - Float register if we can do 32-bit signed int loads.
3197 wm - VSX register for ISA 2.07 direct move operations.
3198 wn - always NO_REGS.
3199 wr - GPR if 64-bit mode is permitted.
3200 ws - Register class to do ISA 2.06 DF operations.
3201 wt - VSX register for TImode in VSX registers.
3202 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3203 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3204 ww - Register class to do SF conversions in with VSX operations.
3205 wx - Float register if we can do 32-bit int stores.
3206 wy - Register class to do ISA 2.07 SF operations.
3207 wz - Float register if we can do 32-bit unsigned int loads.
3208 wH - Altivec register if SImode is allowed in VSX registers.
3209 wI - VSX register if SImode is allowed in VSX registers.
3210 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3211 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
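/* Added, purely illustrative sketch of how a constraint letter is consumed:
   a machine description alternative written against "ws" (for example a
   hypothetical pattern "*example_movdf" whose operands use the constraint
   string "=ws,ws") is matched against whatever register class
   rs6000_constraints[RS6000_CONSTRAINT_ws] is set to by the switch handling
   below; constraints that are never set stay NO_REGS, so such an
   alternative is simply unavailable when VSX is disabled.  */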
3213 if (TARGET_HARD_FLOAT
)
3214 rs6000_constraints
[RS6000_CONSTRAINT_f
] = FLOAT_REGS
; /* SFmode */
3216 if (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
3217 rs6000_constraints
[RS6000_CONSTRAINT_d
] = FLOAT_REGS
; /* DFmode */
3221 rs6000_constraints
[RS6000_CONSTRAINT_wa
] = VSX_REGS
;
3222 rs6000_constraints
[RS6000_CONSTRAINT_wd
] = VSX_REGS
; /* V2DFmode */
3223 rs6000_constraints
[RS6000_CONSTRAINT_wf
] = VSX_REGS
; /* V4SFmode */
3224 rs6000_constraints
[RS6000_CONSTRAINT_ws
] = VSX_REGS
; /* DFmode */
3225 rs6000_constraints
[RS6000_CONSTRAINT_wv
] = ALTIVEC_REGS
; /* DFmode */
3226 rs6000_constraints
[RS6000_CONSTRAINT_wi
] = VSX_REGS
; /* DImode */
3227 rs6000_constraints
[RS6000_CONSTRAINT_wt
] = VSX_REGS
; /* TImode */
3230 /* Add conditional constraints based on various options, to allow us to
3231 collapse multiple insn patterns. */
3233 rs6000_constraints
[RS6000_CONSTRAINT_v
] = ALTIVEC_REGS
;
3235 if (TARGET_MFPGPR
) /* DFmode */
3236 rs6000_constraints
[RS6000_CONSTRAINT_wg
] = FLOAT_REGS
;
3239 rs6000_constraints
[RS6000_CONSTRAINT_wl
] = FLOAT_REGS
; /* DImode */
3241 if (TARGET_DIRECT_MOVE
)
3243 rs6000_constraints
[RS6000_CONSTRAINT_wh
] = FLOAT_REGS
;
3244 rs6000_constraints
[RS6000_CONSTRAINT_wj
] /* DImode */
3245 = rs6000_constraints
[RS6000_CONSTRAINT_wi
];
3246 rs6000_constraints
[RS6000_CONSTRAINT_wk
] /* DFmode */
3247 = rs6000_constraints
[RS6000_CONSTRAINT_ws
];
3248 rs6000_constraints
[RS6000_CONSTRAINT_wm
] = VSX_REGS
;
3251 if (TARGET_POWERPC64
)
3253 rs6000_constraints
[RS6000_CONSTRAINT_wr
] = GENERAL_REGS
;
3254 rs6000_constraints
[RS6000_CONSTRAINT_wA
] = BASE_REGS
;
3257 if (TARGET_P8_VECTOR
) /* SFmode */
3259 rs6000_constraints
[RS6000_CONSTRAINT_wu
] = ALTIVEC_REGS
;
3260 rs6000_constraints
[RS6000_CONSTRAINT_wy
] = VSX_REGS
;
3261 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = VSX_REGS
;
3263 else if (TARGET_VSX
)
3264 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = FLOAT_REGS
;
3267 rs6000_constraints
[RS6000_CONSTRAINT_wx
] = FLOAT_REGS
; /* DImode */
3270 rs6000_constraints
[RS6000_CONSTRAINT_wz
] = FLOAT_REGS
; /* DImode */
3272 if (TARGET_FLOAT128_TYPE
)
3274 rs6000_constraints
[RS6000_CONSTRAINT_wq
] = VSX_REGS
; /* KFmode */
3275 if (FLOAT128_IEEE_P (TFmode
))
3276 rs6000_constraints
[RS6000_CONSTRAINT_wp
] = VSX_REGS
; /* TFmode */
3279 if (TARGET_P9_VECTOR
)
3281 /* Support for new D-form instructions. */
3282 rs6000_constraints
[RS6000_CONSTRAINT_wb
] = ALTIVEC_REGS
;
3284 /* Support for ISA 3.0 (power9) vectors. */
3285 rs6000_constraints
[RS6000_CONSTRAINT_wo
] = VSX_REGS
;
3288 /* Support for new direct moves (ISA 3.0 + 64bit). */
3289 if (TARGET_DIRECT_MOVE_128
)
3290 rs6000_constraints
[RS6000_CONSTRAINT_we
] = VSX_REGS
;
3292 /* Support small integers in VSX registers. */
3293 if (TARGET_P8_VECTOR
)
3295 rs6000_constraints
[RS6000_CONSTRAINT_wH
] = ALTIVEC_REGS
;
3296 rs6000_constraints
[RS6000_CONSTRAINT_wI
] = FLOAT_REGS
;
3297 if (TARGET_P9_VECTOR
)
3299 rs6000_constraints
[RS6000_CONSTRAINT_wJ
] = FLOAT_REGS
;
3300 rs6000_constraints
[RS6000_CONSTRAINT_wK
] = ALTIVEC_REGS
;
3304 /* Set up the reload helper and direct move functions. */
3305 if (TARGET_VSX
|| TARGET_ALTIVEC
)
3309 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_di_store
;
3310 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_di_load
;
3311 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_di_store
;
3312 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_di_load
;
3313 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_di_store
;
3314 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_di_load
;
3315 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_di_store
;
3316 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_di_load
;
3317 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_di_store
;
3318 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_di_load
;
3319 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_di_store
;
3320 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_di_load
;
3321 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_di_store
;
3322 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_di_load
;
3323 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_di_store
;
3324 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_di_load
;
3325 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_di_store
;
3326 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_di_load
;
3327 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_di_store
;
3328 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_di_load
;
3330 if (FLOAT128_VECTOR_P (KFmode
))
3332 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_di_store
;
3333 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_di_load
;
3336 if (FLOAT128_VECTOR_P (TFmode
))
3338 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_di_store
;
3339 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_di_load
;
3342 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3344 if (TARGET_NO_SDMODE_STACK
)
3346 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_di_store
;
3347 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_di_load
;
3352 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_di_store
;
3353 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_di_load
;
3356 if (TARGET_DIRECT_MOVE
&& !TARGET_DIRECT_MOVE_128
)
3358 reg_addr
[TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxti
;
3359 reg_addr
[V1TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv1ti
;
3360 reg_addr
[V2DFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2df
;
3361 reg_addr
[V2DImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2di
;
3362 reg_addr
[V4SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4sf
;
3363 reg_addr
[V4SImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4si
;
3364 reg_addr
[V8HImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv8hi
;
3365 reg_addr
[V16QImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv16qi
;
3366 reg_addr
[SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxsf
;
3368 reg_addr
[TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprti
;
3369 reg_addr
[V1TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv1ti
;
3370 reg_addr
[V2DFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2df
;
3371 reg_addr
[V2DImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2di
;
3372 reg_addr
[V4SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4sf
;
3373 reg_addr
[V4SImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4si
;
3374 reg_addr
[V8HImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv8hi
;
3375 reg_addr
[V16QImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv16qi
;
3376 reg_addr
[SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprsf
;
3378 if (FLOAT128_VECTOR_P (KFmode
))
3380 reg_addr
[KFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxkf
;
3381 reg_addr
[KFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprkf
;
3384 if (FLOAT128_VECTOR_P (TFmode
))
3386 reg_addr
[TFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxtf
;
3387 reg_addr
[TFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprtf
;
3393 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_si_store
;
3394 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_si_load
;
3395 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_si_store
;
3396 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_si_load
;
3397 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_si_store
;
3398 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_si_load
;
3399 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_si_store
;
3400 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_si_load
;
3401 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_si_store
;
3402 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_si_load
;
3403 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_si_store
;
3404 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_si_load
;
3405 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_si_store
;
3406 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_si_load
;
3407 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_si_store
;
3408 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_si_load
;
3409 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_si_store
;
3410 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_si_load
;
3411 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_si_store
;
3412 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_si_load
;
3414 if (FLOAT128_VECTOR_P (KFmode
))
3416 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_si_store
;
3417 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_si_load
;
3420 if (FLOAT128_IEEE_P (TFmode
))
3422 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_si_store
;
3423 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_si_load
;
3426 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3428 if (TARGET_NO_SDMODE_STACK
)
3430 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_si_store
;
3431 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_si_load
;
3436 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_si_store
;
3437 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_si_load
;
3440 if (TARGET_DIRECT_MOVE
)
3442 reg_addr
[DImode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdi
;
3443 reg_addr
[DDmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdd
;
3444 reg_addr
[DFmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdf
;
3448 reg_addr
[DFmode
].scalar_in_vmx_p
= true;
3449 reg_addr
[DImode
].scalar_in_vmx_p
= true;
3451 if (TARGET_P8_VECTOR
)
3453 reg_addr
[SFmode
].scalar_in_vmx_p
= true;
3454 reg_addr
[SImode
].scalar_in_vmx_p
= true;
3456 if (TARGET_P9_VECTOR
)
3458 reg_addr
[HImode
].scalar_in_vmx_p
= true;
3459 reg_addr
[QImode
].scalar_in_vmx_p
= true;
3464 /* Setup the fusion operations. */
3465 if (TARGET_P8_FUSION
)
3467 reg_addr
[QImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_qi
;
3468 reg_addr
[HImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_hi
;
3469 reg_addr
[SImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_si
;
3471 reg_addr
[DImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_di
;
  if (TARGET_P9_FUSION)
    {
      struct fuse_insns {
	enum machine_mode mode;			/* mode of the fused type.  */
	enum machine_mode pmode;		/* pointer mode.  */
	enum rs6000_reload_reg_type rtype;	/* register type.  */
	enum insn_code load;			/* load insn.  */
	enum insn_code store;			/* store insn.  */
      };

      static const struct fuse_insns addis_insns[] = {
3485 { E_SFmode
, E_DImode
, RELOAD_REG_FPR
,
3486 CODE_FOR_fusion_vsx_di_sf_load
,
3487 CODE_FOR_fusion_vsx_di_sf_store
},
3489 { E_SFmode
, E_SImode
, RELOAD_REG_FPR
,
3490 CODE_FOR_fusion_vsx_si_sf_load
,
3491 CODE_FOR_fusion_vsx_si_sf_store
},
3493 { E_DFmode
, E_DImode
, RELOAD_REG_FPR
,
3494 CODE_FOR_fusion_vsx_di_df_load
,
3495 CODE_FOR_fusion_vsx_di_df_store
},
3497 { E_DFmode
, E_SImode
, RELOAD_REG_FPR
,
3498 CODE_FOR_fusion_vsx_si_df_load
,
3499 CODE_FOR_fusion_vsx_si_df_store
},
3501 { E_DImode
, E_DImode
, RELOAD_REG_FPR
,
3502 CODE_FOR_fusion_vsx_di_di_load
,
3503 CODE_FOR_fusion_vsx_di_di_store
},
3505 { E_DImode
, E_SImode
, RELOAD_REG_FPR
,
3506 CODE_FOR_fusion_vsx_si_di_load
,
3507 CODE_FOR_fusion_vsx_si_di_store
},
3509 { E_QImode
, E_DImode
, RELOAD_REG_GPR
,
3510 CODE_FOR_fusion_gpr_di_qi_load
,
3511 CODE_FOR_fusion_gpr_di_qi_store
},
3513 { E_QImode
, E_SImode
, RELOAD_REG_GPR
,
3514 CODE_FOR_fusion_gpr_si_qi_load
,
3515 CODE_FOR_fusion_gpr_si_qi_store
},
3517 { E_HImode
, E_DImode
, RELOAD_REG_GPR
,
3518 CODE_FOR_fusion_gpr_di_hi_load
,
3519 CODE_FOR_fusion_gpr_di_hi_store
},
3521 { E_HImode
, E_SImode
, RELOAD_REG_GPR
,
3522 CODE_FOR_fusion_gpr_si_hi_load
,
3523 CODE_FOR_fusion_gpr_si_hi_store
},
3525 { E_SImode
, E_DImode
, RELOAD_REG_GPR
,
3526 CODE_FOR_fusion_gpr_di_si_load
,
3527 CODE_FOR_fusion_gpr_di_si_store
},
3529 { E_SImode
, E_SImode
, RELOAD_REG_GPR
,
3530 CODE_FOR_fusion_gpr_si_si_load
,
3531 CODE_FOR_fusion_gpr_si_si_store
},
3533 { E_SFmode
, E_DImode
, RELOAD_REG_GPR
,
3534 CODE_FOR_fusion_gpr_di_sf_load
,
3535 CODE_FOR_fusion_gpr_di_sf_store
},
3537 { E_SFmode
, E_SImode
, RELOAD_REG_GPR
,
3538 CODE_FOR_fusion_gpr_si_sf_load
,
3539 CODE_FOR_fusion_gpr_si_sf_store
},
3541 { E_DImode
, E_DImode
, RELOAD_REG_GPR
,
3542 CODE_FOR_fusion_gpr_di_di_load
,
3543 CODE_FOR_fusion_gpr_di_di_store
},
3545 { E_DFmode
, E_DImode
, RELOAD_REG_GPR
,
3546 CODE_FOR_fusion_gpr_di_df_load
,
3547 CODE_FOR_fusion_gpr_di_df_store
},
3550 machine_mode cur_pmode
= Pmode
;
3553 for (i
= 0; i
< ARRAY_SIZE (addis_insns
); i
++)
3555 machine_mode xmode
= addis_insns
[i
].mode
;
3556 enum rs6000_reload_reg_type rtype
= addis_insns
[i
].rtype
;
3558 if (addis_insns
[i
].pmode
!= cur_pmode
)
3561 if (rtype
== RELOAD_REG_FPR
&& !TARGET_HARD_FLOAT
)
3564 reg_addr
[xmode
].fusion_addis_ld
[rtype
] = addis_insns
[i
].load
;
3565 reg_addr
[xmode
].fusion_addis_st
[rtype
] = addis_insns
[i
].store
;
3567 if (rtype
== RELOAD_REG_FPR
&& TARGET_P9_VECTOR
)
3569 reg_addr
[xmode
].fusion_addis_ld
[RELOAD_REG_VMX
]
3570 = addis_insns
[i
].load
;
3571 reg_addr
[xmode
].fusion_addis_st
[RELOAD_REG_VMX
]
3572 = addis_insns
[i
].store
;
3577 /* Note which types we support fusing TOC setup plus memory insn. We only do
3578 fused TOCs for medium/large code models. */
3579 if (TARGET_P8_FUSION
&& TARGET_TOC_FUSION
&& TARGET_POWERPC64
3580 && (TARGET_CMODEL
!= CMODEL_SMALL
))
3582 reg_addr
[QImode
].fused_toc
= true;
3583 reg_addr
[HImode
].fused_toc
= true;
3584 reg_addr
[SImode
].fused_toc
= true;
3585 reg_addr
[DImode
].fused_toc
= true;
3586 if (TARGET_HARD_FLOAT
)
3588 if (TARGET_SINGLE_FLOAT
)
3589 reg_addr
[SFmode
].fused_toc
= true;
3590 if (TARGET_DOUBLE_FLOAT
)
3591 reg_addr
[DFmode
].fused_toc
= true;
3595 /* Precalculate HARD_REGNO_NREGS. */
3596 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3597 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3598 rs6000_hard_regno_nregs
[m
][r
]
3599 = rs6000_hard_regno_nregs_internal (r
, (machine_mode
)m
);
3601 /* Precalculate HARD_REGNO_MODE_OK. */
3602 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3603 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3604 if (rs6000_hard_regno_mode_ok (r
, (machine_mode
)m
))
3605 rs6000_hard_regno_mode_ok_p
[m
][r
] = true;
  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  machine_mode m2 = (machine_mode) m;
	  int reg_size2 = reg_size;

	  /* TDmode & IBM 128-bit floating point always takes 2 registers,
	     even if the value lives in VSX registers.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
	}
    }
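
  /* Worked example (illustrative, not part of the original source): a 16-byte
     IBM 128-bit float, which FLOAT128_2REG_P forces to reg_size2 =
     UNITS_PER_FP_WORD = 8, needs (16 + 8 - 1) / 8 = 2 registers, while the
     same 16 bytes held in a VSX class with reg_size 16 needs
     (16 + 16 - 1) / 16 = 1.  The rounding-up division lets one formula cover
     every mode/class pair.  */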
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
		 "-ffast-math");
      if (flag_trapping_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip",
		 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
		 "-ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost = %d\n"
		 "SImode constant mult cost = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost = %d\n"
		 "SImode division cost = %d\n"
		 "DImode division cost = %d\n"
		 "Simple fp operation cost = %d\n"
		 "DFmode multiplication cost = %d\n"
		 "SFmode division cost = %d\n"
		 "DFmode division cost = %d\n"
		 "cache line size = %d\n"
		 "l1 cache size = %d\n"
		 "l2 cache size = %d\n"
		 "simultaneous prefetches = %d\n"
		 "\n",
		 rs6000_cost->mulsi, rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9, rs6000_cost->muldi,
		 rs6000_cost->divsi, rs6000_cost->divdi,
		 rs6000_cost->fp, rs6000_cost->dmul,
		 rs6000_cost->sdiv, rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size, rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }
  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    {
      rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
    }
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like PAIRED are no longer in target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC    : 0)
	  | ((TARGET_CMPB)		    ? RS6000_BTM_CMPB	    : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	    : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	    : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	    : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	    : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE    : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES   : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD    : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	    : 0)
	  | ((TARGET_P8_VECTOR)		    ? RS6000_BTM_P8_VECTOR  : 0)
	  | ((TARGET_P9_VECTOR)		    ? RS6000_BTM_P9_VECTOR  : 0)
	  | ((TARGET_P9_MISC)		    ? RS6000_BTM_P9_MISC    : 0)
	  | ((TARGET_MODULO)		    ? RS6000_BTM_MODULO     : 0)
	  | ((TARGET_64BIT)		    ? RS6000_BTM_64BIT	    : 0)
	  | ((TARGET_CRYPTO)		    ? RS6000_BTM_CRYPTO	    : 0)
	  | ((TARGET_HTM)		    ? RS6000_BTM_HTM	    : 0)
	  | ((TARGET_DFP)		    ? RS6000_BTM_DFP	    : 0)
	  | ((TARGET_HARD_FLOAT)	    ? RS6000_BTM_HARD_FLOAT : 0)
	  | ((TARGET_LONG_DOUBLE_128)	    ? RS6000_BTM_LDBL128    : 0)
	  | ((TARGET_FLOAT128_TYPE)	    ? RS6000_BTM_FLOAT128   : 0));
}
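
/* Illustrative sketch (not part of the original file): a caller that wants to
   know whether a builtin requiring both VSX and power8-vector support is
   currently available could test the computed mask like this:

     HOST_WIDE_INT need = RS6000_BTM_VSX | RS6000_BTM_P8_VECTOR;
     bool ok = (rs6000_builtin_mask_calculate () & need) == need;

   The "need" mask here is hypothetical; the real builtin tables carry their
   own per-builtin mask that is compared against rs6000_builtin_mask in the
   same way.  */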
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		      vec<const char *> &/*constraints*/,
		      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
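
/* Example of why the implicit clobber matters (illustrative only, assuming a
   user-written inline asm): a statement such as

     asm ("addic %0,%1,-1" : "=r" (out) : "r" (in));

   modifies XER[CA] without listing it in its clobber list.  Pushing the CA
   register onto CLOBBERS above keeps such pre-GCC-5 code working unchanged.  */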
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index, tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  /* Remember the explicit arguments.  */
  rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries",
	     "-malign-power");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;
  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = SImode;
      rs6000_pointer_size = 32;
    }
  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
		   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else if (implicit_cpu)
    {
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
      have_cpu = true;
    }
  else
    {
      /* PowerPC 64-bit LE requires at least ISA 2.07.  */
      const char *default_cpu = ((!TARGET_POWERPC64)
				 ? "powerpc"
				 : ((BYTES_BIG_ENDIAN)
				    ? "powerpc64"
				    : "powerpc64le"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = false;
    }

  gcc_assert (cpu_index >= 0);
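
  /* Usage sketch (hypothetical command lines): "gcc -mcpu=power8" selects both
     the power8 ISA bits and power8 tuning, while
     "gcc -mcpu=power8 -mtune=power9" keeps the power8 ISA but schedules for
     power9.  A compiler configured with --with-cpu=power7 behaves as if
     -mcpu=power7 were always given, which is the implicit_cpu path above.  */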
#ifndef HAVE_AS_POWER9
  if (processor_target_table[rs6000_cpu_index].processor
      == PROCESSOR_POWER9)
    {
      have_cpu = false;
      warning (0, "will not generate power9 instructions because "
	       "assembler lacks power9 support");
    }
#endif
#ifndef HAVE_AS_POWER8
  if (processor_target_table[rs6000_cpu_index].processor
      == PROCESSOR_POWER8)
    {
      have_cpu = false;
      warning (0, "will not generate power8 instructions because "
	       "assembler lacks power8 support");
    }
#endif
#ifndef HAVE_AS_POPCNTD
  if (processor_target_table[rs6000_cpu_index].processor
      == PROCESSOR_POWER7)
    {
      have_cpu = false;
      warning (0, "will not generate power7 instructions because "
	       "assembler lacks power7 support");
    }
#endif
#ifndef HAVE_AS_DFP
  if (processor_target_table[rs6000_cpu_index].processor
      == PROCESSOR_POWER6)
    {
      have_cpu = false;
      warning (0, "will not generate power6 instructions because "
	       "assembler lacks power6 support");
    }
#endif
#ifndef HAVE_AS_POPCNTB
  if (processor_target_table[rs6000_cpu_index].processor
      == PROCESSOR_POWER5)
    {
      have_cpu = false;
      warning (0, "will not generate power5 instructions because "
	       "assembler lacks power5 support");
    }
#endif

  if (!have_cpu)
    {
      /* PowerPC 64-bit LE requires at least ISA 2.07.  */
      const char *default_cpu = (!TARGET_POWERPC64
				 ? "powerpc"
				 : (BYTES_BIG_ENDIAN
				    ? "powerpc64"
				    : "powerpc64le"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
    }
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (have_cpu)
    {
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
	 POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
	 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
	 switched to using rs6000_isa_flags, we need to do the initialization
	 here.

	 If there is a TARGET_DEFAULT, use that.  Otherwise fall back to using
	 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults.  */
      HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
			     : processor_target_table[cpu_index].target_enable);
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }
  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
    }
  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mmultiple");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mstring");
	}
    }
  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }

  if (!rs6000_fold_gimple)
    fprintf (stderr,
	     "gimple folding of rs6000 builtins has been disabled.\n");
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }
  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_P9_MINMAX)
    {
      if (have_cpu)
	{
	  if (cpu_index == PROCESSOR_POWER9)
	    {
	      /* legacy behavior: allow -mcpu=power9 with certain
		 capabilities explicitly disabled.  */
	      rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
	    }
	  else
	    error ("power9 target option is incompatible with %<%s=<xxx>%> "
		   "for <xxx> less than power9", "-mcpu");
	}
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
	       != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
		   & rs6000_isa_flags_explicit))
	{
	  /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
	     were explicitly cleared.  */
	  error ("%qs incompatible with explicitly disabled options",
		 "-mpower9-minmax");
	}
      else
	rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }
  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
	error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
	     not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
	error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
	warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
			    | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory is not available in little endian "
		       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
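
  /* Illustrative option combinations (hypothetical command lines): on a
     64-bit big-endian target, "-mquad-memory" alone also turns on
     "-mquad-memory-atomic"; "-mquad-memory -mno-quad-memory-atomic" keeps only
     the non-atomic form; and in 32-bit mode both are silently dropped after
     the warnings above.  */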
  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);

  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  if (TARGET_P8_FUSION_SIGN)
	    error ("%qs requires %qs", "-mpower8-fusion-sign",
		   "-mpower8-fusion");

	  if (TARGET_TOC_FUSION)
	    error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

	  rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  /* We prefer to not mention undocumented options in
	     error messages.  However, if users have managed to select
	     power9-fusion without selecting power8-fusion, they
	     already know about undocumented flags.  */
	  error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
	  rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P9_FUSION);
  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
     model.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
	 error messages.  However, if users have managed to select
	 power9-vector without selecting power8-vector, they
	 already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
	error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P9_VECTOR is explicit and
	     OPTION_MASK_P8_VECTOR is not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
	  rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
    }
  /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
	  && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
	error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}

      else if (!TARGET_ALLOW_MOVMISALIGN)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-munefficient-unaligned-vsx",
		   "-mallow-movmisalign");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}
    }
  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

  /* Set -mabi=ieeelongdouble on some old targets.  Note, AIX and Darwin
     explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
     pick up this default.  */
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems, but don't enable the __float128 keyword.  */
  if (TARGET_VSX && TARGET_LONG_DOUBLE_128
      && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
      && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
  /* IEEE 128-bit floating point requires VSX support.  */
  if (!TARGET_VSX)
    {
      if (TARGET_FLOAT128_KEYWORD)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    error ("%qs requires VSX support", "-mfloat128");

	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				| OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}

      else if (TARGET_FLOAT128_TYPE)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
	    error ("%qs requires VSX support", "-mfloat128-type");

	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				| OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}
    }

  /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
     128-bit floating point support to be enabled.  */
  if (!TARGET_FLOAT128_TYPE)
    {
      if (TARGET_FLOAT128_KEYWORD)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    {
	      error ("%qs requires %qs", "-mfloat128", "-mfloat128-type");
	      rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				    | OPTION_MASK_FLOAT128_KEYWORD
				    | OPTION_MASK_FLOAT128_HW);
	    }
	  else
	    rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
	}

      if (TARGET_FLOAT128_HW)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	    {
	      error ("%qs requires %qs", "-mfloat128-hardware",
		     "-mfloat128-type");
	      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
	    }
	  else
	    rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				  | OPTION_MASK_FLOAT128_KEYWORD
				  | OPTION_MASK_FLOAT128_HW);
	}
    }
  /* If we have -mfloat128-type and full ISA 3.0 support, enable
     -mfloat128-hardware by default.  However, don't enable the __float128
     keyword.  If the user explicitly turned on -mfloat128-hardware, enable the
     -mfloat128 option as well if it was not already set.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
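
  /* Illustrative example (assuming a 64-bit Linux VSX target): with
     "-mcpu=power9 -mfloat128" the ISA_3_0_MASKS_IEEE bits are all present, so
     the logic above also turns on -mfloat128-hardware; with
     "-mcpu=power8 -mfloat128" the __float128 type still exists but its
     operations go through software emulation, because the full ISA 3.0 IEEE
     mask is not satisfied.  */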
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
	= rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
	= rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	error ("unknown vectorization library ABI type (%qs) for "
	       "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
			   | OPTION_MASK_FLOAT128_TYPE
			   | OPTION_MASK_FLOAT128_KEYWORD)
			  & ~rs6000_isa_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (!global_options_set.x_rs6000_altivec_abi
      && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL &&
	  !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = 0;
      rs6000_double_float = 0;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
	rs6000_isa_flags &= ~OPTION_MASK_ISEL;
      break;
    }
  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }
  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_POWER7
			&& rs6000_cpu != PROCESSOR_POWER8
			&& rs6000_cpu != PROCESSOR_POWER9
			&& rs6000_cpu != PROCESSOR_PPCA2
			&& rs6000_cpu != PROCESSOR_CELL
			&& rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5
			 || rs6000_cpu == PROCESSOR_POWER7
			 || rs6000_cpu == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6
				 || rs6000_cpu == PROCESSOR_POWER7
				 || rs6000_cpu == PROCESSOR_POWER8
				 || rs6000_cpu == PROCESSOR_POWER9
				 || rs6000_cpu == PROCESSOR_PPCE500MC
				 || rs6000_cpu == PROCESSOR_PPCE500MC64
				 || rs6000_cpu == PROCESSOR_PPCE5500
				 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
  /* Handle stack protector */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif

  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
	error ("%qs is not a valid number in %qs", str,
	       "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
	  || (TARGET_64BIT && (offset & 3)))
	error ("%qs is not a valid offset in %qs", str,
	       "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
	error ("%qs is not a valid base register in %qs", str,
	       "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	 aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
	  || rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    {
	      can_override_loop_align = 1;
	      align_loops = 16;
	    }
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;

      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;

      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;

      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;

      case PROCESSOR_POWER8:
	rs6000_cost = &power8_cost;
	break;

      case PROCESSOR_POWER9:
	rs6000_cost = &power9_cost;
	break;

      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;

      default:
	gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 rs6000_cost->simultaneous_prefetches,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 rs6000_cost->cache_line_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Use the 'model' -fsched-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
			 SCHED_PRESSURE_MODEL,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;
  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for %<%s=%s%>", "-mrecip", q);
		  invert = false;
		  mask = 0;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }
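
  /* Usage sketch (hypothetical keyword names): "-mrecip=default,!divd" would
     first OR in the precision-appropriate default mask and then clear the bits
     named by the "divd" entry of recip_options, because a leading '!' routes a
     keyword through the "&= ~mask" branch above.  The exact keyword spellings
     come from the recip_options[] table defined elsewhere in this file.  */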
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like PAIRED are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
				  rs6000_builtin_mask);

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific options */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return
     the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
	  || rs6000_cpu == PROCESSOR_POWER5
	  || rs6000_cpu == PROCESSOR_POWER6
	  || rs6000_cpu == PROCESSOR_POWER7
	  || rs6000_cpu == PROCESSOR_POWER8
	  || rs6000_cpu == PROCESSOR_POWER9))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
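
/* Worked example: when rs6000_loop_align returns 5 for a small loop, the loop
   start is aligned to 1 << 5 = 32 bytes, and rs6000_loop_align_max_skip allows
   up to (1 << 5) - 1 = 31 bytes of padding to reach that boundary.  */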
/* Return true iff, data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
	return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
	return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
	return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return true;

      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
      return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
	return 4;
      else
	return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
      if (TARGET_P9_VECTOR)
	return 3;

      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 22;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned loads are not supported.  */
	gcc_unreachable ();

      return 2;

    case unaligned_store:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 23;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned stores are not supported.  */
	gcc_unreachable ();

      return 2;

    case vec_construct:
      /* This is a rough approximation assuming non-constant elements
	 constructed into a vector via element insertion.  FIXME:
	 vec_construct is not granular enough for uniformly good
	 decisions.  If the initialization is a splat, this is
	 cheaper than we estimate.  Improve this someday.  */
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
	 precision, so we need 2 permutes, 2 converts, and 1 merge
	 to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
	  && TYPE_PRECISION (elem_type) == 32)
	return 5;
      /* On POWER9, integer vector types are built up in GPRs and then
	 use a direct move (2 cycles).  For POWER8 this is even worse,
	 as we need two direct moves and a merge, and the direct moves
	 take 5 cycles.  */
      else if (INTEGRAL_TYPE_P (elem_type))
	{
	  if (TARGET_P9_VECTOR)
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
	  else
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
	}
      else
	/* V2DFmode doesn't need a direct move.  */
	return 2;

    default:
      gcc_unreachable ();
    }
}
/* Implement targetm.vectorize.preferred_simd_mode.  */

static machine_mode
rs6000_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case E_SFmode: return V4SFmode;
      case E_SImode: return V4SImode;
      case E_HImode: return V8HImode;
      case E_QImode: return V16QImode;
      default: break;
      }

  if (TARGET_PAIRED_FLOAT && mode == SFmode)
    return V2SFmode;

  return word_mode;
}
typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;

/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
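/* Worked example of the penalty above: a loop body whose vectorized
   statements cost 90 while the remaining statements cost 5 has a density of
   90 * 100 / 95 = 94% and a total size of 95.  Both thresholds are exceeded,
   so the body cost is raised to 90 * (100 + 10) / 100 = 99.  */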
/* Implement targetm.vectorize.init_cost.  */

/* For each vectorized loop, this var holds TRUE iff a non-memory vector
   instruction is needed by the vectorization.  */
static bool rs6000_vect_nonmem;

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  rs6000_vect_nonmem = false;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;

      /* Check whether we're doing something other than just a copy loop.
         Not all such loops may be profitably vectorized; see
         rs6000_finish_cost.  */
      if ((kind == vec_to_scalar || kind == vec_perm
           || kind == vec_promote_demote || kind == vec_construct
           || kind == scalar_to_vec)
          || (where == vect_body && kind == vector_stmt))
        rs6000_vect_nonmem = true;
    }

  return retval;
}
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  /* Don't vectorize minimum-vectorization-factor, simple copy loops
     that require versioning for any reason.  The vectorization is at
     best a wash inside the loop, and the versioning checks make
     profitability highly unlikely and potentially quite harmful.  */
  if (cost_data->loop_info)
    {
      loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
      if (!rs6000_vect_nonmem
          && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
          && LOOP_REQUIRES_VERSIONING (vec_info))
        cost_data->cost[vect_body] += 10000;
    }

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
                                   tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    CASE_CFN_ATAN2:
    CASE_CFN_HYPOT:
    CASE_CFN_POW:
      n_args = 2;
      gcc_fallthrough ();

    CASE_CFN_ACOS:
    CASE_CFN_ACOSH:
    CASE_CFN_ASIN:
    CASE_CFN_ASINH:
    CASE_CFN_ATAN:
    CASE_CFN_ATANH:
    CASE_CFN_CBRT:
    CASE_CFN_COS:
    CASE_CFN_COSH:
    CASE_CFN_ERF:
    CASE_CFN_ERFC:
    CASE_CFN_EXP:
    CASE_CFN_EXP2:
    CASE_CFN_EXPM1:
    CASE_CFN_LGAMMA:
    CASE_CFN_LOG:
    CASE_CFN_LOG10:
    CASE_CFN_LOG1P:
    CASE_CFN_LOG2:
    CASE_CFN_SIN:
    CASE_CFN_SINH:
    CASE_CFN_SQRT:
    CASE_CFN_TAN:
    CASE_CFN_TANH:
      if (el_mode == DFmode && n == 2)
        {
          bdecl = mathfn_built_in (double_type_node, fn);
          suffix = "d2";        /* pow -> powd2 */
        }
      else if (el_mode == SFmode && n == 4)
        {
          bdecl = mathfn_built_in (float_type_node, fn);
          suffix = "4";         /* powf -> powf4 */
        }
      else
        return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
                                    tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
             combined_fn_name (combined_fn (fn)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    CASE_CFN_COPYSIGN:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
      break;

    CASE_CFN_CEIL:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
      break;

    CASE_CFN_FLOOR:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
      break;

    CASE_CFN_FMA:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
      break;

    CASE_CFN_TRUNC:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
      break;

    CASE_CFN_NEARBYINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && flag_unsafe_math_optimizations
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && flag_unsafe_math_optimizations
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
      break;

    CASE_CFN_RINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && !flag_trapping_math
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
          && !flag_trapping_math
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
      break;

    default:
      break;
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);

  return NULL_TREE;
}
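/* For example, when the vector unit handles V2DFmode, a vectorized ceil
   (round toward +infinity) over doubles maps to VSX_BUILTIN_XVRDPIP above,
   the single-precision V4SFmode variant maps to VSX_BUILTIN_XVRSPIP, and
   plain AltiVec falls back to ALTIVEC_BUILTIN_VRFIP; the floor, fma and
   trunc groups follow the same three-way pattern.  */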
/* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION.  */

static tree
rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
                                       tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
             IDENTIFIER_POINTER (DECL_NAME (fndecl)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  enum rs6000_builtins fn
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  switch (fn)
    {
    case RS6000_BUILTIN_RSQRTF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
      break;
    case RS6000_BUILTIN_RSQRT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
      break;
    case RS6000_BUILTIN_RECIPF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
          && out_mode == SFmode && out_n == 4
          && in_mode == SFmode && in_n == 4)
        return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
      break;
    case RS6000_BUILTIN_RECIP:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
          && out_mode == DFmode && out_n == 2
          && in_mode == DFmode && in_n == 2)
        return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
      break;
    default:
      break;
    }
  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
        {
          fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
          start = "";
        }

      if (global_options_set.x_rs6000_cpu_index)
        {
          fprintf (file, "%s -mcpu=%s", start,
                   processor_target_table[rs6000_cpu_index].name);
          start = "";
        }

      if (global_options_set.x_rs6000_tune_index)
        {
          fprintf (file, "%s -mtune=%s", start,
                   processor_target_table[rs6000_tune_index].name);
          start = "";
        }

      if (PPC405_ERRATUM77)
        {
          fprintf (file, "%s PPC405CR_ERRATUM77", start);
          start = "";
        }

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
        {
        case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
        case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
        case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
        case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
        }

      if (rs6000_sdata && g_switch_value)
        {
          fprintf (file, "%s -G %d", start,
                   g_switch_value);
          start = "";
        }
#endif

      if (*start == '\0')
        putc ('\n', file);
    }

#ifdef USING_ELFOS_H
  if (!(rs6000_default_cpu && rs6000_default_cpu[0])
      && !global_options_set.x_rs6000_cpu_index)
    {
      fputs ("\t.machine ", asm_out_file);
      if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
        fputs ("power9\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
        fputs ("power8\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
        fputs ("power7\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
        fputs ("power6\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
        fputs ("power5\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
        fputs ("power4\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
        fputs ("ppc64\n", asm_out_file);
      else
        fputs ("ppc\n", asm_out_file);
    }
#endif

  if (DEFAULT_ABI == ABI_ELFv2)
    fprintf (file, "\t.abiversion 2\n");
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
          && info->first_fp_reg_save == 64
          && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
          && ! info->lr_save_p
          && ! info->cr_save_p
          && info->vrsave_size == 0
          && ! info->push_p)
        return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
           && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
        return 2;

      high >>= 1;

      if (low == 0)
        return num_insns_constant_wide (high) + 1;
      else if (high == 0)
        return num_insns_constant_wide (low) + 1;
      else
        return (num_insns_constant_wide (high)
                + num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
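/* Worked example (counting the usual addi/addis/ori sequences): 0x7fff fits
   the sign-extended 16-bit test and needs one insn, 0x12340000 is reachable
   with a single addis, 0x12345678 needs an addis/ori pair (two insns), and
   wider 64-bit values recurse on their halves plus one more insn to combine
   them.  */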
int
num_insns_constant (rtx op, machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
          && rs6000_is_valid_and_mask (op, mode))
        return 2;
      else
        return num_insns_constant_wide (INTVAL (op));

    case CONST_WIDE_INT:
      {
        int i;
        int ins = CONST_WIDE_INT_NUNITS (op) - 1;
        for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
          ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
        return ins;
      }

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
        {
          long l;

          if (DECIMAL_FLOAT_MODE_P (mode))
            REAL_VALUE_TO_TARGET_DECIMAL32
              (*CONST_DOUBLE_REAL_VALUE (op), l);
          else
            REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
          return num_insns_constant_wide ((HOST_WIDE_INT) l);
        }
      else
        {
          long l[2];

          if (DECIMAL_FLOAT_MODE_P (mode))
            REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
          else
            REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
          high = l[WORDS_BIG_ENDIAN == 0];
          low  = l[WORDS_BIG_ENDIAN != 0];

          if (TARGET_32BIT)
            return (num_insns_constant_wide (low)
                    + num_insns_constant_wide (high));
          else
            {
              if ((high == 0 && low >= 0)
                  || (high == -1 && low < 0))
                return num_insns_constant_wide (low);

              else if (rs6000_is_valid_and_mask (op, mode))
                return 2;

              else if (low == 0)
                return num_insns_constant_wide (high) + 1;

              else
                return (num_insns_constant_wide (high)
                        + num_insns_constant_wide (low) + 1);
            }
        }

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
              && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */
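/* For instance, the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 } is matched
   with STEP == 1 and COPIES == 1 (a single vspltish 5); the V4SImode constant
   whose words are all 0x00030003 is matched with COPIES == 2, since each word
   holds two replicas of the splatted halfword 3; and the big-endian V8HImode
   constant { 0, 3, 0, 3, 0, 3, 0, 3 } is matched with STEP == 2, coming from
   a vspltisw 3 whose per-word msb fills the other halfwords with zero.  */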
static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  splat_val = val;
  msb_val = val >= 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((HOST_WIDE_INT)
          ((unsigned HOST_WIDE_INT) small_val << bitsize)
          | (small_val & mask)))
        return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
           && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 1; i < nunits; ++i)
    {
      HOST_WIDE_INT desired_val;
      unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
      if ((i & (step - 1)) == 0)
        desired_val = val;
      else
        desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, elt))
        return false;
    }

  return true;
}
/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */
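/* For example, the big-endian V16QImode constant made of twelve 5s followed
   by four zero bytes cannot be splatted directly, but vspltisb 5 followed by
   a VSLDOI that shifts four zero bytes in from the right does produce it, so
   this routine returns 4 for that vector.  */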
static int
vspltis_shifted (rtx op)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i, j;
  unsigned nunits;
  unsigned mask;

  HOST_WIDE_INT val;

  if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
    return 0;

  /* We need to create pseudo registers to do the shift, so don't recognize
     shift vector constants after reload.  */
  if (!can_create_pseudo_p ())
    return 0;

  nunits = GET_MODE_NUNITS (mode);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);

  /* Check if the value can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (val))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (val, inner))
    ;

  else
    return 0;

  /* Check if VAL is present in every STEP-th element until we find elements
     that are 0 or all 1 bits.  */
  for (i = 1; i < nunits; ++i)
    {
      unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
      HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);

      /* If the value isn't the splat value, check for the remaining elements
         being 0 or -1.  */
      if (val != elt_val)
        {
          if (elt_val == 0)
            {
              for (j = i+1; j < nunits; ++j)
                {
                  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
                  if (const_vector_elt_as_int (op, elt2) != 0)
                    return 0;
                }

              return (nunits - i) * GET_MODE_SIZE (inner);
            }

          else if ((elt_val & mask) == mask)
            {
              for (j = i+1; j < nunits; ++j)
                {
                  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
                  if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
                    return 0;
                }

              return -((nunits - i) * GET_MODE_SIZE (inner));
            }

          else
            return 0;
        }
    }

  /* If all elements are equal, we don't need to do VSLDOI.  */
  return 0;
}
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  else if (mode == V2DImode)
    {
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
          || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
        return false;

      if (zero_constant (op, mode))
        return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
          && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
        return true;

      return false;
    }

  /* V1TImode is a special container for TImode.  Ignore for now.  */
  else if (mode == V1TImode)
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  if (vspltis_shifted (op) != 0)
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));

  gcc_unreachable ();
}
/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   to by NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */

bool
xxspltib_constant_p (rtx op,
                     machine_mode mode,
                     int *num_insns_ptr,
                     int *constant_ptr)
{
  size_t nunits = GET_MODE_NUNITS (mode);
  size_t i;
  HOST_WIDE_INT value;
  rtx element;

  /* Set the returned values to out of bound values.  */
  *num_insns_ptr = -1;
  *constant_ptr = 256;

  if (!TARGET_P9_VECTOR)
    return false;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
    return false;

  /* Handle (vec_duplicate <constant>).  */
  if (GET_CODE (op) == VEC_DUPLICATE)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
          && mode != V2DImode)
        return false;

      element = XEXP (op, 0);
      if (!CONST_INT_P (element))
        return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
        return false;
    }

  /* Handle (const_vector [...]).  */
  else if (GET_CODE (op) == CONST_VECTOR)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
          && mode != V2DImode)
        return false;

      element = CONST_VECTOR_ELT (op, 0);
      if (!CONST_INT_P (element))
        return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
        return false;

      for (i = 1; i < nunits; i++)
        {
          element = CONST_VECTOR_ELT (op, i);
          if (!CONST_INT_P (element))
            return false;

          if (value != INTVAL (element))
            return false;
        }
    }

  /* Handle integer constants being loaded into the upper part of the VSX
     register as a scalar.  If the value isn't 0/-1, only allow it if the mode
     can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLITIB.  */
  else if (CONST_INT_P (op))
    {
      if (!SCALAR_INT_MODE_P (mode))
        return false;

      value = INTVAL (op);
      if (!IN_RANGE (value, -128, 127))
        return false;

      if (!IN_RANGE (value, -1, 0))
        {
          if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
            return false;

          if (EASY_VECTOR_15 (value))
            return false;
        }
    }

  else
    return false;

  /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
     sign extend.  Special case 0/-1 to allow getting any VSX register instead
     of an Altivec register.  */
  if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
      && EASY_VECTOR_15 (value))
    return false;

  /* Return # of instructions and the constant byte for XXSPLTIB.  */
  if (mode == V16QImode)
    *num_insns_ptr = 1;

  else if (IN_RANGE (value, -1, 0))
    *num_insns_ptr = 1;

  else
    *num_insns_ptr = 2;

  *constant_ptr = (int) value;
  return true;
}
const char *
output_vec_const_move (rtx *operands)
{
  int shift;
  machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
      int xxspltib_value = 256;
      int num_insns = -1;

      if (zero_constant (vec, mode))
        {
          if (TARGET_P9_VECTOR)
            return "xxspltib %x0,0";

          else if (dest_vmx_p)
            return "vspltisw %0,0";

          else
            return "xxlxor %x0,%x0,%x0";
        }

      if (all_ones_constant (vec, mode))
        {
          if (TARGET_P9_VECTOR)
            return "xxspltib %x0,255";

          else if (dest_vmx_p)
            return "vspltisw %0,-1";

          else if (TARGET_P8_VECTOR)
            return "xxlorc %x0,%x0,%x0";

          else
            gcc_unreachable ();
        }

      if (TARGET_P9_VECTOR
          && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
        {
          if (num_insns == 1)
            {
              operands[2] = GEN_INT (xxspltib_value & 0xff);
              return "xxspltib %x0,%2";
            }

          return "#";
        }
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;

      gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
      if (zero_constant (vec, mode))
        return "vspltisw %0,0";

      if (all_ones_constant (vec, mode))
        return "vspltisw %0,-1";

      /* Do we need to construct a value using VSLDOI?  */
      shift = vspltis_shifted (vec);
      if (shift != 0)
        return "#";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
        return "#";

      switch (GET_MODE (splat_vec))
        {
        case E_V4SImode:
          return "vspltisw %0,%1";

        case E_V8HImode:
          return "vspltish %0,%1";

        case E_V16QImode:
          return "vspltisb %0,%1";

        default:
          gcc_unreachable ();
        }
    }

  gcc_unreachable ();
}
6776 /* Initialize TARGET of vector PAIRED to VALS. */
6779 paired_expand_vector_init (rtx target
, rtx vals
)
6781 machine_mode mode
= GET_MODE (target
);
6782 int n_elts
= GET_MODE_NUNITS (mode
);
6784 rtx x
, new_rtx
, tmp
, constant_op
, op1
, op2
;
6787 for (i
= 0; i
< n_elts
; ++i
)
6789 x
= XVECEXP (vals
, 0, i
);
6790 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6795 /* Load from constant pool. */
6796 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
6802 /* The vector is initialized only with non-constants. */
6803 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
6804 XVECEXP (vals
, 0, 1));
6806 emit_move_insn (target
, new_rtx
);
6810 /* One field is non-constant and the other one is a constant. Load the
6811 constant from the constant pool and use ps_merge instruction to
6812 construct the whole vector. */
6813 op1
= XVECEXP (vals
, 0, 0);
6814 op2
= XVECEXP (vals
, 0, 1);
6816 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
6818 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
6819 emit_move_insn (tmp
, constant_op
);
6821 if (CONSTANT_P (op1
))
6822 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
6824 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
6826 emit_move_insn (target
, new_rtx
);
6830 paired_expand_vector_move (rtx operands
[])
6832 rtx op0
= operands
[0], op1
= operands
[1];
6834 emit_move_insn (op0
, op1
);
6837 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6838 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6839 operands for the relation operation COND. This is a recursive
6843 paired_emit_vector_compare (enum rtx_code rcode
,
6844 rtx dest
, rtx op0
, rtx op1
,
6845 rtx cc_op0
, rtx cc_op1
)
6847 rtx tmp
= gen_reg_rtx (V2SFmode
);
6850 gcc_assert (TARGET_PAIRED_FLOAT
);
6851 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
6857 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6861 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6862 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
6866 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
6869 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6872 tmp1
= gen_reg_rtx (V2SFmode
);
6873 max
= gen_reg_rtx (V2SFmode
);
6874 min
= gen_reg_rtx (V2SFmode
);
6875 gen_reg_rtx (V2SFmode
);
6877 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6878 emit_insn (gen_selv2sf4
6879 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6880 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
6881 emit_insn (gen_selv2sf4
6882 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6883 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
6884 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
6887 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6890 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6893 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6896 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6899 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6908 /* Emit vector conditional expression.
6909 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6910 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6913 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
6914 rtx cond
, rtx cc_op0
, rtx cc_op1
)
6916 enum rtx_code rcode
= GET_CODE (cond
);
6918 if (!TARGET_PAIRED_FLOAT
)
6921 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
6926 /* Initialize vector TARGET to VALS. */
6929 rs6000_expand_vector_init (rtx target
, rtx vals
)
6931 machine_mode mode
= GET_MODE (target
);
6932 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6933 int n_elts
= GET_MODE_NUNITS (mode
);
6934 int n_var
= 0, one_var
= -1;
6935 bool all_same
= true, all_const_zero
= true;
6939 for (i
= 0; i
< n_elts
; ++i
)
6941 x
= XVECEXP (vals
, 0, i
);
6942 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6943 ++n_var
, one_var
= i
;
6944 else if (x
!= CONST0_RTX (inner_mode
))
6945 all_const_zero
= false;
6947 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
6953 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
6954 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
6955 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
6957 /* Zero register. */
6958 emit_move_insn (target
, CONST0_RTX (mode
));
6961 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
6963 /* Splat immediate. */
6964 emit_insn (gen_rtx_SET (target
, const_vec
));
6969 /* Load from constant pool. */
6970 emit_move_insn (target
, const_vec
);
6975 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6976 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
6980 size_t num_elements
= all_same
? 1 : 2;
6981 for (i
= 0; i
< num_elements
; i
++)
6983 op
[i
] = XVECEXP (vals
, 0, i
);
6984 /* Just in case there is a SUBREG with a smaller mode, do a
6986 if (GET_MODE (op
[i
]) != inner_mode
)
6988 rtx tmp
= gen_reg_rtx (inner_mode
);
6989 convert_move (tmp
, op
[i
], 0);
6992 /* Allow load with splat double word. */
6993 else if (MEM_P (op
[i
]))
6996 op
[i
] = force_reg (inner_mode
, op
[i
]);
6998 else if (!REG_P (op
[i
]))
6999 op
[i
] = force_reg (inner_mode
, op
[i
]);
7004 if (mode
== V2DFmode
)
7005 emit_insn (gen_vsx_splat_v2df (target
, op
[0]));
7007 emit_insn (gen_vsx_splat_v2di (target
, op
[0]));
7011 if (mode
== V2DFmode
)
7012 emit_insn (gen_vsx_concat_v2df (target
, op
[0], op
[1]));
7014 emit_insn (gen_vsx_concat_v2di (target
, op
[0], op
[1]));
7019 /* Special case initializing vector int if we are on 64-bit systems with
7020 direct move or we have the ISA 3.0 instructions. */
7021 if (mode
== V4SImode
&& VECTOR_MEM_VSX_P (V4SImode
)
7022 && TARGET_DIRECT_MOVE_64BIT
)
7026 rtx element0
= XVECEXP (vals
, 0, 0);
7027 if (MEM_P (element0
))
7028 element0
= rs6000_address_for_fpconvert (element0
);
7030 element0
= force_reg (SImode
, element0
);
7032 if (TARGET_P9_VECTOR
)
7033 emit_insn (gen_vsx_splat_v4si (target
, element0
));
7036 rtx tmp
= gen_reg_rtx (DImode
);
7037 emit_insn (gen_zero_extendsidi2 (tmp
, element0
));
7038 emit_insn (gen_vsx_splat_v4si_di (target
, tmp
));
7047 for (i
= 0; i
< 4; i
++)
7049 elements
[i
] = XVECEXP (vals
, 0, i
);
7050 if (!CONST_INT_P (elements
[i
]) && !REG_P (elements
[i
]))
7051 elements
[i
] = copy_to_mode_reg (SImode
, elements
[i
]);
7054 emit_insn (gen_vsx_init_v4si (target
, elements
[0], elements
[1],
7055 elements
[2], elements
[3]));
7060 /* With single precision floating point on VSX, know that internally single
7061 precision is actually represented as a double, and either make 2 V2DF
7062 vectors, and convert these vectors to single precision, or do one
7063 conversion, and splat the result to the other elements. */
7064 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (V4SFmode
))
7068 rtx element0
= XVECEXP (vals
, 0, 0);
7070 if (TARGET_P9_VECTOR
)
7072 if (MEM_P (element0
))
7073 element0
= rs6000_address_for_fpconvert (element0
);
7075 emit_insn (gen_vsx_splat_v4sf (target
, element0
));
7080 rtx freg
= gen_reg_rtx (V4SFmode
);
7081 rtx sreg
= force_reg (SFmode
, element0
);
7082 rtx cvt
= (TARGET_XSCVDPSPN
7083 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
7084 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
7087 emit_insn (gen_vsx_xxspltw_v4sf_direct (target
, freg
,
7093 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
7094 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
7095 rtx flt_even
= gen_reg_rtx (V4SFmode
);
7096 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
7097 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
7098 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
7099 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
7100 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
7102 /* Use VMRGEW if we can instead of doing a permute. */
7103 if (TARGET_P8_VECTOR
)
7105 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op2
));
7106 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op1
, op3
));
7107 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7108 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7109 if (BYTES_BIG_ENDIAN
)
7110 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_even
, flt_odd
));
7112 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_odd
, flt_even
));
7116 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
7117 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
7118 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7119 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7120 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
7126 /* Special case initializing vector short/char that are splats if we are on
7127 64-bit systems with direct move. */
7128 if (all_same
&& TARGET_DIRECT_MOVE_64BIT
7129 && (mode
== V16QImode
|| mode
== V8HImode
))
7131 rtx op0
= XVECEXP (vals
, 0, 0);
7132 rtx di_tmp
= gen_reg_rtx (DImode
);
7135 op0
= force_reg (GET_MODE_INNER (mode
), op0
);
7137 if (mode
== V16QImode
)
7139 emit_insn (gen_zero_extendqidi2 (di_tmp
, op0
));
7140 emit_insn (gen_vsx_vspltb_di (target
, di_tmp
));
7144 if (mode
== V8HImode
)
7146 emit_insn (gen_zero_extendhidi2 (di_tmp
, op0
));
7147 emit_insn (gen_vsx_vsplth_di (target
, di_tmp
));
7152 /* Store value to stack temp. Load vector element. Splat. However, splat
7153 of 64-bit items is not supported on Altivec. */
7154 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
7156 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7157 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
7158 XVECEXP (vals
, 0, 0));
7159 x
= gen_rtx_UNSPEC (VOIDmode
,
7160 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7161 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7163 gen_rtx_SET (target
, mem
),
7165 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
7166 gen_rtx_PARALLEL (VOIDmode
,
7167 gen_rtvec (1, const0_rtx
)));
7168 emit_insn (gen_rtx_SET (target
, gen_rtx_VEC_DUPLICATE (mode
, x
)));
7172 /* One field is non-constant. Load constant then overwrite
7176 rtx copy
= copy_rtx (vals
);
7178 /* Load constant part of vector, substitute neighboring value for
7180 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
7181 rs6000_expand_vector_init (target
, copy
);
7183 /* Insert variable. */
7184 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
7188 /* Construct the vector in memory one field at a time
7189 and load the whole vector. */
7190 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7191 for (i
= 0; i
< n_elts
; i
++)
7192 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
7193 i
* GET_MODE_SIZE (inner_mode
)),
7194 XVECEXP (vals
, 0, i
));
7195 emit_move_insn (target
, mem
);
7198 /* Set field ELT of TARGET to VAL. */
7201 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
7203 machine_mode mode
= GET_MODE (target
);
7204 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7205 rtx reg
= gen_reg_rtx (mode
);
7207 int width
= GET_MODE_SIZE (inner_mode
);
7210 val
= force_reg (GET_MODE (val
), val
);
7212 if (VECTOR_MEM_VSX_P (mode
))
7214 rtx insn
= NULL_RTX
;
7215 rtx elt_rtx
= GEN_INT (elt
);
7217 if (mode
== V2DFmode
)
7218 insn
= gen_vsx_set_v2df (target
, target
, val
, elt_rtx
);
7220 else if (mode
== V2DImode
)
7221 insn
= gen_vsx_set_v2di (target
, target
, val
, elt_rtx
);
7223 else if (TARGET_P9_VECTOR
&& TARGET_POWERPC64
)
7225 if (mode
== V4SImode
)
7226 insn
= gen_vsx_set_v4si_p9 (target
, target
, val
, elt_rtx
);
7227 else if (mode
== V8HImode
)
7228 insn
= gen_vsx_set_v8hi_p9 (target
, target
, val
, elt_rtx
);
7229 else if (mode
== V16QImode
)
7230 insn
= gen_vsx_set_v16qi_p9 (target
, target
, val
, elt_rtx
);
7231 else if (mode
== V4SFmode
)
7232 insn
= gen_vsx_set_v4sf_p9 (target
, target
, val
, elt_rtx
);
7242 /* Simplify setting single element vectors like V1TImode. */
7243 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (inner_mode
) && elt
== 0)
7245 emit_move_insn (target
, gen_lowpart (mode
, val
));
7249 /* Load single variable value. */
7250 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7251 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
7252 x
= gen_rtx_UNSPEC (VOIDmode
,
7253 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7254 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7256 gen_rtx_SET (reg
, mem
),
7259 /* Linear sequence. */
7260 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
7261 for (i
= 0; i
< 16; ++i
)
7262 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
7264 /* Set permute mask to insert element into target. */
7265 for (i
= 0; i
< width
; ++i
)
7266 XVECEXP (mask
, 0, elt
*width
+ i
)
7267 = GEN_INT (i
+ 0x10);
7268 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
7270 if (BYTES_BIG_ENDIAN
)
7271 x
= gen_rtx_UNSPEC (mode
,
7272 gen_rtvec (3, target
, reg
,
7273 force_reg (V16QImode
, x
)),
7277 if (TARGET_P9_VECTOR
)
7278 x
= gen_rtx_UNSPEC (mode
,
7279 gen_rtvec (3, target
, reg
,
7280 force_reg (V16QImode
, x
)),
7284 /* Invert selector. We prefer to generate VNAND on P8 so
7285 that future fusion opportunities can kick in, but must
7286 generate VNOR elsewhere. */
7287 rtx notx
= gen_rtx_NOT (V16QImode
, force_reg (V16QImode
, x
));
7288 rtx iorx
= (TARGET_P8_VECTOR
7289 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
7290 : gen_rtx_AND (V16QImode
, notx
, notx
));
7291 rtx tmp
= gen_reg_rtx (V16QImode
);
7292 emit_insn (gen_rtx_SET (tmp
, iorx
));
7294 /* Permute with operands reversed and adjusted selector. */
7295 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
7300 emit_insn (gen_rtx_SET (target
, x
));
7303 /* Extract field ELT from VEC into TARGET. */
7306 rs6000_expand_vector_extract (rtx target
, rtx vec
, rtx elt
)
7308 machine_mode mode
= GET_MODE (vec
);
7309 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7312 if (VECTOR_MEM_VSX_P (mode
) && CONST_INT_P (elt
))
7319 gcc_assert (INTVAL (elt
) == 0 && inner_mode
== TImode
);
7320 emit_move_insn (target
, gen_lowpart (TImode
, vec
));
7323 emit_insn (gen_vsx_extract_v2df (target
, vec
, elt
));
7326 emit_insn (gen_vsx_extract_v2di (target
, vec
, elt
));
7329 emit_insn (gen_vsx_extract_v4sf (target
, vec
, elt
));
7332 if (TARGET_DIRECT_MOVE_64BIT
)
7334 emit_insn (gen_vsx_extract_v16qi (target
, vec
, elt
));
7340 if (TARGET_DIRECT_MOVE_64BIT
)
7342 emit_insn (gen_vsx_extract_v8hi (target
, vec
, elt
));
7348 if (TARGET_DIRECT_MOVE_64BIT
)
7350 emit_insn (gen_vsx_extract_v4si (target
, vec
, elt
));
7356 else if (VECTOR_MEM_VSX_P (mode
) && !CONST_INT_P (elt
)
7357 && TARGET_DIRECT_MOVE_64BIT
)
7359 if (GET_MODE (elt
) != DImode
)
7361 rtx tmp
= gen_reg_rtx (DImode
);
7362 convert_move (tmp
, elt
, 0);
7365 else if (!REG_P (elt
))
7366 elt
= force_reg (DImode
, elt
);
7371 emit_insn (gen_vsx_extract_v2df_var (target
, vec
, elt
));
7375 emit_insn (gen_vsx_extract_v2di_var (target
, vec
, elt
));
7379 emit_insn (gen_vsx_extract_v4sf_var (target
, vec
, elt
));
7383 emit_insn (gen_vsx_extract_v4si_var (target
, vec
, elt
));
7387 emit_insn (gen_vsx_extract_v8hi_var (target
, vec
, elt
));
7391 emit_insn (gen_vsx_extract_v16qi_var (target
, vec
, elt
));
7399 gcc_assert (CONST_INT_P (elt
));
7401 /* Allocate mode-sized buffer. */
7402 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7404 emit_move_insn (mem
, vec
);
7406 /* Add offset to field within buffer matching vector element. */
7407 mem
= adjust_address_nv (mem
, inner_mode
,
7408 INTVAL (elt
) * GET_MODE_SIZE (inner_mode
));
7410 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
7413 /* Helper function to return the register number of a RTX. */
7415 regno_or_subregno (rtx op
)
7419 else if (SUBREG_P (op
))
7420 return subreg_regno (op
);
7425 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7426 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7427 temporary (BASE_TMP) to fixup the address. Return the new memory address
7428 that is valid for reads or writes to a given register (SCALAR_REG). */
7431 rs6000_adjust_vec_address (rtx scalar_reg
,
7435 machine_mode scalar_mode
)
7437 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7438 rtx addr
= XEXP (mem
, 0);
7443 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7444 gcc_assert (GET_RTX_CLASS (GET_CODE (addr
)) != RTX_AUTOINC
);
7446 /* Calculate what we need to add to the address to get the element
7448 if (CONST_INT_P (element
))
7449 element_offset
= GEN_INT (INTVAL (element
) * scalar_size
);
7452 int byte_shift
= exact_log2 (scalar_size
);
7453 gcc_assert (byte_shift
>= 0);
7455 if (byte_shift
== 0)
7456 element_offset
= element
;
7460 if (TARGET_POWERPC64
)
7461 emit_insn (gen_ashldi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7463 emit_insn (gen_ashlsi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7465 element_offset
= base_tmp
;
7469 /* Create the new address pointing to the element within the vector. If we
7470 are adding 0, we don't have to change the address. */
7471 if (element_offset
== const0_rtx
)
7474 /* A simple indirect address can be converted into a reg + offset
7476 else if (REG_P (addr
) || SUBREG_P (addr
))
7477 new_addr
= gen_rtx_PLUS (Pmode
, addr
, element_offset
);
7479 /* Optimize D-FORM addresses with constant offset with a constant element, to
7480 include the element offset in the address directly. */
7481 else if (GET_CODE (addr
) == PLUS
)
7483 rtx op0
= XEXP (addr
, 0);
7484 rtx op1
= XEXP (addr
, 1);
7487 gcc_assert (REG_P (op0
) || SUBREG_P (op0
));
7488 if (CONST_INT_P (op1
) && CONST_INT_P (element_offset
))
7490 HOST_WIDE_INT offset
= INTVAL (op1
) + INTVAL (element_offset
);
7491 rtx offset_rtx
= GEN_INT (offset
);
7493 if (IN_RANGE (offset
, -32768, 32767)
7494 && (scalar_size
< 8 || (offset
& 0x3) == 0))
7495 new_addr
= gen_rtx_PLUS (Pmode
, op0
, offset_rtx
);
7498 emit_move_insn (base_tmp
, offset_rtx
);
7499 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7504 bool op1_reg_p
= (REG_P (op1
) || SUBREG_P (op1
));
7505 bool ele_reg_p
= (REG_P (element_offset
) || SUBREG_P (element_offset
));
7507 /* Note, ADDI requires the register being added to be a base
7508 register. If the register was R0, load it up into the temporary
7511 && (ele_reg_p
|| reg_or_subregno (op1
) != FIRST_GPR_REGNO
))
7513 insn
= gen_add3_insn (base_tmp
, op1
, element_offset
);
7514 gcc_assert (insn
!= NULL_RTX
);
7519 && reg_or_subregno (element_offset
) != FIRST_GPR_REGNO
)
7521 insn
= gen_add3_insn (base_tmp
, element_offset
, op1
);
7522 gcc_assert (insn
!= NULL_RTX
);
7528 emit_move_insn (base_tmp
, op1
);
7529 emit_insn (gen_add2_insn (base_tmp
, element_offset
));
7532 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7538 emit_move_insn (base_tmp
, addr
);
7539 new_addr
= gen_rtx_PLUS (Pmode
, base_tmp
, element_offset
);
7542 /* If we have a PLUS, we need to see whether the particular register class
7543 allows for D-FORM or X-FORM addressing. */
7544 if (GET_CODE (new_addr
) == PLUS
)
7546 rtx op1
= XEXP (new_addr
, 1);
7547 addr_mask_type addr_mask
;
7548 int scalar_regno
= regno_or_subregno (scalar_reg
);
7550 gcc_assert (scalar_regno
< FIRST_PSEUDO_REGISTER
);
7551 if (INT_REGNO_P (scalar_regno
))
7552 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_GPR
];
7554 else if (FP_REGNO_P (scalar_regno
))
7555 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_FPR
];
7557 else if (ALTIVEC_REGNO_P (scalar_regno
))
7558 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_VMX
];
7563 if (REG_P (op1
) || SUBREG_P (op1
))
7564 valid_addr_p
= (addr_mask
& RELOAD_REG_INDEXED
) != 0;
7566 valid_addr_p
= (addr_mask
& RELOAD_REG_OFFSET
) != 0;
7569 else if (REG_P (new_addr
) || SUBREG_P (new_addr
))
7570 valid_addr_p
= true;
7573 valid_addr_p
= false;
7577 emit_move_insn (base_tmp
, new_addr
);
7578 new_addr
= base_tmp
;
7581 return change_address (mem
, scalar_mode
, new_addr
);
7584 /* Split a variable vec_extract operation into the component instructions. */
7587 rs6000_split_vec_extract_var (rtx dest
, rtx src
, rtx element
, rtx tmp_gpr
,
7590 machine_mode mode
= GET_MODE (src
);
7591 machine_mode scalar_mode
= GET_MODE (dest
);
7592 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7593 int byte_shift
= exact_log2 (scalar_size
);
7595 gcc_assert (byte_shift
>= 0);
7597 /* If we are given a memory address, optimize to load just the element. We
7598 don't have to adjust the vector element number on little endian
7602 gcc_assert (REG_P (tmp_gpr
));
7603 emit_move_insn (dest
, rs6000_adjust_vec_address (dest
, src
, element
,
7604 tmp_gpr
, scalar_mode
));
7608 else if (REG_P (src
) || SUBREG_P (src
))
7610 int bit_shift
= byte_shift
+ 3;
7612 int dest_regno
= regno_or_subregno (dest
);
7613 int src_regno
= regno_or_subregno (src
);
7614 int element_regno
= regno_or_subregno (element
);
7616 gcc_assert (REG_P (tmp_gpr
));
7618 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7619 a general purpose register. */
7620 if (TARGET_P9_VECTOR
7621 && (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
7622 && INT_REGNO_P (dest_regno
)
7623 && ALTIVEC_REGNO_P (src_regno
)
7624 && INT_REGNO_P (element_regno
))
7626 rtx dest_si
= gen_rtx_REG (SImode
, dest_regno
);
7627 rtx element_si
= gen_rtx_REG (SImode
, element_regno
);
7629 if (mode
== V16QImode
)
7630 emit_insn (VECTOR_ELT_ORDER_BIG
7631 ? gen_vextublx (dest_si
, element_si
, src
)
7632 : gen_vextubrx (dest_si
, element_si
, src
));
7634 else if (mode
== V8HImode
)
7636 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7637 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const1_rtx
));
7638 emit_insn (VECTOR_ELT_ORDER_BIG
7639 ? gen_vextuhlx (dest_si
, tmp_gpr_si
, src
)
7640 : gen_vextuhrx (dest_si
, tmp_gpr_si
, src
));
7646 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7647 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const2_rtx
));
7648 emit_insn (VECTOR_ELT_ORDER_BIG
7649 ? gen_vextuwlx (dest_si
, tmp_gpr_si
, src
)
7650 : gen_vextuwrx (dest_si
, tmp_gpr_si
, src
));
7657 gcc_assert (REG_P (tmp_altivec
));
7659 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7660 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7661 will shift the element into the upper position (adding 3 to convert a
7662 byte shift into a bit shift). */
7663 if (scalar_size
== 8)
7665 if (!VECTOR_ELT_ORDER_BIG
)
7667 emit_insn (gen_xordi3 (tmp_gpr
, element
, const1_rtx
));
7673 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7675 emit_insn (gen_rtx_SET (tmp_gpr
,
7676 gen_rtx_AND (DImode
,
7677 gen_rtx_ASHIFT (DImode
,
7684 if (!VECTOR_ELT_ORDER_BIG
)
7686 rtx num_ele_m1
= GEN_INT (GET_MODE_NUNITS (mode
) - 1);
7688 emit_insn (gen_anddi3 (tmp_gpr
, element
, num_ele_m1
));
7689 emit_insn (gen_subdi3 (tmp_gpr
, num_ele_m1
, tmp_gpr
));
7695 emit_insn (gen_ashldi3 (tmp_gpr
, element2
, GEN_INT (bit_shift
)));
7698 /* Get the value into the lower byte of the Altivec register where VSLO
7700 if (TARGET_P9_VECTOR
)
7701 emit_insn (gen_vsx_splat_v2di (tmp_altivec
, tmp_gpr
));
7702 else if (can_create_pseudo_p ())
7703 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_gpr
, tmp_gpr
));
7706 rtx tmp_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7707 emit_move_insn (tmp_di
, tmp_gpr
);
7708 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_di
, tmp_di
));
7711 /* Do the VSLO to get the value into the final location. */
7715 emit_insn (gen_vsx_vslo_v2df (dest
, src
, tmp_altivec
));
7719 emit_insn (gen_vsx_vslo_v2di (dest
, src
, tmp_altivec
));
7724 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7725 rtx tmp_altivec_v4sf
= gen_rtx_REG (V4SFmode
, REGNO (tmp_altivec
));
7726 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7727 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7730 emit_insn (gen_vsx_xscvspdp_scalar2 (dest
, tmp_altivec_v4sf
));
7738 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7739 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7740 rtx tmp_gpr_di
= gen_rtx_REG (DImode
, REGNO (dest
));
7741 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7743 emit_move_insn (tmp_gpr_di
, tmp_altivec_di
);
7744 emit_insn (gen_ashrdi3 (tmp_gpr_di
, tmp_gpr_di
,
7745 GEN_INT (64 - (8 * scalar_size
))));
/* Helper function for rs6000_split_v4si_init to build up a DImode value from
   two SImode values.  */

static void
rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
{
  const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);

  /* If both inputs are constants, emit the combined constant directly.  */
  if (CONST_INT_P (si1) && CONST_INT_P (si2))
    {
      unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
      unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;

      emit_move_insn (dest, GEN_INT (const1 | const2));
      return;
    }

  /* Put si1 into upper 32-bits of dest.  */
  if (CONST_INT_P (si1))
    emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
  else
    {
      /* Generate RLDIC.  */
      rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
      rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
      rtx mask_rtx = GEN_INT (mask_32bit << 32);
      rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
      gcc_assert (!reg_overlap_mentioned_p (dest, si1));
      emit_insn (gen_rtx_SET (dest, and_rtx));
    }

  /* Put si2 into the temporary.  */
  gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
  if (CONST_INT_P (si2))
    emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
  else
    emit_insn (gen_zero_extendsidi2 (tmp, si2));

  /* Combine the two parts.  */
  emit_insn (gen_iordi3 (dest, dest, tmp));
  return;
}
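/* For example, combining the SImode constants 0x11112222 and 0x33334444
   yields the single DImode constant 0x1111222233334444; with register inputs
   the same value is formed by shifting/masking SI1 into the upper half,
   zero-extending SI2 into the temporary, and OR-ing the two halves.  */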
/* Split a V4SI initialization.  */

void
rs6000_split_v4si_init (rtx operands[])
{
  rtx dest = operands[0];

  /* Destination is a GPR, build up the two DImode parts in place.  */
  if (REG_P (dest) || SUBREG_P (dest))
    {
      int d_regno = regno_or_subregno (dest);
      rtx scalar1 = operands[1];
      rtx scalar2 = operands[2];
      rtx scalar3 = operands[3];
      rtx scalar4 = operands[4];
      rtx tmp1 = operands[5];
      rtx tmp2 = operands[6];

      /* Even though we only need one temporary (plus the destination, which
         has an early clobber constraint), try to use two temporaries, one for
         each double word created.  That way the 2nd insn scheduling pass can
         rearrange things so the two parts are done in parallel.  */
      if (BYTES_BIG_ENDIAN)
        {
          rtx di_lo = gen_rtx_REG (DImode, d_regno);
          rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
          rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
          rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
        }
      else
        {
          rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
          rtx di_hi = gen_rtx_REG (DImode, d_regno);
          gcc_assert (!VECTOR_ELT_ORDER_BIG);
          rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
          rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
        }
      return;
    }

  else
    gcc_unreachable ();
}
7846 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7847 selects whether the alignment is ABI-mandated, optional, or
7848 both ABI and optional alignment. */
7851 rs6000_data_alignment (tree type
, unsigned int align
, enum data_align how
)
7853 if (how
!= align_opt
)
7855 if (TREE_CODE (type
) == VECTOR_TYPE
)
7857 if (TARGET_PAIRED_FLOAT
&& PAIRED_VECTOR_MODE (TYPE_MODE (type
)))
7862 else if (align
< 128)
7867 if (how
!= align_abi
)
7869 if (TREE_CODE (type
) == ARRAY_TYPE
7870 && TYPE_MODE (TREE_TYPE (type
)) == QImode
)
7872 if (align
< BITS_PER_WORD
)
7873 align
= BITS_PER_WORD
;
7880 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7883 rs6000_special_adjust_field_align_p (tree type
, unsigned int computed
)
7885 if (TARGET_ALTIVEC
&& TREE_CODE (type
) == VECTOR_TYPE
)
7887 if (computed
!= 128)
7890 if (!warned
&& warn_psabi
)
7893 inform (input_location
,
7894 "the layout of aggregates containing vectors with"
7895 " %d-byte alignment has changed in GCC 5",
7896 computed
/ BITS_PER_UNIT
);
7899 /* In current GCC there is no special case. */
7906 /* AIX increases natural record alignment to doubleword if the first
7907 field is an FP double while the FP fields remain word aligned. */
7910 rs6000_special_round_type_align (tree type
, unsigned int computed
,
7911 unsigned int specified
)
7913 unsigned int align
= MAX (computed
, specified
);
7914 tree field
= TYPE_FIELDS (type
);
7916 /* Skip all non-field decls. */
7917 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
7918 field
= DECL_CHAIN (field
);
7920 if (field
!= NULL
&& field
!= type
)
7922 type
= TREE_TYPE (field
);
7923 while (TREE_CODE (type
) == ARRAY_TYPE
)
7924 type
= TREE_TYPE (type
);
7926 if (type
!= error_mark_node
&& TYPE_MODE (type
) == DFmode
)
7927 align
= MAX (align
, 64);
7933 /* Darwin increases record alignment to the natural alignment of
7934 the first field. */
7937 darwin_rs6000_special_round_type_align (tree type
, unsigned int computed
,
7938 unsigned int specified
)
7940 unsigned int align
= MAX (computed
, specified
);
7942 if (TYPE_PACKED (type
))
7945 /* Find the first field, looking down into aggregates. */
7947 tree field
= TYPE_FIELDS (type
);
7948 /* Skip all non-field decls. */
7949 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
7950 field
= DECL_CHAIN (field
);
7953 /* A packed field does not contribute any extra alignment. */
7954 if (DECL_PACKED (field
))
7956 type
= TREE_TYPE (field
);
7957 while (TREE_CODE (type
) == ARRAY_TYPE
)
7958 type
= TREE_TYPE (type
);
7959 } while (AGGREGATE_TYPE_P (type
));
7961 if (! AGGREGATE_TYPE_P (type
) && type
!= error_mark_node
)
7962 align
= MAX (align
, TYPE_ALIGN (type
));
7967 /* Return 1 for an operand in small memory on V.4/eabi. */
7970 small_data_operand (rtx op ATTRIBUTE_UNUSED
,
7971 machine_mode mode ATTRIBUTE_UNUSED
)
7976 if (rs6000_sdata
== SDATA_NONE
|| rs6000_sdata
== SDATA_DATA
)
7979 if (DEFAULT_ABI
!= ABI_V4
)
7982 if (GET_CODE (op
) == SYMBOL_REF
)
7985 else if (GET_CODE (op
) != CONST
7986 || GET_CODE (XEXP (op
, 0)) != PLUS
7987 || GET_CODE (XEXP (XEXP (op
, 0), 0)) != SYMBOL_REF
7988 || GET_CODE (XEXP (XEXP (op
, 0), 1)) != CONST_INT
)
7993 rtx sum
= XEXP (op
, 0);
7994 HOST_WIDE_INT summand
;
7996 /* We have to be careful here, because it is the referenced address
7997 that must be 32k from _SDA_BASE_, not just the symbol. */
7998 summand
= INTVAL (XEXP (sum
, 1));
7999 if (summand
< 0 || summand
> g_switch_value
)
8002 sym_ref
= XEXP (sum
, 0);
8005 return SYMBOL_REF_SMALL_P (sym_ref
);
8011 /* Return true if either operand is a general purpose register. */
8014 gpr_or_gpr_p (rtx op0, rtx op1)
8016 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8017 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8020 /* Return true if this is a move direct operation between GPR registers and
8021 floating point/VSX registers. */
8024 direct_move_p (rtx op0
, rtx op1
)
8028 if (!REG_P (op0
) || !REG_P (op1
))
8031 if (!TARGET_DIRECT_MOVE
&& !TARGET_MFPGPR
)
8034 regno0
= REGNO (op0
);
8035 regno1
= REGNO (op1
);
8036 if (regno0
>= FIRST_PSEUDO_REGISTER
|| regno1
>= FIRST_PSEUDO_REGISTER
)
8039 if (INT_REGNO_P (regno0
))
8040 return (TARGET_DIRECT_MOVE
) ? VSX_REGNO_P (regno1
) : FP_REGNO_P (regno1
);
8042 else if (INT_REGNO_P (regno1
))
8044 if (TARGET_MFPGPR
&& FP_REGNO_P (regno0
))
8047 else if (TARGET_DIRECT_MOVE
&& VSX_REGNO_P (regno0
))
8054 /* Return true if the OFFSET is valid for the quad address instructions that
8055 use d-form (register + offset) addressing. */
8058 quad_address_offset_p (HOST_WIDE_INT offset)
8060 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
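/* Illustrative sketch (hypothetical helper): a quad-memory d-form offset must
   fit in a signed 16-bit field and have its four low bits clear, i.e. be a
   multiple of 16, exactly what quad_address_offset_p checks above.  */
static inline int
example_quad_offset_ok (long long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}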
8063 /* Return true if the ADDR is an acceptable address for a quad memory
8064 operation of mode MODE (either LQ/STQ for general purpose registers, or
8065 LXV/STXV for vector registers under ISA 3.0). GPR_P is true if this address
8066 is intended for LQ/STQ. If it is false, the address is intended for the ISA
8067 3.0 LXV/STXV instruction. */
8070 quad_address_p (rtx addr, machine_mode mode, bool strict)
8074 if (GET_MODE_SIZE (mode) != 16)
8077 if (legitimate_indirect_address_p (addr, strict))
8080 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8083 if (GET_CODE (addr) != PLUS)
8086 op0 = XEXP (addr, 0);
8087 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8090 op1 = XEXP (addr, 1);
8091 if (!CONST_INT_P (op1))
8094 return quad_address_offset_p (INTVAL (op1));
8097 /* Return true if this is a load or store quad operation. This function does
8098 not handle the atomic quad memory instructions. */
8101 quad_load_store_p (rtx op0
, rtx op1
)
8105 if (!TARGET_QUAD_MEMORY
)
8108 else if (REG_P (op0
) && MEM_P (op1
))
8109 ret
= (quad_int_reg_operand (op0
, GET_MODE (op0
))
8110 && quad_memory_operand (op1
, GET_MODE (op1
))
8111 && !reg_overlap_mentioned_p (op0
, op1
));
8113 else if (MEM_P (op0
) && REG_P (op1
))
8114 ret
= (quad_memory_operand (op0
, GET_MODE (op0
))
8115 && quad_int_reg_operand (op1
, GET_MODE (op1
)));
8120 if (TARGET_DEBUG_ADDR
)
8122 fprintf (stderr
, "\n========== quad_load_store, return %s\n",
8123 ret
? "true" : "false");
8124 debug_rtx (gen_rtx_SET (op0
, op1
));
8130 /* Given an address, return a constant offset term if one exists. */
8133 address_offset (rtx op)
8135 if (GET_CODE (op) == PRE_INC
8136 || GET_CODE (op) == PRE_DEC)
8138 else if (GET_CODE (op) == PRE_MODIFY
8139 || GET_CODE (op) == LO_SUM)
8142 if (GET_CODE (op) == CONST)
8145 if (GET_CODE (op) == PLUS)
8148 if (CONST_INT_P (op))
8154 /* Return true if the MEM operand is a memory operand suitable for use
8155 with a (full width, possibly multiple) gpr load/store. On
8156 powerpc64 this means the offset must be divisible by 4.
8157 Implements 'Y' constraint.
8159 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8160 a constraint function we know the operand has satisfied a suitable
8161 memory predicate. Also accept some odd rtl generated by reload
8162 (see rs6000_legitimize_reload_address for various forms). It is
8163 important that reload rtl be accepted by appropriate constraints
8164 but not by the operand predicate.
8166 Offsetting a lo_sum should not be allowed, except where we know by
8167 alignment that a 32k boundary is not crossed, but see the ???
8168 comment in rs6000_legitimize_reload_address. Note that by
8169 "offsetting" here we mean a further offset to access parts of the
8170 MEM. It's fine to have a lo_sum where the inner address is offset
8171 from a sym, since the same sym+offset will appear in the high part
8172 of the address calculation. */
8175 mem_operand_gpr (rtx op
, machine_mode mode
)
8177 unsigned HOST_WIDE_INT offset
;
8179 rtx addr
= XEXP (op
, 0);
8181 op
= address_offset (addr
);
8185 offset
= INTVAL (op
);
8186 if (TARGET_POWERPC64
&& (offset
& 3) != 0)
8189 extra
= GET_MODE_SIZE (mode
) - UNITS_PER_WORD
;
8193 if (GET_CODE (addr
) == LO_SUM
)
8194 /* For lo_sum addresses, we must allow any offset except one that
8195 causes a wrap, so test only the low 16 bits. */
8196 offset
= ((offset
& 0xffff) ^ 0x8000) - 0x8000;
8198 return offset
+ 0x8000 < 0x10000u
- extra
;
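/* Illustrative sketch (hypothetical helper): the unsigned comparison
   OFFSET + 0x8000 < 0x10000 - EXTRA used above folds the two-sided test
   -0x8000 <= OFFSET && OFFSET + EXTRA <= 0x7fff into a single compare,
   where EXTRA is the number of bytes beyond the first word that the
   access touches.  */
static inline int
example_gpr_offset_ok (long long offset, unsigned int extra)
{
  return (unsigned long long) (offset + 0x8000) < 0x10000u - extra;
}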
8201 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8202 enforce an offset divisible by 4 even for 32-bit. */
8205 mem_operand_ds_form (rtx op
, machine_mode mode
)
8207 unsigned HOST_WIDE_INT offset
;
8209 rtx addr
= XEXP (op
, 0);
8211 if (!offsettable_address_p (false, mode
, addr
))
8214 op
= address_offset (addr
);
8218 offset
= INTVAL (op
);
8219 if ((offset
& 3) != 0)
8222 extra
= GET_MODE_SIZE (mode
) - UNITS_PER_WORD
;
8226 if (GET_CODE (addr
) == LO_SUM
)
8227 /* For lo_sum addresses, we must allow any offset except one that
8228 causes a wrap, so test only the low 16 bits. */
8229 offset
= ((offset
& 0xffff) ^ 0x8000) - 0x8000;
8231 return offset
+ 0x8000 < 0x10000u
- extra
;
8234 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8237 reg_offset_addressing_ok_p (machine_mode mode
)
8251 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8252 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8253 a vector mode, if we want to use the VSX registers to move it around,
8254 we need to restrict ourselves to reg+reg addressing. Similarly for
8255 IEEE 128-bit floating point that is passed in a single vector
8256 register. */
8257 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
))
8258 return mode_supports_vsx_dform_quad (mode
);
8263 /* Paired vector modes. Only reg+reg addressing is valid. */
8264 if (TARGET_PAIRED_FLOAT
)
8269 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8270 addressing for the LFIWZX and STFIWX instructions. */
8271 if (TARGET_NO_SDMODE_STACK
)
8283 virtual_stack_registers_memory_p (rtx op
)
8287 if (GET_CODE (op
) == REG
)
8288 regnum
= REGNO (op
);
8290 else if (GET_CODE (op
) == PLUS
8291 && GET_CODE (XEXP (op
, 0)) == REG
8292 && GET_CODE (XEXP (op
, 1)) == CONST_INT
)
8293 regnum
= REGNO (XEXP (op
, 0));
8298 return (regnum
>= FIRST_VIRTUAL_REGISTER
8299 && regnum
<= LAST_VIRTUAL_POINTER_REGISTER
);
8302 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8303 is known to not straddle a 32k boundary. This function is used
8304 to determine whether -mcmodel=medium code can use TOC pointer
8305 relative addressing for OP. This means the alignment of the TOC
8306 pointer must also be taken into account, and unfortunately that is
8309 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8310 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8314 offsettable_ok_by_alignment (rtx op
, HOST_WIDE_INT offset
,
8318 unsigned HOST_WIDE_INT dsize
, dalign
, lsb
, mask
;
8320 if (GET_CODE (op
) != SYMBOL_REF
)
8323 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8325 if (mode_supports_vsx_dform_quad (mode
))
8328 dsize
= GET_MODE_SIZE (mode
);
8329 decl
= SYMBOL_REF_DECL (op
);
8335 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8336 replacing memory addresses with an anchor plus offset. We
8337 could find the decl by rummaging around in the block->objects
8338 VEC for the given offset but that seems like too much work. */
8339 dalign
= BITS_PER_UNIT
;
8340 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op
)
8341 && SYMBOL_REF_ANCHOR_P (op
)
8342 && SYMBOL_REF_BLOCK (op
) != NULL
)
8344 struct object_block
*block
= SYMBOL_REF_BLOCK (op
);
8346 dalign
= block
->alignment
;
8347 offset
+= SYMBOL_REF_BLOCK_OFFSET (op
);
8349 else if (CONSTANT_POOL_ADDRESS_P (op
))
8351 /* It would be nice to have get_pool_align().. */
8352 machine_mode cmode
= get_pool_mode (op
);
8354 dalign
= GET_MODE_ALIGNMENT (cmode
);
8357 else if (DECL_P (decl
))
8359 dalign
= DECL_ALIGN (decl
);
8363 /* Allow BLKmode when the entire object is known to not
8364 cross a 32k boundary. */
8365 if (!DECL_SIZE_UNIT (decl
))
8368 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl
)))
8371 dsize
= tree_to_uhwi (DECL_SIZE_UNIT (decl
));
8375 dalign
/= BITS_PER_UNIT
;
8376 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8377 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8378 return dalign
>= dsize
;
8384 /* Find how many bits of the alignment we know for this access. */
8385 dalign
/= BITS_PER_UNIT
;
8386 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8387 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8389 lsb
= offset
& -offset
;
8393 return dalign
>= dsize
;
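/* Illustrative sketch (hypothetical helper): OFFSET & -OFFSET isolates the
   lowest set bit of OFFSET, i.e. the largest power of two the offset is
   known to be a multiple of.  The code above combines that with the known
   alignment to decide whether an access of DSIZE bytes can cross a 32k
   boundary.  */
static inline unsigned long long
example_lowest_set_bit (unsigned long long offset)
{
  return offset & -offset;
}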
8397 constant_pool_expr_p (rtx op)
8401 split_const (op, &base, &offset);
8402 return (GET_CODE (base) == SYMBOL_REF
8403 && CONSTANT_POOL_ADDRESS_P (base)
8404 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8407 /* These are only used to pass through from print_operand/print_operand_address
8408 to rs6000_output_addr_const_extra over the intervening function
8409 output_addr_const which is not target code. */
8410 static const_rtx tocrel_base_oac
, tocrel_offset_oac
;
8412 /* Return true if OP is a toc pointer relative address (the output
8413 of create_TOC_reference). If STRICT, do not match non-split
8414 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8415 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8416 TOCREL_OFFSET_RET respectively. */
8419 toc_relative_expr_p (const_rtx op
, bool strict
, const_rtx
*tocrel_base_ret
,
8420 const_rtx
*tocrel_offset_ret
)
8425 if (TARGET_CMODEL
!= CMODEL_SMALL
)
8427 /* When strict ensure we have everything tidy. */
8429 && !(GET_CODE (op
) == LO_SUM
8430 && REG_P (XEXP (op
, 0))
8431 && INT_REG_OK_FOR_BASE_P (XEXP (op
, 0), strict
)))
8434 /* When not strict, allow non-split TOC addresses and also allow
8435 (lo_sum (high ..)) TOC addresses created during reload. */
8436 if (GET_CODE (op
) == LO_SUM
)
8440 const_rtx tocrel_base
= op
;
8441 const_rtx tocrel_offset
= const0_rtx
;
8443 if (GET_CODE (op
) == PLUS
&& add_cint_operand (XEXP (op
, 1), GET_MODE (op
)))
8445 tocrel_base
= XEXP (op
, 0);
8446 tocrel_offset
= XEXP (op
, 1);
8449 if (tocrel_base_ret
)
8450 *tocrel_base_ret
= tocrel_base
;
8451 if (tocrel_offset_ret
)
8452 *tocrel_offset_ret
= tocrel_offset
;
8454 return (GET_CODE (tocrel_base
) == UNSPEC
8455 && XINT (tocrel_base
, 1) == UNSPEC_TOCREL
);
8458 /* Return true if X is a constant pool address, and also for cmodel=medium
8459 if X is a toc-relative address known to be offsettable within MODE. */
8462 legitimate_constant_pool_address_p (const_rtx x
, machine_mode mode
,
8465 const_rtx tocrel_base
, tocrel_offset
;
8466 return (toc_relative_expr_p (x
, strict
, &tocrel_base
, &tocrel_offset
)
8467 && (TARGET_CMODEL
!= CMODEL_MEDIUM
8468 || constant_pool_expr_p (XVECEXP (tocrel_base
, 0, 0))
8470 || offsettable_ok_by_alignment (XVECEXP (tocrel_base
, 0, 0),
8471 INTVAL (tocrel_offset
), mode
)));
8475 legitimate_small_data_p (machine_mode mode
, rtx x
)
8477 return (DEFAULT_ABI
== ABI_V4
8478 && !flag_pic
&& !TARGET_TOC
8479 && (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
)
8480 && small_data_operand (x
, mode
));
8484 rs6000_legitimate_offset_address_p (machine_mode mode
, rtx x
,
8485 bool strict
, bool worst_case
)
8487 unsigned HOST_WIDE_INT offset
;
8490 if (GET_CODE (x
) != PLUS
)
8492 if (!REG_P (XEXP (x
, 0)))
8494 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8496 if (mode_supports_vsx_dform_quad (mode
))
8497 return quad_address_p (x
, mode
, strict
);
8498 if (!reg_offset_addressing_ok_p (mode
))
8499 return virtual_stack_registers_memory_p (x
);
8500 if (legitimate_constant_pool_address_p (x
, mode
, strict
|| lra_in_progress
))
8502 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
8505 offset
= INTVAL (XEXP (x
, 1));
8511 /* Paired single modes: offset addressing isn't valid. */
8517 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8519 if (VECTOR_MEM_VSX_P (mode
))
8524 if (!TARGET_POWERPC64
)
8526 else if (offset
& 3)
8539 if (!TARGET_POWERPC64
)
8541 else if (offset
& 3)
8550 return offset
< 0x10000 - extra
;
8554 legitimate_indexed_address_p (rtx x
, int strict
)
8558 if (GET_CODE (x
) != PLUS
)
8564 return (REG_P (op0
) && REG_P (op1
)
8565 && ((INT_REG_OK_FOR_BASE_P (op0
, strict
)
8566 && INT_REG_OK_FOR_INDEX_P (op1
, strict
))
8567 || (INT_REG_OK_FOR_BASE_P (op1
, strict
)
8568 && INT_REG_OK_FOR_INDEX_P (op0
, strict
))));
8572 avoiding_indexed_address_p (machine_mode mode)
8574 /* Avoid indexed addressing for modes that have non-indexed
8575 load/store instruction forms. */
8576 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8580 legitimate_indirect_address_p (rtx x, int strict)
8582 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8586 macho_lo_sum_memory_operand (rtx x
, machine_mode mode
)
8588 if (!TARGET_MACHO
|| !flag_pic
8589 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
8593 if (GET_CODE (x
) != LO_SUM
)
8595 if (GET_CODE (XEXP (x
, 0)) != REG
)
8597 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
8601 return CONSTANT_P (x
);
8605 legitimate_lo_sum_address_p (machine_mode mode
, rtx x
, int strict
)
8607 if (GET_CODE (x
) != LO_SUM
)
8609 if (GET_CODE (XEXP (x
, 0)) != REG
)
8611 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8613 /* quad word addresses are restricted, and we can't use LO_SUM. */
8614 if (mode_supports_vsx_dform_quad (mode
))
8618 if (TARGET_ELF
|| TARGET_MACHO
)
8622 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
)
8624 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8625 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8626 recognizes some LO_SUM addresses as valid although this
8627 function says opposite. In most cases, LRA through different
8628 transformations can generate correct code for address reloads.
8629 It fails to handle only some LO_SUM cases. So we need to add
8630 code analogous to the one in rs6000_legitimize_reload_address for
8631 LO_SUM here saying that some addresses are still valid. */
8632 large_toc_ok
= (lra_in_progress
&& TARGET_CMODEL
!= CMODEL_SMALL
8633 && small_toc_ref (x
, VOIDmode
));
8634 if (TARGET_TOC
&& ! large_toc_ok
)
8636 if (GET_MODE_NUNITS (mode
) != 1)
8638 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
8639 && !(/* ??? Assume floating point reg based on mode? */
8640 TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
8641 && (mode
== DFmode
|| mode
== DDmode
)))
8644 return CONSTANT_P (x
) || large_toc_ok
;
8651 /* Try machine-dependent ways of modifying an illegitimate address
8652 to be legitimate. If we find one, return the new, valid address.
8653 This is used from only one place: `memory_address' in explow.c.
8655 OLDX is the address as it was before break_out_memory_refs was
8656 called. In some cases it is useful to look at this to decide what
8659 It is always safe for this function to do nothing. It exists to
8660 recognize opportunities to optimize the output.
8662 On RS/6000, first check for the sum of a register with a constant
8663 integer that is out of range. If so, generate code to add the
8664 constant with the low-order 16 bits masked to the register and force
8665 this result into another register (this can be done with `cau').
8666 Then generate an address of REG+(CONST&0xffff), allowing for the
8667 possibility of bit 16 being a one.
8669 Then check for the sum of a register and something not constant, try to
8670 load the other things into a register and return the sum. */
8673 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
8678 if (!reg_offset_addressing_ok_p (mode
)
8679 || mode_supports_vsx_dform_quad (mode
))
8681 if (virtual_stack_registers_memory_p (x
))
8684 /* In theory we should not be seeing addresses of the form reg+0,
8685 but just in case it is generated, optimize it away. */
8686 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
8687 return force_reg (Pmode
, XEXP (x
, 0));
8689 /* For TImode with load/store quad, restrict addresses to just a single
8690 pointer, so it works with both GPRs and VSX registers. */
8691 /* Make sure both operands are registers. */
8692 else if (GET_CODE (x
) == PLUS
8693 && (mode
!= TImode
|| !TARGET_VSX
))
8694 return gen_rtx_PLUS (Pmode
,
8695 force_reg (Pmode
, XEXP (x
, 0)),
8696 force_reg (Pmode
, XEXP (x
, 1)));
8698 return force_reg (Pmode
, x
);
8700 if (GET_CODE (x
) == SYMBOL_REF
)
8702 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
8704 return rs6000_legitimize_tls_address (x
, model
);
8716 /* As in legitimate_offset_address_p we do not assume
8717 worst-case. The mode here is just a hint as to the registers
8718 used. A TImode is usually in gprs, but may actually be in
8719 fprs. Leave worst-case scenario for reload to handle via
8720 insn constraints. PTImode is only GPRs. */
8727 if (GET_CODE (x
) == PLUS
8728 && GET_CODE (XEXP (x
, 0)) == REG
8729 && GET_CODE (XEXP (x
, 1)) == CONST_INT
8730 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
8732 && !PAIRED_VECTOR_MODE (mode
))
8734 HOST_WIDE_INT high_int
, low_int
;
8736 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8737 if (low_int
>= 0x8000 - extra
)
8739 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
8740 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8741 GEN_INT (high_int
)), 0);
8742 return plus_constant (Pmode
, sum
, low_int
);
8744 else if (GET_CODE (x
) == PLUS
8745 && GET_CODE (XEXP (x
, 0)) == REG
8746 && GET_CODE (XEXP (x
, 1)) != CONST_INT
8747 && GET_MODE_NUNITS (mode
) == 1
8748 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8749 || (/* ??? Assume floating point reg based on mode? */
8750 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8751 && (mode
== DFmode
|| mode
== DDmode
)))
8752 && !avoiding_indexed_address_p (mode
))
8754 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8755 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
8757 else if (PAIRED_VECTOR_MODE (mode
))
8761 /* We accept [reg + reg]. */
8763 if (GET_CODE (x
) == PLUS
)
8765 rtx op1
= XEXP (x
, 0);
8766 rtx op2
= XEXP (x
, 1);
8769 op1
= force_reg (Pmode
, op1
);
8770 op2
= force_reg (Pmode
, op2
);
8772 /* We can't always do [reg + reg] for these, because [reg +
8773 reg + offset] is not a legitimate addressing mode. */
8774 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
8776 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
8777 return force_reg (Pmode
, y
);
8782 return force_reg (Pmode
, x
);
8784 else if ((TARGET_ELF
8786 || !MACHO_DYNAMIC_NO_PIC_P
8792 && GET_CODE (x
) != CONST_INT
8793 && GET_CODE (x
) != CONST_WIDE_INT
8794 && GET_CODE (x
) != CONST_DOUBLE
8796 && GET_MODE_NUNITS (mode
) == 1
8797 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8798 || (/* ??? Assume floating point reg based on mode? */
8799 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8800 && (mode
== DFmode
|| mode
== DDmode
))))
8802 rtx reg
= gen_reg_rtx (Pmode
);
8804 emit_insn (gen_elf_high (reg
, x
));
8806 emit_insn (gen_macho_high (reg
, x
));
8807 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
8810 && GET_CODE (x
) == SYMBOL_REF
8811 && constant_pool_expr_p (x
)
8812 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
8813 return create_TOC_reference (x
, NULL_RTX
);
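/* Illustrative sketch (hypothetical helper): the large-displacement handling
   in rs6000_legitimize_address above splits VAL into a sign-extended low
   16-bit part that fits a d-form displacement and a high part suitable for
   addis, with HIGH + LOW == VAL whenever the value fits in 32 bits.  */
static inline void
example_split_displacement (long long val, long long *high, long long *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;  /* sign-extend the low 16 bits */
  *high = val - *low;                          /* a multiple of 0x10000 */
}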
8818 /* Debug version of rs6000_legitimize_address. */
8820 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
8826 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
8827 insns
= get_insns ();
8833 "\nrs6000_legitimize_address: mode %s, old code %s, "
8834 "new code %s, modified\n",
8835 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
8836 GET_RTX_NAME (GET_CODE (ret
)));
8838 fprintf (stderr
, "Original address:\n");
8841 fprintf (stderr
, "oldx:\n");
8844 fprintf (stderr
, "New address:\n");
8849 fprintf (stderr
, "Insns added:\n");
8850 debug_rtx_list (insns
, 20);
8856 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8857 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
8868 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8869 We need to emit DTP-relative relocations. */
8871 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
8873 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8878 fputs ("\t.long\t", file
);
8881 fputs (DOUBLE_INT_ASM_OP
, file
);
8886 output_addr_const (file
, x
);
8888 fputs ("@dtprel+0x8000", file
);
8889 else if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
)
8891 switch (SYMBOL_REF_TLS_MODEL (x
))
8895 case TLS_MODEL_LOCAL_EXEC
:
8896 fputs ("@le", file
);
8898 case TLS_MODEL_INITIAL_EXEC
:
8899 fputs ("@ie", file
);
8901 case TLS_MODEL_GLOBAL_DYNAMIC
:
8902 case TLS_MODEL_LOCAL_DYNAMIC
:
8911 /* Return true if X is a symbol that refers to real (rather than emulated)
8912 TLS. */
8915 rs6000_real_tls_symbol_ref_p (rtx x)
8917 return (GET_CODE (x) == SYMBOL_REF
8918 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8921 /* In the name of slightly smaller debug output, and to cater to
8922 general assembler lossage, recognize various UNSPEC sequences
8923 and turn them back into a direct symbol reference. */
8926 rs6000_delegitimize_address (rtx orig_x
)
8930 orig_x
= delegitimize_mem_from_attrs (orig_x
);
8936 if (TARGET_CMODEL
!= CMODEL_SMALL
8937 && GET_CODE (y
) == LO_SUM
)
8941 if (GET_CODE (y
) == PLUS
8942 && GET_MODE (y
) == Pmode
8943 && CONST_INT_P (XEXP (y
, 1)))
8945 offset
= XEXP (y
, 1);
8949 if (GET_CODE (y
) == UNSPEC
8950 && XINT (y
, 1) == UNSPEC_TOCREL
)
8952 y
= XVECEXP (y
, 0, 0);
8955 /* Do not associate thread-local symbols with the original
8956 constant pool symbol. */
8958 && GET_CODE (y
) == SYMBOL_REF
8959 && CONSTANT_POOL_ADDRESS_P (y
)
8960 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
8964 if (offset
!= NULL_RTX
)
8965 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
8966 if (!MEM_P (orig_x
))
8969 return replace_equiv_address_nv (orig_x
, y
);
8973 && GET_CODE (orig_x
) == LO_SUM
8974 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
8976 y
= XEXP (XEXP (orig_x
, 1), 0);
8977 if (GET_CODE (y
) == UNSPEC
8978 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
8979 return XVECEXP (y
, 0, 0);
8985 /* Return true if X shouldn't be emitted into the debug info.
8986 The linker doesn't like .toc section references from
8987 .debug_* sections, so reject .toc section symbols. */
8990 rs6000_const_not_ok_for_debug_p (rtx x
)
8992 if (GET_CODE (x
) == SYMBOL_REF
8993 && CONSTANT_POOL_ADDRESS_P (x
))
8995 rtx c
= get_pool_constant (x
);
8996 machine_mode cmode
= get_pool_mode (x
);
8997 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
9005 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9008 rs6000_legitimate_combined_insn (rtx_insn
*insn
)
9010 int icode
= INSN_CODE (insn
);
9012 /* Reject creating doloop insns. Combine should not be allowed
9013 to create these for a number of reasons:
9014 1) In a nested loop, if combine creates one of these in an
9015 outer loop and the register allocator happens to allocate ctr
9016 to the outer loop insn, then the inner loop can't use ctr.
9017 Inner loops ought to be more highly optimized.
9018 2) Combine often wants to create one of these from what was
9019 originally a three insn sequence, first combining the three
9020 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9021 allocated ctr, the splitter takes us back to the three insn
9022 sequence. It's better to stop combine at the two insn
9023 sequence.
9024 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9025 insns, the register allocator sometimes uses floating point
9026 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9027 jump insn and output reloads are not implemented for jumps,
9028 the ctrsi/ctrdi splitters need to handle all possible cases.
9029 That's a pain, and it gets to be seriously difficult when a
9030 splitter that runs after reload needs memory to transfer from
9031 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9032 for the difficult case. It's better to not create problems
9033 in the first place. */
9034 if (icode
!= CODE_FOR_nothing
9035 && (icode
== CODE_FOR_ctrsi_internal1
9036 || icode
== CODE_FOR_ctrdi_internal1
9037 || icode
== CODE_FOR_ctrsi_internal2
9038 || icode
== CODE_FOR_ctrdi_internal2
9039 || icode
== CODE_FOR_ctrsi_internal3
9040 || icode
== CODE_FOR_ctrdi_internal3
9041 || icode
== CODE_FOR_ctrsi_internal4
9042 || icode
== CODE_FOR_ctrdi_internal4
))
9048 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9050 static GTY(()) rtx rs6000_tls_symbol;
9052 rs6000_tls_get_addr (void)
9054 if (!rs6000_tls_symbol)
9055 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9057 return rs6000_tls_symbol;
9060 /* Construct the SYMBOL_REF for TLS GOT references. */
9062 static GTY(()) rtx rs6000_got_symbol;
9064 rs6000_got_sym (void)
9066 if (!rs6000_got_symbol)
9068 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9069 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9070 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9073 return rs6000_got_symbol;
9076 /* AIX Thread-Local Address support. */
9079 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
9081 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
9085 name
= XSTR (addr
, 0);
9086 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9087 or the symbol will be in TLS private data section. */
9088 if (name
[strlen (name
) - 1] != ']'
9089 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
9090 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
9092 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
9093 strcpy (tlsname
, name
);
9095 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
9096 tlsaddr
= copy_rtx (addr
);
9097 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
9102 /* Place addr into TOC constant pool. */
9103 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
9105 /* Output the TOC entry and create the MEM referencing the value. */
9106 if (constant_pool_expr_p (XEXP (sym
, 0))
9107 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
9109 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
9110 mem
= gen_const_mem (Pmode
, tocref
);
9111 set_mem_alias_set (mem
, get_TOC_alias_set ());
9116 /* Use global-dynamic for local-dynamic. */
9117 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
9118 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
9120 /* Create new TOC reference for @m symbol. */
9121 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
9122 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
9123 strcpy (tlsname
, "*LCM");
9124 strcat (tlsname
, name
+ 3);
9125 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
9126 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
9127 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
9128 rtx modmem
= gen_const_mem (Pmode
, tocref
);
9129 set_mem_alias_set (modmem
, get_TOC_alias_set ());
9131 rtx modreg
= gen_reg_rtx (Pmode
);
9132 emit_insn (gen_rtx_SET (modreg
, modmem
));
9134 tmpreg
= gen_reg_rtx (Pmode
);
9135 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9137 dest
= gen_reg_rtx (Pmode
);
9139 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
9141 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
9144 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9145 else if (TARGET_32BIT
)
9147 tlsreg
= gen_reg_rtx (SImode
);
9148 emit_insn (gen_tls_get_tpointer (tlsreg
));
9151 tlsreg
= gen_rtx_REG (DImode
, 13);
9153 /* Load the TOC value into temporary register. */
9154 tmpreg
= gen_reg_rtx (Pmode
);
9155 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9156 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
9157 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
9159 /* Add TOC symbol value to TLS pointer. */
9160 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
9165 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9166 this (thread-local) address. */
9169 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
9174 return rs6000_legitimize_tls_address_aix (addr
, model
);
9176 dest
= gen_reg_rtx (Pmode
);
9177 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
9183 tlsreg
= gen_rtx_REG (Pmode
, 13);
9184 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
9188 tlsreg
= gen_rtx_REG (Pmode
, 2);
9189 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
9193 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
9197 tmp
= gen_reg_rtx (Pmode
);
9200 tlsreg
= gen_rtx_REG (Pmode
, 13);
9201 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
9205 tlsreg
= gen_rtx_REG (Pmode
, 2);
9206 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
9210 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
9212 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
9217 rtx r3
, got
, tga
, tmp1
, tmp2
, call_insn
;
9219 /* We currently use relocations like @got@tlsgd for tls, which
9220 means the linker will handle allocation of tls entries, placing
9221 them in the .got section. So use a pointer to the .got section,
9222 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9223 or to secondary GOT sections used by 32-bit -fPIC. */
9225 got
= gen_rtx_REG (Pmode
, 2);
9229 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
9232 rtx gsym
= rs6000_got_sym ();
9233 got
= gen_reg_rtx (Pmode
);
9235 rs6000_emit_move (got
, gsym
, Pmode
);
9240 tmp1
= gen_reg_rtx (Pmode
);
9241 tmp2
= gen_reg_rtx (Pmode
);
9242 mem
= gen_const_mem (Pmode
, tmp1
);
9243 lab
= gen_label_rtx ();
9244 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
9245 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
9246 if (TARGET_LINK_STACK
)
9247 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
9248 emit_move_insn (tmp2
, mem
);
9249 rtx_insn
*last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
9250 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
9255 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
9257 tga
= rs6000_tls_get_addr ();
9258 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
9261 r3
= gen_rtx_REG (Pmode
, 3);
9262 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9265 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
9267 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
9269 else if (DEFAULT_ABI
== ABI_V4
)
9270 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
9273 call_insn
= last_call_insn ();
9274 PATTERN (call_insn
) = insn
;
9275 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9276 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9277 pic_offset_table_rtx
);
9279 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
9281 tga
= rs6000_tls_get_addr ();
9282 tmp1
= gen_reg_rtx (Pmode
);
9283 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
9286 r3
= gen_rtx_REG (Pmode
, 3);
9287 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9290 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
9292 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
9294 else if (DEFAULT_ABI
== ABI_V4
)
9295 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
9298 call_insn
= last_call_insn ();
9299 PATTERN (call_insn
) = insn
;
9300 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9301 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9302 pic_offset_table_rtx
);
9304 if (rs6000_tls_size
== 16)
9307 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
9309 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
9311 else if (rs6000_tls_size
== 32)
9313 tmp2
= gen_reg_rtx (Pmode
);
9315 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
9317 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
9320 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
9322 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
9326 tmp2
= gen_reg_rtx (Pmode
);
9328 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
9330 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
9332 insn
= gen_rtx_SET (dest
, gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
9338 /* IE, or 64-bit offset LE. */
9339 tmp2
= gen_reg_rtx (Pmode
);
9341 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
9343 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
9346 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
9348 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
9356 /* Only create the global variable for the stack protect guard if we are using
9357 the global flavor of that guard. */
9359 rs6000_init_stack_protect_guard (void)
9361 if (rs6000_stack_protector_guard
== SSP_GLOBAL
)
9362 return default_stack_protect_guard ();
9367 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9370 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9372 if (GET_CODE (x) == HIGH
9373 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9376 /* A TLS symbol in the TOC cannot contain a sum. */
9377 if (GET_CODE (x) == CONST
9378 && GET_CODE (XEXP (x, 0)) == PLUS
9379 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9380 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9383 /* Do not place an ELF TLS symbol in the constant pool. */
9384 return TARGET_ELF && tls_referenced_p (x);
9387 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9388 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9389 can be addressed relative to the toc pointer. */
9392 use_toc_relative_ref (rtx sym, machine_mode mode)
9394 return ((constant_pool_expr_p (sym)
9395 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9396 get_pool_mode (sym)))
9397 || (TARGET_CMODEL == CMODEL_MEDIUM
9398 && SYMBOL_REF_LOCAL_P (sym)
9399 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9402 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9403 replace the input X, or the original X if no replacement is called for.
9404 The output parameter *WIN is 1 if the calling macro should goto WIN,
9407 For RS/6000, we wish to handle large displacements off a base
9408 register by splitting the addend across an addi/addis pair and the mem insn.
9409 This cuts the number of extra insns needed from 3 to 1.
9411 On Darwin, we use this to generate code for floating point constants.
9412 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9413 The Darwin code is inside #if TARGET_MACHO because only then are the
9414 machopic_* functions defined. */
9416 rs6000_legitimize_reload_address (rtx x
, machine_mode mode
,
9417 int opnum
, int type
,
9418 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
9420 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9421 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9423 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9424 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9427 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
9428 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)
9429 || (mode
== SFmode
&& recog_data
.operand_mode
[0] == V4SFmode
9430 && TARGET_P9_VECTOR
)
9431 || (mode
== SImode
&& recog_data
.operand_mode
[0] == V4SImode
9432 && TARGET_P9_VECTOR
)))
9433 reg_offset_p
= false;
9435 /* We must recognize output that we have already generated ourselves. */
9436 if (GET_CODE (x
) == PLUS
9437 && GET_CODE (XEXP (x
, 0)) == PLUS
9438 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9439 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9440 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9442 if (TARGET_DEBUG_ADDR
)
9444 fprintf (stderr
, "\nlegitimize_reload_address push_reload #1:\n");
9447 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9448 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9449 opnum
, (enum reload_type
) type
);
9454 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9455 if (GET_CODE (x
) == LO_SUM
9456 && GET_CODE (XEXP (x
, 0)) == HIGH
)
9458 if (TARGET_DEBUG_ADDR
)
9460 fprintf (stderr
, "\nlegitimize_reload_address push_reload #2:\n");
9463 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9464 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9465 opnum
, (enum reload_type
) type
);
9471 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
9472 && GET_CODE (x
) == LO_SUM
9473 && GET_CODE (XEXP (x
, 0)) == PLUS
9474 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
9475 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
9476 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
9477 && machopic_operand_p (XEXP (x
, 1)))
9479 /* Result of previous invocation of this function on Darwin
9480 floating point constant. */
9481 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9482 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9483 opnum
, (enum reload_type
) type
);
9489 if (TARGET_CMODEL
!= CMODEL_SMALL
9492 && small_toc_ref (x
, VOIDmode
))
9494 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
9495 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
9496 if (TARGET_DEBUG_ADDR
)
9498 fprintf (stderr
, "\nlegitimize_reload_address push_reload #3:\n");
9501 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9502 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9503 opnum
, (enum reload_type
) type
);
9508 if (GET_CODE (x
) == PLUS
9509 && REG_P (XEXP (x
, 0))
9510 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
9511 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
9512 && CONST_INT_P (XEXP (x
, 1))
9514 && !PAIRED_VECTOR_MODE (mode
)
9515 && (quad_offset_p
|| !VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
9517 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
9518 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
9520 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9522 /* Check for 32-bit overflow or quad addresses with one of the
9523 four least significant bits set. */
9524 if (high
+ low
!= val
9525 || (quad_offset_p
&& (low
& 0xf)))
9531 /* Reload the high part into a base reg; leave the low part
9532 in the mem directly. */
9534 x
= gen_rtx_PLUS (GET_MODE (x
),
9535 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
9539 if (TARGET_DEBUG_ADDR
)
9541 fprintf (stderr
, "\nlegitimize_reload_address push_reload #4:\n");
9544 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9545 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9546 opnum
, (enum reload_type
) type
);
9551 if (GET_CODE (x
) == SYMBOL_REF
9554 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
9555 && !PAIRED_VECTOR_MODE (mode
)
9557 && DEFAULT_ABI
== ABI_DARWIN
9558 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
9559 && machopic_symbol_defined_p (x
)
9561 && DEFAULT_ABI
== ABI_V4
9564 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9565 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9567 ??? Assume floating point reg based on mode? This assumption is
9568 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9569 where reload ends up doing a DFmode load of a constant from
9570 mem using two gprs. Unfortunately, at this point reload
9571 hasn't yet selected regs so poking around in reload data
9572 won't help and even if we could figure out the regs reliably,
9573 we'd still want to allow this transformation when the mem is
9574 naturally aligned. Since we say the address is good here, we
9575 can't disable offsets from LO_SUMs in mem_operand_gpr.
9576 FIXME: Allow offset from lo_sum for other modes too, when
9577 mem is sufficiently aligned.
9579 Also disallow this if the type can go in VMX/Altivec registers, since
9580 those registers do not have d-form (reg+offset) address modes. */
9581 && !reg_addr
[mode
].scalar_in_vmx_p
9586 && (mode
!= TImode
|| !TARGET_VSX
)
9588 && (mode
!= DImode
|| TARGET_POWERPC64
)
9589 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
9590 || (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)))
9595 rtx offset
= machopic_gen_offset (x
);
9596 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9597 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
9598 gen_rtx_HIGH (Pmode
, offset
)), offset
);
9602 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9603 gen_rtx_HIGH (Pmode
, x
), x
);
9605 if (TARGET_DEBUG_ADDR
)
9607 fprintf (stderr
, "\nlegitimize_reload_address push_reload #5:\n");
9610 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9611 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9612 opnum
, (enum reload_type
) type
);
9617 /* Reload an offset address wrapped by an AND that represents the
9618 masking of the lower bits. Strip the outer AND and let reload
9619 convert the offset address into an indirect address. For VSX,
9620 force reload to create the address with an AND in a separate
9621 register, because we can't guarantee an altivec register will
9622 be used. */
9623 if (VECTOR_MEM_ALTIVEC_P (mode
)
9624 && GET_CODE (x
) == AND
9625 && GET_CODE (XEXP (x
, 0)) == PLUS
9626 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9627 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9628 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9629 && INTVAL (XEXP (x
, 1)) == -16)
9639 && GET_CODE (x
) == SYMBOL_REF
9640 && use_toc_relative_ref (x
, mode
))
9642 x
= create_TOC_reference (x
, NULL_RTX
);
9643 if (TARGET_CMODEL
!= CMODEL_SMALL
)
9645 if (TARGET_DEBUG_ADDR
)
9647 fprintf (stderr
, "\nlegitimize_reload_address push_reload #6:\n");
9650 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9651 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9652 opnum
, (enum reload_type
) type
);
9661 /* Debug version of rs6000_legitimize_reload_address. */
9663 rs6000_debug_legitimize_reload_address (rtx x
, machine_mode mode
,
9664 int opnum
, int type
,
9665 int ind_levels
, int *win
)
9667 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
9670 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9671 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9672 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
9676 fprintf (stderr
, "Same address returned\n");
9678 fprintf (stderr
, "NULL returned\n");
9681 fprintf (stderr
, "New address:\n");
9688 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9689 that is a valid memory address for an instruction.
9690 The MODE argument is the machine mode for the MEM expression
9691 that wants to use this address.
9693 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9694 refers to a constant pool entry of an address (or the sum of it
9695 plus a constant), a short (16-bit signed) constant plus a register,
9696 the sum of two registers, or a register indirect, possibly with an
9697 auto-increment. For DFmode, DDmode and DImode with a constant plus
9698 register, we must ensure that both words are addressable or PowerPC64
9699 with offset word aligned.
9701 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9702 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9703 because adjacent memory cells are accessed by adding word-sized offsets
9704 during assembly output. */
9706 rs6000_legitimate_address_p (machine_mode mode
, rtx x
, bool reg_ok_strict
)
9708 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9709 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9711 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9712 if (VECTOR_MEM_ALTIVEC_P (mode
)
9713 && GET_CODE (x
) == AND
9714 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9715 && INTVAL (XEXP (x
, 1)) == -16)
9718 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
9720 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
9723 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
9724 && mode_supports_pre_incdec_p (mode
)
9725 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
9727 /* Handle restricted vector d-form offsets in ISA 3.0. */
9730 if (quad_address_p (x
, mode
, reg_ok_strict
))
9733 else if (virtual_stack_registers_memory_p (x
))
9736 else if (reg_offset_p
)
9738 if (legitimate_small_data_p (mode
, x
))
9740 if (legitimate_constant_pool_address_p (x
, mode
,
9741 reg_ok_strict
|| lra_in_progress
))
9743 if (reg_addr
[mode
].fused_toc
&& GET_CODE (x
) == UNSPEC
9744 && XINT (x
, 1) == UNSPEC_FUSION_ADDIS
)
9748 /* For TImode, if we have TImode in VSX registers, only allow register
9749 indirect addresses. This will allow the values to go in either GPRs
9750 or VSX registers without reloading. The vector types would tend to
9751 go into VSX registers, so we allow REG+REG, while TImode seems
9752 somewhat split, in that some uses are GPR based, and some VSX based. */
9753 /* FIXME: We could loosen this by changing the following to
9754 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9755 but currently we cannot allow REG+REG addressing for TImode. See
9756 PR72827 for complete details on how this ends up hoodwinking DSE. */
9757 if (mode
== TImode
&& TARGET_VSX
)
9759 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9762 && GET_CODE (x
) == PLUS
9763 && GET_CODE (XEXP (x
, 0)) == REG
9764 && (XEXP (x
, 0) == virtual_stack_vars_rtx
9765 || XEXP (x
, 0) == arg_pointer_rtx
)
9766 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9768 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
9770 if (!FLOAT128_2REG_P (mode
)
9771 && ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
9773 || (mode
!= DFmode
&& mode
!= DDmode
))
9774 && (TARGET_POWERPC64
|| mode
!= DImode
)
9775 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
9777 && !avoiding_indexed_address_p (mode
)
9778 && legitimate_indexed_address_p (x
, reg_ok_strict
))
9780 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
9781 && mode_supports_pre_modify_p (mode
)
9782 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
9783 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
9784 reg_ok_strict
, false)
9785 || (!avoiding_indexed_address_p (mode
)
9786 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
9787 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
9789 if (reg_offset_p
&& !quad_offset_p
9790 && legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
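/* Illustrative sketch (hypothetical helper): an address wrapped in
   (and ... (const_int -16)) simply has its four low bits cleared, yielding
   the 16-byte aligned address that lvx/stvx implicitly use; the check at the
   top of rs6000_legitimate_address_p therefore strips such an AND before
   validating the inner address.  */
static inline unsigned long long
example_altivec_align_address (unsigned long long addr)
{
  return addr & ~0xfULL;   /* same effect as AND with -16 */
}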
9795 /* Debug version of rs6000_legitimate_address_p. */
9797 rs6000_debug_legitimate_address_p (machine_mode mode
, rtx x
,
9800 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
9802 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9803 "strict = %d, reload = %s, code = %s\n",
9804 ret
? "true" : "false",
9805 GET_MODE_NAME (mode
),
9807 (reload_completed
? "after" : "before"),
9808 GET_RTX_NAME (GET_CODE (x
)));
9814 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9817 rs6000_mode_dependent_address_p (const_rtx addr,
9818 addr_space_t as ATTRIBUTE_UNUSED)
9820 return rs6000_mode_dependent_address_ptr (addr);
9823 /* Go to LABEL if ADDR (a legitimate address expression)
9824 has an effect that depends on the machine mode it is used for.
9826 On the RS/6000 this is true of all integral offsets (since AltiVec
9827 and VSX modes don't allow them) or is a pre-increment or decrement.
9829 ??? Except that due to conceptual problems in offsettable_address_p
9830 we can't really report the problems of integral offsets. So leave
9831 this assuming that the adjustable offset must be valid for the
9832 sub-words of a TFmode operand, which is what we had before. */
9835 rs6000_mode_dependent_address (const_rtx addr
)
9837 switch (GET_CODE (addr
))
9840 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9841 is considered a legitimate address before reload, so there
9842 are no offset restrictions in that case. Note that this
9843 condition is safe in strict mode because any address involving
9844 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9845 been rejected as illegitimate. */
9846 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
9847 && XEXP (addr
, 0) != arg_pointer_rtx
9848 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
9850 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
9851 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
9856 /* Anything in the constant pool is sufficiently aligned that
9857 all bytes have the same high part address. */
9858 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
9860 /* Auto-increment cases are now treated generically in recog.c. */
9862 return TARGET_UPDATE
;
9864 /* AND is only allowed in Altivec loads. */
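/* Illustrative sketch (hypothetical helper): in the PLUS case above an offset
   is mode dependent when reaching the later words of a multi-word operand
   would overflow the signed 16-bit displacement; the code allows 8 bytes of
   slack on 64-bit (one extra doubleword) and 12 bytes on 32-bit (three extra
   words).  */
static inline int
example_offset_mode_dependent (unsigned long long val, int powerpc64)
{
  return val + 0x8000 >= 0x10000 - (powerpc64 ? 8 : 12);
}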
9875 /* Debug version of rs6000_mode_dependent_address. */
9877 rs6000_debug_mode_dependent_address (const_rtx addr
)
9879 bool ret
= rs6000_mode_dependent_address (addr
);
9881 fprintf (stderr
, "\nrs6000_mode_dependent_address: ret = %s\n",
9882 ret
? "true" : "false");
9888 /* Implement FIND_BASE_TERM. */
9891 rs6000_find_base_term (rtx op
)
9896 if (GET_CODE (base
) == CONST
)
9897 base
= XEXP (base
, 0);
9898 if (GET_CODE (base
) == PLUS
)
9899 base
= XEXP (base
, 0);
9900 if (GET_CODE (base
) == UNSPEC
)
9901 switch (XINT (base
, 1))
9904 case UNSPEC_MACHOPIC_OFFSET
:
9905 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9906 for aliasing purposes. */
9907 return XVECEXP (base
, 0, 0);
9913 /* More elaborate version of recog's offsettable_memref_p predicate
9914 that works around the ??? note of rs6000_mode_dependent_address.
9915 In particular it accepts
9917 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9919 in 32-bit mode, that the recog predicate rejects. */
9922 rs6000_offsettable_memref_p (rtx op
, machine_mode reg_mode
)
9929 /* First mimic offsettable_memref_p. */
9930 if (offsettable_address_p (true, GET_MODE (op
), XEXP (op
, 0)))
9933 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9934 the latter predicate knows nothing about the mode of the memory
9935 reference and, therefore, assumes that it is the largest supported
9936 mode (TFmode). As a consequence, legitimate offsettable memory
9937 references are rejected. rs6000_legitimate_offset_address_p contains
9938 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9939 at least with a little bit of help here given that we know the
9940 actual registers used. */
9941 worst_case
= ((TARGET_POWERPC64
&& GET_MODE_CLASS (reg_mode
) == MODE_INT
)
9942 || GET_MODE_SIZE (reg_mode
) == 4);
9943 return rs6000_legitimate_offset_address_p (GET_MODE (op
), XEXP (op
, 0),
9947 /* Determine the reassociation width to be used in reassociate_bb.
9948 This takes into account how many parallel operations we
9949 can actually do of a given type, and also the latency.
9953 vect add/sub/mul 2/cycle
9954 fp add/sub/mul 2/cycle
9959 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED
,
9964 case PROCESSOR_POWER8
:
9965 case PROCESSOR_POWER9
:
9966 if (DECIMAL_FLOAT_MODE_P (mode
))
9968 if (VECTOR_MODE_P (mode
))
9970 if (INTEGRAL_MODE_P (mode
))
9971 return opc
== MULT_EXPR
? 4 : 6;
9972 if (FLOAT_MODE_P (mode
))
9981 /* Change register usage conditional on target flags. */
9983 rs6000_conditional_register_usage (void)
9987 if (TARGET_DEBUG_TARGET
)
9988 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
9990 /* Set MQ register fixed (already call_used) so that it will not be
9994 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9996 fixed_regs
[13] = call_used_regs
[13]
9997 = call_really_used_regs
[13] = 1;
9999 /* Conditionally disable FPRs. */
10000 if (TARGET_SOFT_FLOAT
)
10001 for (i
= 32; i
< 64; i
++)
10002 fixed_regs
[i
] = call_used_regs
[i
]
10003 = call_really_used_regs
[i
] = 1;
10005 /* The TOC register is not killed across calls in a way that is
10006 visible to the compiler. */
10007 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
10008 call_really_used_regs
[2] = 0;
10010 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
10011 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10013 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
10014 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10015 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10016 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10018 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
10019 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10020 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10021 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10023 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
10024 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10025 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10027 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
10029 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
10030 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10031 call_really_used_regs
[VRSAVE_REGNO
] = 1;
10034 if (TARGET_ALTIVEC
|| TARGET_VSX
)
10035 global_regs
[VSCR_REGNO
] = 1;
10037 if (TARGET_ALTIVEC_ABI
)
10039 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
10040 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10042 /* AIX reserves VR20:31 in non-extended ABI mode. */
10044 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
10045 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10050 /* Output insns to set DEST equal to the constant SOURCE as a series of
10051 lis, ori and shl instructions and return TRUE. */
bool
rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case E_SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
			      GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
			      gen_rtx_IOR (SImode, copy_rtx (temp),
					   GEN_INT (c & 0xffff))));
      break;

    case E_DImode:
      if (!TARGET_POWERPC64)
	{
	  rtx hi, lo;

	  hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
				      DImode);
	  lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
				      DImode);
	  emit_move_insn (hi, GEN_INT (c >> 32));
	  c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
	  emit_move_insn (lo, GEN_INT (c));
	}
      else
	rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */
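/* The constant is handled 16 bits at a time: ud1 holds bits 0-15, ud2 bits
   16-31, ud3 bits 32-47 and ud4 bits 48-63.  For a full 64-bit value such
   as 0x1234_5678_9abc_def0 the general case below roughly corresponds to

       lis  rD, 0x1234      ; ud4
       ori  rD, rD, 0x5678  ; ud3 (skipped when ud3 == 0)
       sldi rD, rD, 32
       oris rD, rD, 0x9abc  ; ud2 (skipped when ud2 == 0)
       ori  rD, rD, 0xdef0  ; ud1 (skipped when ud1 == 0)

   rD is a placeholder for whatever register ends up holding the value; the
   shorter special cases handled first need fewer instructions.  */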
static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	   || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
      emit_move_insn (dest,
		      gen_rtx_ZERO_EXTEND (DImode,
					   gen_lowpart (SImode,
							copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
	   || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud3)));

      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (32)));
      if (ud2 != 0)
	emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
}
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
rtvec
rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  switch (mode)
    {
    case E_V1TImode:
      subparts = 1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      subparts = 2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      subparts = 4;
      break;
    case E_V8HImode:
      subparts = 8;
      break;
    case E_V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
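/* As an illustration, for V4SImode the two loops above build the selector
   { 2, 3, 0, 1 }: element i of the result takes element (i + 2) mod 4 of
   the source, i.e. the two doubleword halves of the vector are
   exchanged.  */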
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */
static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     scalar.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
						  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
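/* For the single-scalar TImode/V1TImode case the doubleword swap is simply
   a 128-bit rotate by 64 bits, which is why a ROTATE rtx is used there
   instead of a two-element VEC_SELECT on the permutation vector built by
   rs6000_const_vec.  */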
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */
static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */
static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !TARGET_P9_VECTOR
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
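/* On a little-endian target an lxvd2x loads the two doublewords of a vector
   in swapped order; the xxpermdi represented by the second permute swaps
   them back, so DEST ends up holding the elements in array order.  When a
   value is loaded and then immediately stored, the intervening swap pairs
   cancel each other, and generating them at expand time lets the later
   swap-optimization pass delete the redundant permutes.  */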
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

	(SUBREG:SF (REG:SI ...))
	(SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

	(SUBREG:SF (REG:SI)) and
	(SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to convert
   the bits before we can use a direct move or operate on the bits in the
   vector register as an integer type.

   Skip things like (set (SUBREG:SI (...) (SUBREG:SI (...)).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
	{
	  emit_insn (gen_movsi_from_sf (dest, inner_source));
	  return true;
	}

      if (mode == SFmode && inner_mode == SImode)
	{
	  emit_insn (gen_movsf_from_si (dest, inner_source));
	  return true;
	}
    }

  return false;
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
	       "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
	       GET_MODE_NAME (mode),
	       lra_in_progress,
	       reload_completed,
	       can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (CONST_WIDE_INT_P (operands[1])
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* This should be fixed with the introduction of CONST_WIDE_INT.  */
      gcc_unreachable ();
    }

  /* See if we need to special case SImode/SFmode SUBREG moves.  */
  if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
      && rs6000_emit_move_si_sf_subreg (dest, source, mode))
    return;
  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
	  || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
					    ? 32 : MEM_ALIGN (operands[0])))
	    || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
					       ? 32
					       : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }
  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* 128-bit constant floating-point values on Darwin should really be loaded
     as two parts.  However, this premature splitting is a problem when DFmode
     values can go into Altivec registers.  */
  if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
      && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
			simplify_gen_subreg (DFmode, operands[1], mode, 0),
			DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
					     GET_MODE_SIZE (DFmode)),
			simplify_gen_subreg (DFmode, operands[1], mode,
					     GET_MODE_SIZE (DFmode)),
			DFmode);
      return;
    }
  /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
     p1:SD) if p1 is not of floating point class and p0 is spilled as
     we can have no analogous movsd_store for this.  */
  if (lra_in_progress && mode == DDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
      && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[1]));

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
	}
      if (regno >= 0 && ! FP_REGNO_P (regno))
	{
	  mode = SDmode;
	  operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
	  operands[1] = SUBREG_REG (operands[1]);
	}
    }
  if (lra_in_progress
      && mode == SDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && (REG_P (operands[1])
	  || (GET_CODE (operands[1]) == SUBREG
	      && REG_P (SUBREG_REG (operands[1])))))
    {
      int regno = REGNO (GET_CODE (operands[1]) == SUBREG
			 ? SUBREG_REG (operands[1]) : operands[1]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[0]) != DDmode)
	    operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
	  emit_insn (gen_movsd_store (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }
  /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
     p:DD)) if p0 is not of floating point class and p1 is spilled as
     we can have no analogous movsd_load for this.  */
  if (lra_in_progress && mode == DDmode
      && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
      && GET_MODE (SUBREG_REG (operands[0])) == SDmode
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[0]));

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
	}
      if (regno >= 0 && ! FP_REGNO_P (regno))
	{
	  mode = SDmode;
	  operands[0] = SUBREG_REG (operands[0]);
	  operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
	}
    }
  if (lra_in_progress
      && mode == SDmode
      && (REG_P (operands[0])
	  || (GET_CODE (operands[0]) == SUBREG
	      && REG_P (SUBREG_REG (operands[0]))))
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      int regno = REGNO (GET_CODE (operands[0]) == SUBREG
			 ? SUBREG_REG (operands[0]) : operands[0]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[1]) != DDmode)
	    operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
	  emit_insn (gen_movsd_load (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case E_HImode:
    case E_QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_TFmode:
    case E_TDmode:
    case E_IFmode:
    case E_KFmode:
      if (FLOAT128_2REG_P (mode))
	rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case E_DFmode:
    case E_DDmode:
    case E_SFmode:
    case E_SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_SImode:
    case E_DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && use_toc_relative_ref (operands[1], mode))
	operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && GET_CODE (operands[1]) != HIGH
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && (num_insns_constant (operands[1], mode)
			   > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && !toc_relative_expr_p (operands[1], false, NULL, NULL)
	       && (TARGET_CMODEL == CMODEL_SMALL
		   || can_create_pseudo_p ()
		   || (REG_P (operands[0])
		       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
	{
#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (operands[0], operands[1]));
	      break;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym
		= force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      emit_insn (gen_add3_insn (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && use_toc_relative_ref (XEXP (operands[1], 0), mode))
	    {
	      rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
						 operands[0]);
	      operands[1] = gen_const_mem (mode, tocref);
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case E_TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
	rs6000_eliminate_indexed_memrefs (operands);
      break;

    case E_PTImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = validize_mem (operands[1]);

  emit_insn (gen_rtx_SET (operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)	\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   or vector type.  If a non-floating point or vector type is found, or
   if a floating point or vector type that doesn't match a non-VOIDmode
   *MODEP is found, then return -1, otherwise return the count in the
   sub-tree.  */
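/* For example, "struct { float x, y, z, w; }" walks to four REAL_TYPE
   leaves of SFmode, so the recursion returns 4 with *MODEP set to SFmode,
   whereas "struct { float x; int y; }" hits a non-float member and
   returns -1.  */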
static int
rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
{
  machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 2;

      break;

    case VECTOR_TYPE:
      if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
	return -1;

      /* Use V4SImode as representative of all 128-bit vector types.  */
      size = int_size_in_bytes (type);
      switch (size)
	{
	case 16:
	  mode = V4SImode;
	  break;
	default:
	  return -1;
	}

      if (*modep == VOIDmode)
	*modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
	 equivalent for the purposes of being homogeneous aggregates
	 if they are the same size.  */
      if (*modep == mode)
	return 1;

      break;

    case ARRAY_TYPE:
      {
	int count;
	tree index = TYPE_DOMAIN (type);

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
	if (count == -1
	    || !index
	    || !TYPE_MAX_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
	    || !TYPE_MIN_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)))
	  return -1;

	count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
		  - tree_to_uhwi (TYPE_MIN_VALUE (index)));

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    case RECORD_TYPE:
      {
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count += sub_count;
	  }

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	/* These aren't very interesting except in a degenerate case.  */
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count = count > sub_count ? count : sub_count;
	  }

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
				       machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      if (elt_mode)
		*elt_mode = field_mode;
	      if (n_elts)
		*n_elts = field_count;
	      return true;
	    }
	}
    }

  if (elt_mode)
    *elt_mode = mode;
  if (n_elts)
    *n_elts = 1;
  return false;
}
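/* Under the ELFv2 ABI, "struct { double r, i; }" is therefore reported as a
   homogeneous aggregate with *ELT_MODE == DFmode and *N_ELTS == 2 and is
   passed in two consecutive FPRs, while a struct mixing a double with an
   int is not homogeneous and falls back to the usual GPR/stack rules.  */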
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
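/* Concretely, int_size_in_bytes yields -1 for a variable-sized type, and
   casting that to unsigned HOST_WIDE_INT produces the maximum value, so the
   "size > 8" style comparisons below treat every variable-sized object as
   too large for registers and force it into memory.  */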
static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
					     NULL, NULL))
    return false;

  /* The ELFv2 ABI returns aggregates up to 16B in registers */
  if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
    return false;

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (OPT_Wpsabi, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return true;

  return false;
}
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args,
		      tree fndecl ATTRIBUTE_UNUSED,
		      machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);
  cum->libcall = libcall;

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   get_tree_code_name (TREE_CODE (ret_type)));
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
	{
	  tree return_type;

	  if (fntype)
	    {
	      return_type = TREE_TYPE (fntype);
	      return_mode = TYPE_MODE (return_type);
	    }
	  else
	    return_type = lang_hooks.types.type_for_mode (return_mode, 0);

	  if (return_type != NULL)
	    {
	      if (TREE_CODE (return_type) == RECORD_TYPE
		  && TYPE_TRANSPARENT_AGGR (return_type))
		{
		  return_type = TREE_TYPE (first_field (return_type));
		  return_mode = TYPE_MODE (return_type);
		}
	      if (AGGREGATE_TYPE_P (return_type)
		  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
		      <= 8))
		rs6000_returns_struct = true;
	    }
	  if (SCALAR_FLOAT_MODE_P (return_mode))
	    {
	      rs6000_passes_float = true;
	      if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
		  && (FLOAT128_IBM_P (return_mode)
		      || FLOAT128_IEEE_P (return_mode)
		      || (return_type != NULL
			  && (TYPE_MAIN_VARIANT (return_type)
			      == long_double_type_node))))
		rs6000_passes_long_double = true;
	    }
	  if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
	      || PAIRED_VECTOR_MODE (return_mode))
	    rs6000_passes_vector = true;
	}
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use %qs"
	     " to enable them", "-maltivec");
    }
}
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}

/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}
/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			      machine_mode mode,
			      int *punsignedp ATTRIBUTE_UNUSED,
			      const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}

static inline bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
}
/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (machine_mode mode, const_tree type)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && !is_complex_IBM_long_double (mode)
	      && FLOAT128_2REG_P (mode))))
    return 64;
  else if (FLOAT128_VECTOR_P (mode))
    return 128;
  else if (PAIRED_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;

  /* Aggregate types that need > 8 byte alignment are quadword-aligned
     in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
     -mcompat-align-parm is used.  */
  if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
       || DEFAULT_ABI == ABI_ELFv2)
      && type && TYPE_ALIGN (type) > 64)
    {
      /* "Aggregate" means any AGGREGATE_TYPE except for single-element
	 or homogeneous float/vector aggregates here.  We already handled
	 vector aggregates above, but still need to check for float here. */
      bool aggregate_p = (AGGREGATE_TYPE_P (type)
			  && !SCALAR_FLOAT_MODE_P (elt_mode));

      /* We used to check for BLKmode instead of the above aggregate type
	 check.  Warn when this results in any difference to the ABI.  */
      if (aggregate_p != (mode == BLKmode))
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the ABI of passing aggregates with %d-byte alignment"
		      " has changed in GCC 5",
		      (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
	    }
	}

      if (aggregate_p)
	return 128;
    }

  /* Similar for the Darwin64 ABI.  Note that for historical reasons we
     implement the "aggregate type" check as a BLKmode check here; this
     means certain aggregate types are in fact not aligned.  */
  if (TARGET_MACHO && rs6000_darwin64_abi
      && mode == BLKmode
      && type && TYPE_ALIGN (type) > 64)
    return 128;

  return PARM_BOUNDARY;
}
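/* As a worked example: under the 32-bit SVR4 ABI a "long long" or "double"
   argument (GET_MODE_SIZE == 8) is reported as 64-bit aligned and so starts
   on an even register or stack word, while an Altivec vector argument is
   reported as 128-bit aligned and lands on a quadword boundary in the
   parameter area.  */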
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}

/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  machine_mode mode;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
	  || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	}
    }

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned. */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->fregno += n_fpregs;
	    /* Single-precision floats present a special problem for
	       us, because they are smaller than an 8-byte GPR, and so
	       the structure-packing rules combined with the standard
	       varargs behavior mean that we want to pack float/float
	       and float/int combinations into a single register's
	       space.  This is complicated by the arg advance flushing,
	       which works on arbitrarily large groups of int-type
	       fields.  */
	    if (mode == SFmode)
	      {
		if (cum->floats_in_gpr == 1)
		  {
		    /* Two floats in a word; count the word and reset
		       the float count.  */
		    cum->words++;
		    cum->floats_in_gpr = 0;
		  }
		else if (bitpos % 64 == 0)
		  {
		    /* A float at the beginning of an 8-byte word;
		       count it and put off adjusting cum->words until
		       we see if a arg advance flush is going to do it
		       for us.  */
		    cum->floats_in_gpr++;
		  }
		else
		  {
		    /* The float is at the end of a word, preceded
		       by integer fields, so the arg advance flush
		       just above has already set cum->words and
		       everything is taken care of.  */
		  }
	      }
	    else
	      cum->words += n_fpregs;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
			       const_tree type, bool named, int depth)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  rs6000_passes_float = true;
	  if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
	      && (FLOAT128_IBM_P (mode)
		  || FLOAT128_IEEE_P (mode)
		  || (type != NULL
		      && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
	    rs6000_passes_long_double = true;
	}
      if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	  || (PAIRED_VECTOR_MODE (mode)
	      && !cum->stdarg
	      && cum->sysv_gregno <= GP_ARG_MAX_REG))
	rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
	{
	  cum->vregno += n_elts;

	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use %qs"
		   " to enable them", "-maltivec");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	       && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct. */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  cum->floats_in_gpr = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT, 1);
	}
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
		   cum->words, TYPE_ALIGN (type), size);
	  fprintf (stderr,
		   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
		   cum->nargs_prototype, cum->prototype,
		   GET_MODE_NAME (mode));
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || FLOAT128_IBM_P (mode)
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
	     As does any other 2 word item such as complex int due to a
	     historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long is aligned on the stack.  So are other 2 word
		 items such as complex int due to a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past when we've started
	     spilling to the stack indicates the fact that we've started
	     spilling to the stack to expand_builtin_saveregs.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
static void
rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++]
	= gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	    switch (mode)
	      {
	      case E_SCmode: mode = SFmode; break;
	      case E_DCmode: mode = DFmode; break;
	      case E_TCmode: mode = TFmode; break;
	      default: break;
	      }

	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	      {
		gcc_assert (cum->fregno == FP_ARG_MAX_REG
			    && (mode == TFmode || mode == TDmode));
		/* Long double or _Decimal128 split over regs and memory.  */
		mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
		cum->fregno--;
	      }

	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (FLOAT128_2REG_P (mode))
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
                            bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
        return NULL_RTX;	/* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}

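/* Illustrative sketch (not from the original source): for a hypothetical
   Darwin64 struct such as

     struct s { double d; int i; };

   the PARALLEL built above would look roughly like

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])

   i.e. each EXPR_LIST pairs a hard register with the byte offset of the
   piece it carries within the in-memory layout of the argument.  Which
   registers are used depends on how many argument registers CUM has
   already consumed, so the registers shown here are only examples.  */
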
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
                           int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}

/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
                           int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
          || mode == BLKmode
          || (TARGET_32BIT && TARGET_POWERPC64))
        {
          /* If this is partially on the stack, then we only
             include the portion actually in registers here.  */
          machine_mode rmode = TARGET_32BIT ? SImode : DImode;
          int i = 0;

          if (align_words + n_words > GP_ARG_NUM_REG)
            {
              /* Not all of the arg fits in gprs.  Say that it goes in memory
                 too, using a magic NULL_RTX component.  Also see comment in
                 rs6000_mixed_function_arg for why the normal
                 function_arg_partial_nregs scheme doesn't work in this case.  */
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
            }

          do
            {
              rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
              rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }
          while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
        }
      else
        {
          /* The whole arg fits in gprs.  */
          rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
        }
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}

/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
        return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
        return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}

/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
                     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
          && (cum->call_cookie & CALL_LIBCALL) == 0
          && (cum->stdarg
              || (cum->nargs_prototype < 0
                  && (cum->prototype || TARGET_NO_PROTOTYPE)))
          && TARGET_HARD_FLOAT)
        return GEN_INT (cum->call_cookie
                        | ((cum->fregno == FP_ARG_MIN_REG)
                           ? CALL_V4_SET_FP_ARGS
                           : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
      if (rslt != NULL_RTX)
        return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
         Library support functions for IEEE 128-bit are assumed to not need the
         value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        {
          int align_words = ROUND_UP (cum->words, 2);
          k = rs6000_psave_function_arg (mode, type, align_words, rvec);
        }

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
        {
          r = gen_rtx_REG (elt_mode, cum->vregno + i);
          off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
           && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
               || (type && TREE_CODE (type) == VECTOR_TYPE
                   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
        return NULL_RTX;
      else
        {
          /* Vector parameters to varargs functions under AIX or Darwin
             get passed in memory and possibly also in GPRs.  */
          int align, align_words, n_words;
          machine_mode part_mode;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          align_words = cum->words + align;

          /* Out of registers?  Memory, then.  */
          if (align_words >= GP_ARG_NUM_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          /* The vector value goes in GPRs.  Only the part of the
             value in GPRs is reported here.  */
          part_mode = mode;
          n_words = rs6000_arg_size (mode, type);
          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Fortunately, there are only two possibilities, the value
               is either wholly in GPRs or half in GPRs and half not.  */
            part_mode = DImode;

          return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
        }
    }

  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            return gen_rtx_REG (mode, cum->fregno);
          else
            return NULL_RTX;
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type,
                                              gregno - GP_ARG_MIN_REG);
          return gen_rtx_REG (mode, gregno);
        }
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
         This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
        cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
        {
          rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
          rtx r, off;
          int i, k = 0;
          unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
          int fpr_words;

          /* Do we also need to pass this argument in the parameter
             save area?  */
          if (type && (cum->nargs_prototype <= 0
                       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                           && TARGET_XL_COMPAT
                           && align_words >= GP_ARG_NUM_REG)))
            k = rs6000_psave_function_arg (mode, type, align_words, rvec);

          /* Describe where this argument goes in the fprs.  */
          for (i = 0; i < n_elts
                      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
            {
              /* Check if the argument is split over registers and memory.
                 This can only ever happen for long double or _Decimal128;
                 complex types are handled via split_complex_arg.  */
              machine_mode fmode = elt_mode;
              if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
                {
                  gcc_assert (FLOAT128_2REG_P (fmode));
                  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
                }

              r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
              off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }

          /* If there were not enough FPRs to hold the argument, the rest
             usually goes into memory.  However, if the current position
             is still within the register parameter area, a portion may
             actually have to go into GPRs.

             Note that it may happen that the portion of the argument
             passed in the first "half" of the first GPR was already
             passed in the last FPR as well.

             For unnamed arguments, we already set up GPRs to cover the
             whole argument in rs6000_psave_function_arg, so there is
             nothing further to do at this point.  */
          fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
          if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
              && cum->nargs_prototype > 0)
            {
              static bool warned;

              machine_mode rmode = TARGET_32BIT ? SImode : DImode;
              int n_words = rs6000_arg_size (mode, type);

              align_words += fpr_words;
              n_words -= fpr_words;

              do
                {
                  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
                  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
                  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
                }
              while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

              if (!warned && warn_psabi)
                {
                  warned = true;
                  inform (input_location,
                          "the ABI of passing homogeneous float aggregates"
                          " has changed in GCC 5");
                }
            }

          return rs6000_finish_function_arg (mode, rvec, k);
        }
      else if (align_words < GP_ARG_NUM_REG)
        {
          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        }
      else
        return NULL_RTX;
    }
}

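/* Illustrative sketch (not from the original source): under the ELFv2 ABI
   a hypothetical homogeneous aggregate such as

     struct vec3 { double x, y, z; };

   is recognized by rs6000_discover_homogeneous_aggregate as three DFmode
   elements, so the FPR loop above emits one EXPR_LIST per element, e.g.
   (reg:DF f1) at offset 0, (reg:DF f2) at offset 8 and (reg:DF f3) at
   offset 16, assuming f1 is the next free FPR.  If fewer FPRs remained,
   the trailing part of the aggregate would instead be described by the
   GPR/memory entries set up earlier in this function.  */
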
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                          tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
         memory) as well as VRs, we do not use the partial bytes mechanism;
         instead, rs6000_function_arg will return a PARALLEL including a memory
         element as necessary.  Library support functions for IEEE 128-bit are
         assumed to not need the value passed both in GPRs and in vector
         registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
        ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
         (gprs or memory) as well as FPRs, we do not use the partial
         bytes mechanism; instead, rs6000_function_arg will return a
         PARALLEL including a memory element as necessary.  */
      if (type
          && (cum->nargs_prototype <= 0
              || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                  && TARGET_XL_COMPAT
                  && align_words >= GP_ARG_NUM_REG)))
        return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
        {
          /* Compute number of bytes / words passed in FPRs.  If there
             is still space available in the register parameter area
             *after* that amount, a part of the argument will be passed
             in GPRs.  In that case, the total amount passed in any
             registers is equal to the amount that would have been passed
             in GPRs if everything were passed there, so we fall back to
             the GPR code below to compute the appropriate value.  */
          int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
                     * MIN (8, GET_MODE_SIZE (elt_mode)));
          int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

          if (align_words + fpr_words < GP_ARG_NUM_REG)
            passed_in_gprs = true;
          else
            ret = fpr;
        }
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}

/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                          machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector passed by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_pass_big_vectors = true;
        }
      return 1;
    }

  return 0;
}

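/* Illustrative sketch (not from the original source) of the effect of the
   rules above, assuming a 32-bit SVR4 (-mabi=sysv) target:

     struct s { int a, b, c; };   // aggregate: passed by reference under V.4
     __float128 q;                // IEEE 128-bit under V.4: by reference
     int v8 __attribute__ ((vector_size (32)));
                                  // synthetic vector larger than the ABI
                                  // vector size: by reference
     int x;                       // ordinary scalar: passed by value

   In each "by reference" case the caller makes a copy in memory and passes
   its address, exactly as the comment above describes.  */
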
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
            || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
                         TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}

/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
        result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
           parm && parm != void_list_node;
           parm = TREE_CHAIN (parm))
        if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
          return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
        if (rs6000_parm_needs_stack (args_so_far, arg_type))
          return true;
    }

  return false;
}

/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

static int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
         a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
        reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
        reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}

static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
        {
          if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
            tem = NULL_RTX;
          else
            tem = simplify_gen_subreg (reg_mode, x, BLKmode,
                                       i * GET_MODE_SIZE (reg_mode));
        }
      else
        tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}

/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                        tree type, int *pretend_size ATTRIBUTE_UNUSED,
                        int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
        {
          int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
          HOST_WIDE_INT offset = 0;

          /* Try to optimize the size of the varargs save area.
             The ABI requires that ap.reg_save_area is doubleword
             aligned, but we don't need to allocate space for all
             the bytes, only those to which we actually will save
             anything.  */
          if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
            gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
          if (TARGET_HARD_FLOAT
              && next_cum.fregno <= FP_ARG_V4_MAX_REG
              && cfun->va_list_fpr_size)
            {
              if (gpr_reg_num)
                fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
                           * UNITS_PER_FP_WORD;
              if (cfun->va_list_fpr_size
                  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
              else
                fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                            * UNITS_PER_FP_WORD;
            }
          if (gpr_reg_num)
            {
              offset = -((first_reg_offset * reg_size) & ~7);
              if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
                {
                  gpr_reg_num = cfun->va_list_gpr_size;
                  if (reg_size == 4 && (first_reg_offset & 1))
                    gpr_reg_num++;
                }
              gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
            }
          else if (fpr_size)
            offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
                       * UNITS_PER_FP_WORD
                     - (int) (GP_ARG_NUM_REG * reg_size);

          if (gpr_size + fpr_size)
            {
              rtx reg_save_area
                = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
              gcc_assert (GET_CODE (reg_save_area) == MEM);
              reg_save_area = XEXP (reg_save_area, 0);
              if (GET_CODE (reg_save_area) == PLUS)
                {
                  gcc_assert (XEXP (reg_save_area, 0)
                              == virtual_stack_vars_rtx);
                  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
                  offset += INTVAL (XEXP (reg_save_area, 1));
                }
              else
                gcc_assert (reg_save_area == virtual_stack_vars_rtx);
            }

          cfun->machine->varargs_save_offset = offset;
          save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
        }
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
        first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
        /* V4 va_list_gpr_size counts number of registers needed.  */
        n_gpr = cfun->va_list_gpr_size;
      else
        /* char * va_list instead counts number of bytes needed.  */
        n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
        nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
                         plus_constant (Pmode, save_area,
                                        first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
                                  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
                                               * UNITS_PER_FP_WORD);

      emit_jump_insn
        (gen_rtx_SET (pc_rtx,
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            gen_rtx_NE (VOIDmode, cr1,
                                                        const0_rtx),
                                            gen_rtx_LABEL_REF (VOIDmode, lab),
                                            pc_rtx)));

      for (nregs = 0;
           fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
           fregno++, off += UNITS_PER_FP_WORD, nregs++)
        {
          mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                             ? DFmode : SFmode,
                             plus_constant (Pmode, save_area, off));
          MEM_NOTRAP_P (mem) = 1;
          set_mem_alias_set (mem, set);
          set_mem_align (mem, GET_MODE_ALIGNMENT (
                         (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                          ? DFmode : SFmode));
          emit_move_insn (mem, gen_rtx_REG (
                          (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                           ? DFmode : SFmode, fregno));
        }

      emit_label (lab);
    }
}

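/* Illustrative sketch (not from the original source): for a V.4 varargs
   function the code above materializes a register save area laid out
   roughly as

     offset 0              : incoming GPR arguments (up to r3..r10,
                             8 * reg_size bytes)
     offset 8 * reg_size   : incoming FPR arguments (up to f1..f8,
                             8 * UNITS_PER_FP_WORD bytes)

   Only the registers that va_arg can actually need are stored
   (cfun->va_list_gpr_size / va_list_fpr_size trim the ranges), and the FP
   stores are skipped at run time via the branch on CR1 emitted above,
   which the caller sets to say whether FP arguments were passed in
   registers.  */
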
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
                          get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
                      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
                      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}

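/* Illustrative sketch (not from the original source): the record built
   above corresponds to the familiar SVR4 PowerPC va_list, roughly

     typedef struct __va_list_tag {
       unsigned char gpr;          // number of GPR argument slots used
       unsigned char fpr;          // number of FPR argument slots used
       unsigned short reserved;    // named padding, keeps -Wpadded quiet
       void *overflow_arg_area;    // arguments that spilled to the stack
       void *reg_save_area;        // block saved by setup_incoming_varargs
     } __va_list_tag;
     typedef __va_list_tag va_list[1];

   while every other ABI handled here simply uses 'char *'.  */
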
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
               GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
               FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
             HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
             words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
        rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}

/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                        gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (((TARGET_MACHO
        && rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
        boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
        {
          /* This updates arg ptr by the amount that would be necessary
             to align the zero-sized (but not zero-alignment) item.  */
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
          gimplify_and_add (t, pre_p);

          t = fold_convert (sizetype, valist_tmp);
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_convert (TREE_TYPE (valist),
                                    fold_build2 (BIT_AND_EXPR, sizetype, t,
                                                 size_int (-boundary))));
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
          gimplify_and_add (t, pre_p);
        }
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
        {
          tree elem_type = TREE_TYPE (type);
          machine_mode elem_mode = TYPE_MODE (elem_type);
          int elem_size = GET_MODE_SIZE (elem_mode);

          if (elem_size < UNITS_PER_WORD)
            {
              tree real_part, imag_part;
              gimple_seq post = NULL;

              real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  &post);
              /* Copy the value into a temporary, lest the formal temporary
                 be reused out from under us.  */
              real_part = get_initialized_tmp_var (real_part, pre_p, &post);
              gimple_seq_add_seq (pre_p, post);

              imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  post_p);

              return build2 (COMPLEX_EXPR, type, real_part, imag_part);
            }
        }

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
        align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
        align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /*  AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
         item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
        {
          regalign = 1;
          u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), n_reg - 1));
          u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
                      unshare_expr (reg), u);
        }
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
         reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
        {
          t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), 1));
          u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
        }

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
         FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
        t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
        t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
        {
          /* Ensure that we don't find any more args in regs.
             Alignment has been taken care of for the special cases.  */
          gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
        }
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
          > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
         aligned in memory in the saved registers, so copy via a
         temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}

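/* Illustrative sketch (not from the original source): for
   'va_arg (ap, double)' on a V.4 hard-float target the sequence built
   above is roughly equivalent to

     if (ap->fpr >= 8)
       goto lab_false;
     addr = ap->reg_save_area + 32 + ap->fpr++ * 8;   // FPR block
     goto lab_over;
   lab_false:
     addr = align (ap->overflow_arg_area, 8);         // stack overflow area
     ap->overflow_arg_area = addr + 8;
   lab_over:
     result = *(double *) addr;

   For two-register items the register counter is also forced to 8 on the
   overflow path so that no later argument is fetched from registers.  The
   constants shown (32, 8) are the sav_ofs/sav_scale values computed above
   for this configuration and are examples only.  */
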
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
                 "internal error: builtin function %qs already processed",
                 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
         external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
         function as not reading global memory, but it can have arbitrary side
         effects.  If it is off, then assume the function is a const function.
         This mimics the ATTR_MATHFN_FPROUNDING attribute in
         builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
        {
          DECL_PURE_P (t) = 1;
          DECL_IS_NOVOPS (t) = 1;
          attr_string = ", fp, pure";
        }
      else
        {
          TREE_READONLY (t) = 1;
          attr_string = ", fp, const";
        }
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
             (int)code, name, attr_string);
}

/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

/* Simple no-argument operations: result = __builtin_darn_32 ()  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};

/* HTM builtins.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q

/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}

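/* Illustrative sketch (not from the original source) of the X-macro scheme
   used above: rs6000-builtin.def contains one RS6000_BUILTIN_<class> entry
   per builtin, e.g. a hypothetical

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUWM, "__builtin_altivec_vadduwm",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
                       CODE_FOR_addv4si3)

   Before each table only the class of interest is defined to expand to
   '{ MASK, ICODE, NAME, ENUM },' while the other nine classes expand to
   nothing, so re-including the .def file yields an initializer list that
   selects exactly the builtins of that class.  */
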
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}

13928 rs6000_expand_unop_builtin (enum insn_code icode
, tree exp
, rtx target
)
13931 tree arg0
= CALL_EXPR_ARG (exp
, 0);
13932 rtx op0
= expand_normal (arg0
);
13933 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
13934 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
13936 if (icode
== CODE_FOR_nothing
)
13937 /* Builtin not supported on this processor. */
13940 /* If we got invalid arguments bail out before generating bad rtl. */
13941 if (arg0
== error_mark_node
)
13944 if (icode
== CODE_FOR_altivec_vspltisb
13945 || icode
== CODE_FOR_altivec_vspltish
13946 || icode
== CODE_FOR_altivec_vspltisw
)
13948 /* Only allow 5-bit *signed* literals. */
13949 if (GET_CODE (op0
) != CONST_INT
13950 || INTVAL (op0
) > 15
13951 || INTVAL (op0
) < -16)
13953 error ("argument 1 must be a 5-bit signed literal");
13954 return CONST0_RTX (tmode
);
13959 || GET_MODE (target
) != tmode
13960 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
13961 target
= gen_reg_rtx (tmode
);
13963 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
13964 op0
= copy_to_mode_reg (mode0
, op0
);
13966 pat
= GEN_FCN (icode
) (target
, op0
);
13975 altivec_expand_abs_builtin (enum insn_code icode
, tree exp
, rtx target
)
13977 rtx pat
, scratch1
, scratch2
;
13978 tree arg0
= CALL_EXPR_ARG (exp
, 0);
13979 rtx op0
= expand_normal (arg0
);
13980 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
13981 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
13983 /* If we have invalid arguments, bail out before generating bad rtl. */
13984 if (arg0
== error_mark_node
)
13988 || GET_MODE (target
) != tmode
13989 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
13990 target
= gen_reg_rtx (tmode
);
13992 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
13993 op0
= copy_to_mode_reg (mode0
, op0
);
13995 scratch1
= gen_reg_rtx (mode0
);
13996 scratch2
= gen_reg_rtx (mode0
);
13998 pat
= GEN_FCN (icode
) (target
, op0
, scratch1
, scratch2
);
14007 rs6000_expand_binop_builtin (enum insn_code icode
, tree exp
, rtx target
)
14010 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14011 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14012 rtx op0
= expand_normal (arg0
);
14013 rtx op1
= expand_normal (arg1
);
14014 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14015 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14016 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14018 if (icode
== CODE_FOR_nothing
)
14019 /* Builtin not supported on this processor. */
14022 /* If we got invalid arguments bail out before generating bad rtl. */
14023 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14026 if (icode
== CODE_FOR_altivec_vcfux
14027 || icode
== CODE_FOR_altivec_vcfsx
14028 || icode
== CODE_FOR_altivec_vctsxs
14029 || icode
== CODE_FOR_altivec_vctuxs
14030 || icode
== CODE_FOR_altivec_vspltb
14031 || icode
== CODE_FOR_altivec_vsplth
14032 || icode
== CODE_FOR_altivec_vspltw
)
14034 /* Only allow 5-bit unsigned literals. */
14036 if (TREE_CODE (arg1
) != INTEGER_CST
14037 || TREE_INT_CST_LOW (arg1
) & ~0x1f)
14039 error ("argument 2 must be a 5-bit unsigned literal");
14040 return CONST0_RTX (tmode
);
14043 else if (icode
== CODE_FOR_dfptstsfi_eq_dd
14044 || icode
== CODE_FOR_dfptstsfi_lt_dd
14045 || icode
== CODE_FOR_dfptstsfi_gt_dd
14046 || icode
== CODE_FOR_dfptstsfi_unordered_dd
14047 || icode
== CODE_FOR_dfptstsfi_eq_td
14048 || icode
== CODE_FOR_dfptstsfi_lt_td
14049 || icode
== CODE_FOR_dfptstsfi_gt_td
14050 || icode
== CODE_FOR_dfptstsfi_unordered_td
)
14052 /* Only allow 6-bit unsigned literals. */
14054 if (TREE_CODE (arg0
) != INTEGER_CST
14055 || !IN_RANGE (TREE_INT_CST_LOW (arg0
), 0, 63))
14057 error ("argument 1 must be a 6-bit unsigned literal");
14058 return CONST0_RTX (tmode
);
14061 else if (icode
== CODE_FOR_xststdcqp
14062 || icode
== CODE_FOR_xststdcdp
14063 || icode
== CODE_FOR_xststdcsp
14064 || icode
== CODE_FOR_xvtstdcdp
14065 || icode
== CODE_FOR_xvtstdcsp
)
14067 /* Only allow 7-bit unsigned literals. */
14069 if (TREE_CODE (arg1
) != INTEGER_CST
14070 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 127))
14072 error ("argument 2 must be a 7-bit unsigned literal");
14073 return CONST0_RTX (tmode
);
14076 else if (icode
== CODE_FOR_unpackv1ti
14077 || icode
== CODE_FOR_unpackkf
14078 || icode
== CODE_FOR_unpacktf
14079 || icode
== CODE_FOR_unpackif
14080 || icode
== CODE_FOR_unpacktd
)
14082 /* Only allow 1-bit unsigned literals. */
14084 if (TREE_CODE (arg1
) != INTEGER_CST
14085 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 1))
14087 error ("argument 2 must be a 1-bit unsigned literal");
14088 return CONST0_RTX (tmode
);
14093 || GET_MODE (target
) != tmode
14094 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14095 target
= gen_reg_rtx (tmode
);
14097 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14098 op0
= copy_to_mode_reg (mode0
, op0
);
14099 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14100 op1
= copy_to_mode_reg (mode1
, op1
);
14102 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
14111 altivec_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
14114 tree cr6_form
= CALL_EXPR_ARG (exp
, 0);
14115 tree arg0
= CALL_EXPR_ARG (exp
, 1);
14116 tree arg1
= CALL_EXPR_ARG (exp
, 2);
14117 rtx op0
= expand_normal (arg0
);
14118 rtx op1
= expand_normal (arg1
);
14119 machine_mode tmode
= SImode
;
14120 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14121 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14124 if (TREE_CODE (cr6_form
) != INTEGER_CST
)
14126 error ("argument 1 of %qs must be a constant",
14127 "__builtin_altivec_predicate");
14131 cr6_form_int
= TREE_INT_CST_LOW (cr6_form
);
14133 gcc_assert (mode0
== mode1
);
14135 /* If we have invalid arguments, bail out before generating bad rtl. */
14136 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14140 || GET_MODE (target
) != tmode
14141 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14142 target
= gen_reg_rtx (tmode
);
14144 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14145 op0
= copy_to_mode_reg (mode0
, op0
);
14146 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14147 op1
= copy_to_mode_reg (mode1
, op1
);
14149 /* Note that for many of the relevant operations (e.g. cmpne or
14150 cmpeq) with float or double operands, it makes more sense for the
14151 mode of the allocated scratch register to select a vector of
14152 integer. But the choice to copy the mode of operand 0 was made
14153 long ago and there are no plans to change it. */
14154 scratch
= gen_reg_rtx (mode0
);
14156 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
14161 /* The vec_any* and vec_all* predicates use the same opcodes for two
14162 different operations, but the bits in CR6 will be different
14163 depending on what information we want. So we have to play tricks
14164 with CR6 to get the right bits out.
14166 If you think this is disgusting, look at the specs for the
14167 AltiVec predicates. */
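  /* Descriptive note: the record-form vector compare emitted just below sets
     CR field 6, and the gen_cr6_test_for_zero/gen_cr6_test_for_lt patterns
     (and their _reverse variants) copy the relevant CR6 bit, possibly
     inverted, into TARGET.  That is how the four cr6_form encodings handled
     by the switch further down are realized.  */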
  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_altivec_predicate");
      break;
    }

  return target;
}
14193 paired_expand_lv_builtin (enum insn_code icode
, tree exp
, rtx target
)
14196 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14197 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14198 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14199 machine_mode mode0
= Pmode
;
14200 machine_mode mode1
= Pmode
;
14201 rtx op0
= expand_normal (arg0
);
14202 rtx op1
= expand_normal (arg1
);
14204 if (icode
== CODE_FOR_nothing
)
14205 /* Builtin not supported on this processor. */
14208 /* If we got invalid arguments bail out before generating bad rtl. */
14209 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14213 || GET_MODE (target
) != tmode
14214 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14215 target
= gen_reg_rtx (tmode
);
14217 op1
= copy_to_mode_reg (mode1
, op1
);
14219 if (op0
== const0_rtx
)
14221 addr
= gen_rtx_MEM (tmode
, op1
);
14225 op0
= copy_to_mode_reg (mode0
, op0
);
14226 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op0
, op1
));
14229 pat
= GEN_FCN (icode
) (target
, addr
);
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    case E_V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						     gen_rtvec_v (16, perm)));
}
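/* Illustrative note: for a 4-element mode such as V4SImode the control
   vector comes from swap4 above.  The initializer is itself interpreted as a
   little-endian vector, so the byte indices read "backwards" relative to the
   big-endian control vector that vperm ultimately sees; the net effect is a
   permutation that reverses the order of the four 32-bit elements.  */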
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-
   reversing permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (op0, vperm));
}
/* Generate code for a "stvxl" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (gen_rtx_SET (op0, stvx));
}
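/* Taken together, the three helpers above implement the -maltivec=be element
   numbering on a little-endian target: loads are followed by an element-
   reversing VPERM on the loaded value, and stores permute the source value
   before the memory access, so user code observes big-endian element order.  */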
14333 altivec_expand_lv_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14336 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14337 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14338 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14339 machine_mode mode0
= Pmode
;
14340 machine_mode mode1
= Pmode
;
14341 rtx op0
= expand_normal (arg0
);
14342 rtx op1
= expand_normal (arg1
);
14344 if (icode
== CODE_FOR_nothing
)
14345 /* Builtin not supported on this processor. */
14348 /* If we got invalid arguments bail out before generating bad rtl. */
14349 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14353 || GET_MODE (target
) != tmode
14354 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14355 target
= gen_reg_rtx (tmode
);
14357 op1
= copy_to_mode_reg (mode1
, op1
);
14359 /* For LVX, express the RTL accurately by ANDing the address with -16.
14360 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14361 so the raw address is fine. */
14362 if (icode
== CODE_FOR_altivec_lvx_v2df_2op
14363 || icode
== CODE_FOR_altivec_lvx_v2di_2op
14364 || icode
== CODE_FOR_altivec_lvx_v4sf_2op
14365 || icode
== CODE_FOR_altivec_lvx_v4si_2op
14366 || icode
== CODE_FOR_altivec_lvx_v8hi_2op
14367 || icode
== CODE_FOR_altivec_lvx_v16qi_2op
)
14370 if (op0
== const0_rtx
)
14374 op0
= copy_to_mode_reg (mode0
, op0
);
14375 rawaddr
= gen_rtx_PLUS (Pmode
, op1
, op0
);
14377 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14378 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, addr
);
14380 /* For -maltivec=be, emit the load and follow it up with a
14381 permute to swap the elements. */
14382 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14384 rtx temp
= gen_reg_rtx (tmode
);
14385 emit_insn (gen_rtx_SET (temp
, addr
));
14387 rtx sel
= swap_selector_for_mode (tmode
);
14388 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, temp
, temp
, sel
),
14390 emit_insn (gen_rtx_SET (target
, vperm
));
14393 emit_insn (gen_rtx_SET (target
, addr
));
14397 if (op0
== const0_rtx
)
14398 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14401 op0
= copy_to_mode_reg (mode0
, op0
);
14402 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14403 gen_rtx_PLUS (Pmode
, op1
, op0
));
14406 pat
= GEN_FCN (icode
) (target
, addr
);
14416 altivec_expand_xl_be_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14419 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14420 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14421 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14422 machine_mode mode0
= Pmode
;
14423 machine_mode mode1
= Pmode
;
14424 rtx op0
= expand_normal (arg0
);
14425 rtx op1
= expand_normal (arg1
);
14427 if (icode
== CODE_FOR_nothing
)
14428 /* Builtin not supported on this processor. */
14431 /* If we got invalid arguments bail out before generating bad rtl. */
14432 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14436 || GET_MODE (target
) != tmode
14437 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14438 target
= gen_reg_rtx (tmode
);
14440 op1
= copy_to_mode_reg (mode1
, op1
);
14442 if (op0
== const0_rtx
)
14443 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14446 op0
= copy_to_mode_reg (mode0
, op0
);
14447 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14448 gen_rtx_PLUS (Pmode
, op1
, op0
));
14451 pat
= GEN_FCN (icode
) (target
, addr
);
  /* Reverse the order of the elements if we are in little-endian mode.  */
14457 if (!VECTOR_ELT_ORDER_BIG
)
14459 rtx sel
= swap_selector_for_mode (tmode
);
14460 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, target
, target
, sel
),
14462 emit_insn (gen_rtx_SET (target
, vperm
));
14468 paired_expand_stv_builtin (enum insn_code icode
, tree exp
)
14470 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14471 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14472 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14473 rtx op0
= expand_normal (arg0
);
14474 rtx op1
= expand_normal (arg1
);
14475 rtx op2
= expand_normal (arg2
);
14477 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14478 machine_mode mode1
= Pmode
;
14479 machine_mode mode2
= Pmode
;
14481 /* Invalid arguments. Bail before doing anything stoopid! */
14482 if (arg0
== error_mark_node
14483 || arg1
== error_mark_node
14484 || arg2
== error_mark_node
)
14487 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, tmode
))
14488 op0
= copy_to_mode_reg (tmode
, op0
);
14490 op2
= copy_to_mode_reg (mode2
, op2
);
14492 if (op1
== const0_rtx
)
14494 addr
= gen_rtx_MEM (tmode
, op2
);
14498 op1
= copy_to_mode_reg (mode1
, op1
);
14499 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op1
, op2
));
14502 pat
= GEN_FCN (icode
) (addr
, op0
);
14509 altivec_expand_stxvl_builtin (enum insn_code icode
, tree exp
)
14512 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14513 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14514 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14515 rtx op0
= expand_normal (arg0
);
14516 rtx op1
= expand_normal (arg1
);
14517 rtx op2
= expand_normal (arg2
);
14518 machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
14519 machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
14520 machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
14522 if (icode
== CODE_FOR_nothing
)
14523 /* Builtin not supported on this processor. */
14526 /* If we got invalid arguments bail out before generating bad rtl. */
14527 if (arg0
== error_mark_node
14528 || arg1
== error_mark_node
14529 || arg2
== error_mark_node
)
14532 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14533 op0
= copy_to_mode_reg (mode0
, op0
);
14534 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14535 op1
= copy_to_mode_reg (mode1
, op1
);
14536 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
14537 op2
= copy_to_mode_reg (mode2
, op2
);
14539 pat
= GEN_FCN (icode
) (op0
, op1
, op2
);
14547 altivec_expand_stv_builtin (enum insn_code icode
, tree exp
)
14549 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14550 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14551 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14552 rtx op0
= expand_normal (arg0
);
14553 rtx op1
= expand_normal (arg1
);
14554 rtx op2
= expand_normal (arg2
);
14555 rtx pat
, addr
, rawaddr
;
14556 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14557 machine_mode smode
= insn_data
[icode
].operand
[1].mode
;
14558 machine_mode mode1
= Pmode
;
14559 machine_mode mode2
= Pmode
;
14561 /* Invalid arguments. Bail before doing anything stoopid! */
14562 if (arg0
== error_mark_node
14563 || arg1
== error_mark_node
14564 || arg2
== error_mark_node
)
14567 op2
= copy_to_mode_reg (mode2
, op2
);
14569 /* For STVX, express the RTL accurately by ANDing the address with -16.
14570 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14571 so the raw address is fine. */
14572 if (icode
== CODE_FOR_altivec_stvx_v2df_2op
14573 || icode
== CODE_FOR_altivec_stvx_v2di_2op
14574 || icode
== CODE_FOR_altivec_stvx_v4sf_2op
14575 || icode
== CODE_FOR_altivec_stvx_v4si_2op
14576 || icode
== CODE_FOR_altivec_stvx_v8hi_2op
14577 || icode
== CODE_FOR_altivec_stvx_v16qi_2op
)
14579 if (op1
== const0_rtx
)
14583 op1
= copy_to_mode_reg (mode1
, op1
);
14584 rawaddr
= gen_rtx_PLUS (Pmode
, op2
, op1
);
14587 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14588 addr
= gen_rtx_MEM (tmode
, addr
);
14590 op0
= copy_to_mode_reg (tmode
, op0
);
14592 /* For -maltivec=be, emit a permute to swap the elements, followed
14594 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14596 rtx temp
= gen_reg_rtx (tmode
);
14597 rtx sel
= swap_selector_for_mode (tmode
);
14598 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, op0
, op0
, sel
),
14600 emit_insn (gen_rtx_SET (temp
, vperm
));
14601 emit_insn (gen_rtx_SET (addr
, temp
));
14604 emit_insn (gen_rtx_SET (addr
, op0
));
14608 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, smode
))
14609 op0
= copy_to_mode_reg (smode
, op0
);
14611 if (op1
== const0_rtx
)
14612 addr
= gen_rtx_MEM (tmode
, op2
);
14615 op1
= copy_to_mode_reg (mode1
, op1
);
14616 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op2
, op1
));
14619 pat
= GEN_FCN (icode
) (addr
, op0
);
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
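/* TFHAR, TFIAR, TEXASR and TEXASRU are the hardware transactional memory
   special-purpose registers introduced with ISA 2.07 (POWER8); the HTM
   __builtin_get_* and __builtin_set_* builtins simply move to or from the
   corresponding SPR.  */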
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
14673 /* Expand the HTM builtin in EXP and store the result in TARGET.
14674 Store true in *EXPANDEDP if we found a builtin to expand. */
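/* As with the other *_expand_builtin helpers in this file, *EXPANDEDP is the
   handshake with the top-level expander: when it is left false, the caller
   knows the function code did not belong to this family and tries the next
   group of builtins.  */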
14676 htm_expand_builtin (tree exp
, rtx target
, bool * expandedp
)
14678 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
14679 bool nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
14680 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
14681 const struct builtin_description
*d
;
14686 if (!TARGET_POWERPC64
14687 && (fcode
== HTM_BUILTIN_TABORTDC
14688 || fcode
== HTM_BUILTIN_TABORTDCI
))
14690 size_t uns_fcode
= (size_t)fcode
;
14691 const char *name
= rs6000_builtin_info
[uns_fcode
].name
;
14692 error ("builtin %qs is only valid in 64-bit mode", name
);
14696 /* Expand the HTM builtins. */
14698 for (i
= 0; i
< ARRAY_SIZE (bdesc_htm
); i
++, d
++)
14699 if (d
->code
== fcode
)
14701 rtx op
[MAX_HTM_OPERANDS
], pat
;
14704 call_expr_arg_iterator iter
;
14705 unsigned attr
= rs6000_builtin_info
[fcode
].attr
;
14706 enum insn_code icode
= d
->icode
;
14707 const struct insn_operand_data
*insn_op
;
14708 bool uses_spr
= (attr
& RS6000_BTC_SPR
);
14712 icode
= rs6000_htm_spr_icode (nonvoid
);
14713 insn_op
= &insn_data
[icode
].operand
[0];
14717 machine_mode tmode
= (uses_spr
) ? insn_op
->mode
: E_SImode
;
14719 || GET_MODE (target
) != tmode
14720 || (uses_spr
&& !(*insn_op
->predicate
) (target
, tmode
)))
14721 target
= gen_reg_rtx (tmode
);
14723 op
[nopnds
++] = target
;
14726 FOR_EACH_CALL_EXPR_ARG (arg
, iter
, exp
)
14728 if (arg
== error_mark_node
|| nopnds
>= MAX_HTM_OPERANDS
)
14731 insn_op
= &insn_data
[icode
].operand
[nopnds
];
14733 op
[nopnds
] = expand_normal (arg
);
14735 if (!(*insn_op
->predicate
) (op
[nopnds
], insn_op
->mode
))
14737 if (!strcmp (insn_op
->constraint
, "n"))
14739 int arg_num
= (nonvoid
) ? nopnds
: nopnds
+ 1;
14740 if (!CONST_INT_P (op
[nopnds
]))
14741 error ("argument %d must be an unsigned literal", arg_num
);
14743 error ("argument %d is an unsigned literal that is "
14744 "out of range", arg_num
);
14747 op
[nopnds
] = copy_to_mode_reg (insn_op
->mode
, op
[nopnds
]);
14753 /* Handle the builtins for extended mnemonics. These accept
14754 no arguments, but map to builtins that take arguments. */
14757 case HTM_BUILTIN_TENDALL
: /* Alias for: tend. 1 */
14758 case HTM_BUILTIN_TRESUME
: /* Alias for: tsr. 1 */
14759 op
[nopnds
++] = GEN_INT (1);
14761 attr
|= RS6000_BTC_UNARY
;
14763 case HTM_BUILTIN_TSUSPEND
: /* Alias for: tsr. 0 */
14764 op
[nopnds
++] = GEN_INT (0);
14766 attr
|= RS6000_BTC_UNARY
;
14772 /* If this builtin accesses SPRs, then pass in the appropriate
14773 SPR number and SPR regno as the last two operands. */
14776 machine_mode mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
14777 op
[nopnds
++] = gen_rtx_CONST_INT (mode
, htm_spr_num (fcode
));
14778 op
[nopnds
++] = gen_rtx_REG (mode
, htm_spr_regno (fcode
));
14780 /* If this builtin accesses a CR, then pass in a scratch
14781 CR as the last operand. */
14782 else if (attr
& RS6000_BTC_CR
)
14783 { cr
= gen_reg_rtx (CCmode
);
14789 int expected_nopnds
= 0;
14790 if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_UNARY
)
14791 expected_nopnds
= 1;
14792 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_BINARY
)
14793 expected_nopnds
= 2;
14794 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_TERNARY
)
14795 expected_nopnds
= 3;
14796 if (!(attr
& RS6000_BTC_VOID
))
14797 expected_nopnds
+= 1;
14799 expected_nopnds
+= 2;
14801 gcc_assert (nopnds
== expected_nopnds
14802 && nopnds
<= MAX_HTM_OPERANDS
);
14808 pat
= GEN_FCN (icode
) (op
[0]);
14811 pat
= GEN_FCN (icode
) (op
[0], op
[1]);
14814 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2]);
14817 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2], op
[3]);
14820 gcc_unreachable ();
14826 if (attr
& RS6000_BTC_CR
)
14828 if (fcode
== HTM_BUILTIN_TBEGIN
)
	  /* Emit code to set TARGET to true or false depending on
	     whether the tbegin. instruction succeeded or failed
	     to start a transaction.  We do this by placing the 1's
	     complement of CR's EQ bit into TARGET.  */
14834 rtx scratch
= gen_reg_rtx (SImode
);
14835 emit_insn (gen_rtx_SET (scratch
,
14836 gen_rtx_EQ (SImode
, cr
,
14838 emit_insn (gen_rtx_SET (target
,
14839 gen_rtx_XOR (SImode
, scratch
,
14844 /* Emit code to copy the 4-bit condition register field
14845 CR into the least significant end of register TARGET. */
14846 rtx scratch1
= gen_reg_rtx (SImode
);
14847 rtx scratch2
= gen_reg_rtx (SImode
);
14848 rtx subreg
= simplify_gen_subreg (CCmode
, scratch1
, SImode
, 0);
14849 emit_insn (gen_movcc (subreg
, cr
));
14850 emit_insn (gen_lshrsi3 (scratch2
, scratch1
, GEN_INT (28)));
14851 emit_insn (gen_andsi3 (target
, scratch2
, GEN_INT (0xf)));
14860 *expandedp
= false;
14864 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
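/* Illustrative usage (not part of this file): the CPU builtins expanded here
   are intended to be used from user code roughly as

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       ...
     if (__builtin_cpu_supports ("vsx"))
       ...

   __builtin_cpu_init expands to nothing on this target because the C library
   fills in the platform and hwcap fields of the thread control block before
   user code runs; the other two builtins compare against those cached
   fields.  */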
14867 cpu_expand_builtin (enum rs6000_builtins fcode
, tree exp ATTRIBUTE_UNUSED
,
14870 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14871 if (fcode
== RS6000_BUILTIN_CPU_INIT
)
14874 if (target
== 0 || GET_MODE (target
) != SImode
)
14875 target
= gen_reg_rtx (SImode
);
14877 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14878 tree arg
= TREE_OPERAND (CALL_EXPR_ARG (exp
, 0), 0);
14879 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
14880 to a STRING_CST. */
14881 if (TREE_CODE (arg
) == ARRAY_REF
14882 && TREE_CODE (TREE_OPERAND (arg
, 0)) == STRING_CST
14883 && TREE_CODE (TREE_OPERAND (arg
, 1)) == INTEGER_CST
14884 && compare_tree_int (TREE_OPERAND (arg
, 1), 0) == 0)
14885 arg
= TREE_OPERAND (arg
, 0);
14887 if (TREE_CODE (arg
) != STRING_CST
)
14889 error ("builtin %qs only accepts a string argument",
14890 rs6000_builtin_info
[(size_t) fcode
].name
);
14894 if (fcode
== RS6000_BUILTIN_CPU_IS
)
14896 const char *cpu
= TREE_STRING_POINTER (arg
);
14897 rtx cpuid
= NULL_RTX
;
14898 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_is_info
); i
++)
14899 if (strcmp (cpu
, cpu_is_info
[i
].cpu
) == 0)
14901 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14902 cpuid
= GEN_INT (cpu_is_info
[i
].cpuid
+ _DL_FIRST_PLATFORM
);
14905 if (cpuid
== NULL_RTX
)
14907 /* Invalid CPU argument. */
14908 error ("cpu %qs is an invalid argument to builtin %qs",
14909 cpu
, rs6000_builtin_info
[(size_t) fcode
].name
);
14913 rtx platform
= gen_reg_rtx (SImode
);
14914 rtx tcbmem
= gen_const_mem (SImode
,
14915 gen_rtx_PLUS (Pmode
,
14916 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14917 GEN_INT (TCB_PLATFORM_OFFSET
)));
14918 emit_move_insn (platform
, tcbmem
);
14919 emit_insn (gen_eqsi3 (target
, platform
, cpuid
));
14921 else if (fcode
== RS6000_BUILTIN_CPU_SUPPORTS
)
14923 const char *hwcap
= TREE_STRING_POINTER (arg
);
14924 rtx mask
= NULL_RTX
;
14926 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_supports_info
); i
++)
14927 if (strcmp (hwcap
, cpu_supports_info
[i
].hwcap
) == 0)
14929 mask
= GEN_INT (cpu_supports_info
[i
].mask
);
14930 hwcap_offset
= TCB_HWCAP_OFFSET (cpu_supports_info
[i
].id
);
14933 if (mask
== NULL_RTX
)
14935 /* Invalid HWCAP argument. */
14936 error ("%s %qs is an invalid argument to builtin %qs",
14937 "hwcap", hwcap
, rs6000_builtin_info
[(size_t) fcode
].name
);
14941 rtx tcb_hwcap
= gen_reg_rtx (SImode
);
14942 rtx tcbmem
= gen_const_mem (SImode
,
14943 gen_rtx_PLUS (Pmode
,
14944 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14945 GEN_INT (hwcap_offset
)));
14946 emit_move_insn (tcb_hwcap
, tcbmem
);
14947 rtx scratch1
= gen_reg_rtx (SImode
);
14948 emit_insn (gen_rtx_SET (scratch1
, gen_rtx_AND (SImode
, tcb_hwcap
, mask
)));
14949 rtx scratch2
= gen_reg_rtx (SImode
);
14950 emit_insn (gen_eqsi3 (scratch2
, scratch1
, const0_rtx
));
14951 emit_insn (gen_rtx_SET (target
, gen_rtx_XOR (SImode
, scratch2
, const1_rtx
)));
14954 gcc_unreachable ();
14956 /* Record that we have expanded a CPU builtin, so that we can later
14957 emit a reference to the special symbol exported by LIBC to ensure we
14958 do not link against an old LIBC that doesn't support this feature. */
14959 cpu_builtin_p
= true;
14962 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14963 "capability bits", rs6000_builtin_info
[(size_t) fcode
].name
);
14965 /* For old LIBCs, always return FALSE. */
14966 emit_move_insn (target
, GEN_INT (0));
14967 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14973 rs6000_expand_ternop_builtin (enum insn_code icode
, tree exp
, rtx target
)
14976 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14977 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14978 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14979 rtx op0
= expand_normal (arg0
);
14980 rtx op1
= expand_normal (arg1
);
14981 rtx op2
= expand_normal (arg2
);
14982 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14983 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14984 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14985 machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
14987 if (icode
== CODE_FOR_nothing
)
14988 /* Builtin not supported on this processor. */
14991 /* If we got invalid arguments bail out before generating bad rtl. */
14992 if (arg0
== error_mark_node
14993 || arg1
== error_mark_node
14994 || arg2
== error_mark_node
)
  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     that case.  */
15004 if (icode
== CODE_FOR_altivec_vsldoi_v4sf
15005 || icode
== CODE_FOR_altivec_vsldoi_v2df
15006 || icode
== CODE_FOR_altivec_vsldoi_v4si
15007 || icode
== CODE_FOR_altivec_vsldoi_v8hi
15008 || icode
== CODE_FOR_altivec_vsldoi_v16qi
)
15010 /* Only allow 4-bit unsigned literals. */
15012 if (TREE_CODE (arg2
) != INTEGER_CST
15013 || TREE_INT_CST_LOW (arg2
) & ~0xf)
15015 error ("argument 3 must be a 4-bit unsigned literal");
15016 return CONST0_RTX (tmode
);
15019 else if (icode
== CODE_FOR_vsx_xxpermdi_v2df
15020 || icode
== CODE_FOR_vsx_xxpermdi_v2di
15021 || icode
== CODE_FOR_vsx_xxpermdi_v2df_be
15022 || icode
== CODE_FOR_vsx_xxpermdi_v2di_be
15023 || icode
== CODE_FOR_vsx_xxpermdi_v1ti
15024 || icode
== CODE_FOR_vsx_xxpermdi_v4sf
15025 || icode
== CODE_FOR_vsx_xxpermdi_v4si
15026 || icode
== CODE_FOR_vsx_xxpermdi_v8hi
15027 || icode
== CODE_FOR_vsx_xxpermdi_v16qi
15028 || icode
== CODE_FOR_vsx_xxsldwi_v16qi
15029 || icode
== CODE_FOR_vsx_xxsldwi_v8hi
15030 || icode
== CODE_FOR_vsx_xxsldwi_v4si
15031 || icode
== CODE_FOR_vsx_xxsldwi_v4sf
15032 || icode
== CODE_FOR_vsx_xxsldwi_v2di
15033 || icode
== CODE_FOR_vsx_xxsldwi_v2df
)
15035 /* Only allow 2-bit unsigned literals. */
15037 if (TREE_CODE (arg2
) != INTEGER_CST
15038 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15040 error ("argument 3 must be a 2-bit unsigned literal");
15041 return CONST0_RTX (tmode
);
15044 else if (icode
== CODE_FOR_vsx_set_v2df
15045 || icode
== CODE_FOR_vsx_set_v2di
15046 || icode
== CODE_FOR_bcdadd
15047 || icode
== CODE_FOR_bcdadd_lt
15048 || icode
== CODE_FOR_bcdadd_eq
15049 || icode
== CODE_FOR_bcdadd_gt
15050 || icode
== CODE_FOR_bcdsub
15051 || icode
== CODE_FOR_bcdsub_lt
15052 || icode
== CODE_FOR_bcdsub_eq
15053 || icode
== CODE_FOR_bcdsub_gt
)
15055 /* Only allow 1-bit unsigned literals. */
15057 if (TREE_CODE (arg2
) != INTEGER_CST
15058 || TREE_INT_CST_LOW (arg2
) & ~0x1)
15060 error ("argument 3 must be a 1-bit unsigned literal");
15061 return CONST0_RTX (tmode
);
15064 else if (icode
== CODE_FOR_dfp_ddedpd_dd
15065 || icode
== CODE_FOR_dfp_ddedpd_td
)
15067 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15069 if (TREE_CODE (arg0
) != INTEGER_CST
15070 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15072 error ("argument 1 must be 0 or 2");
15073 return CONST0_RTX (tmode
);
15076 else if (icode
== CODE_FOR_dfp_denbcd_dd
15077 || icode
== CODE_FOR_dfp_denbcd_td
)
15079 /* Only allow 1-bit unsigned literals. */
15081 if (TREE_CODE (arg0
) != INTEGER_CST
15082 || TREE_INT_CST_LOW (arg0
) & ~0x1)
15084 error ("argument 1 must be a 1-bit unsigned literal");
15085 return CONST0_RTX (tmode
);
15088 else if (icode
== CODE_FOR_dfp_dscli_dd
15089 || icode
== CODE_FOR_dfp_dscli_td
15090 || icode
== CODE_FOR_dfp_dscri_dd
15091 || icode
== CODE_FOR_dfp_dscri_td
)
15093 /* Only allow 6-bit unsigned literals. */
15095 if (TREE_CODE (arg1
) != INTEGER_CST
15096 || TREE_INT_CST_LOW (arg1
) & ~0x3f)
15098 error ("argument 2 must be a 6-bit unsigned literal");
15099 return CONST0_RTX (tmode
);
15102 else if (icode
== CODE_FOR_crypto_vshasigmaw
15103 || icode
== CODE_FOR_crypto_vshasigmad
)
15105 /* Check whether the 2nd and 3rd arguments are integer constants and in
15106 range and prepare arguments. */
15108 if (TREE_CODE (arg1
) != INTEGER_CST
|| wi::geu_p (arg1
, 2))
15110 error ("argument 2 must be 0 or 1");
15111 return CONST0_RTX (tmode
);
15115 if (TREE_CODE (arg2
) != INTEGER_CST
|| wi::geu_p (arg2
, 16))
15117 error ("argument 3 must be in the range 0..15");
15118 return CONST0_RTX (tmode
);
15123 || GET_MODE (target
) != tmode
15124 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15125 target
= gen_reg_rtx (tmode
);
15127 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15128 op0
= copy_to_mode_reg (mode0
, op0
);
15129 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
15130 op1
= copy_to_mode_reg (mode1
, op1
);
15131 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
15132 op2
= copy_to_mode_reg (mode2
, op2
);
15134 if (TARGET_PAIRED_FLOAT
&& icode
== CODE_FOR_selv2sf4
)
15135 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
, CONST0_RTX (SFmode
));
15137 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
15145 /* Expand the lvx builtins. */
15147 altivec_expand_ld_builtin (tree exp
, rtx target
, bool *expandedp
)
15149 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15150 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15152 machine_mode tmode
, mode0
;
15154 enum insn_code icode
;
15158 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi
:
15159 icode
= CODE_FOR_vector_altivec_load_v16qi
;
15161 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi
:
15162 icode
= CODE_FOR_vector_altivec_load_v8hi
;
15164 case ALTIVEC_BUILTIN_LD_INTERNAL_4si
:
15165 icode
= CODE_FOR_vector_altivec_load_v4si
;
15167 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf
:
15168 icode
= CODE_FOR_vector_altivec_load_v4sf
;
15170 case ALTIVEC_BUILTIN_LD_INTERNAL_2df
:
15171 icode
= CODE_FOR_vector_altivec_load_v2df
;
15173 case ALTIVEC_BUILTIN_LD_INTERNAL_2di
:
15174 icode
= CODE_FOR_vector_altivec_load_v2di
;
15176 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti
:
15177 icode
= CODE_FOR_vector_altivec_load_v1ti
;
15180 *expandedp
= false;
15186 arg0
= CALL_EXPR_ARG (exp
, 0);
15187 op0
= expand_normal (arg0
);
15188 tmode
= insn_data
[icode
].operand
[0].mode
;
15189 mode0
= insn_data
[icode
].operand
[1].mode
;
15192 || GET_MODE (target
) != tmode
15193 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15194 target
= gen_reg_rtx (tmode
);
15196 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15197 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15199 pat
= GEN_FCN (icode
) (target
, op0
);
15206 /* Expand the stvx builtins. */
15208 altivec_expand_st_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15211 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15212 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15214 machine_mode mode0
, mode1
;
15216 enum insn_code icode
;
15220 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi
:
15221 icode
= CODE_FOR_vector_altivec_store_v16qi
;
15223 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi
:
15224 icode
= CODE_FOR_vector_altivec_store_v8hi
;
15226 case ALTIVEC_BUILTIN_ST_INTERNAL_4si
:
15227 icode
= CODE_FOR_vector_altivec_store_v4si
;
15229 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf
:
15230 icode
= CODE_FOR_vector_altivec_store_v4sf
;
15232 case ALTIVEC_BUILTIN_ST_INTERNAL_2df
:
15233 icode
= CODE_FOR_vector_altivec_store_v2df
;
15235 case ALTIVEC_BUILTIN_ST_INTERNAL_2di
:
15236 icode
= CODE_FOR_vector_altivec_store_v2di
;
15238 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti
:
15239 icode
= CODE_FOR_vector_altivec_store_v1ti
;
15242 *expandedp
= false;
15246 arg0
= CALL_EXPR_ARG (exp
, 0);
15247 arg1
= CALL_EXPR_ARG (exp
, 1);
15248 op0
= expand_normal (arg0
);
15249 op1
= expand_normal (arg1
);
15250 mode0
= insn_data
[icode
].operand
[0].mode
;
15251 mode1
= insn_data
[icode
].operand
[1].mode
;
15253 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15254 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15255 if (! (*insn_data
[icode
].operand
[1].predicate
) (op1
, mode1
))
15256 op1
= copy_to_mode_reg (mode1
, op1
);
15258 pat
= GEN_FCN (icode
) (op0
, op1
);
15266 /* Expand the dst builtins. */
15268 altivec_expand_dst_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15271 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15272 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15273 tree arg0
, arg1
, arg2
;
15274 machine_mode mode0
, mode1
;
15275 rtx pat
, op0
, op1
, op2
;
15276 const struct builtin_description
*d
;
15279 *expandedp
= false;
15281 /* Handle DST variants. */
15283 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
15284 if (d
->code
== fcode
)
15286 arg0
= CALL_EXPR_ARG (exp
, 0);
15287 arg1
= CALL_EXPR_ARG (exp
, 1);
15288 arg2
= CALL_EXPR_ARG (exp
, 2);
15289 op0
= expand_normal (arg0
);
15290 op1
= expand_normal (arg1
);
15291 op2
= expand_normal (arg2
);
15292 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
15293 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
15295 /* Invalid arguments, bail out before generating bad rtl. */
15296 if (arg0
== error_mark_node
15297 || arg1
== error_mark_node
15298 || arg2
== error_mark_node
)
15303 if (TREE_CODE (arg2
) != INTEGER_CST
15304 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15306 error ("argument to %qs must be a 2-bit unsigned literal", d
->name
);
15310 if (! (*insn_data
[d
->icode
].operand
[0].predicate
) (op0
, mode0
))
15311 op0
= copy_to_mode_reg (Pmode
, op0
);
15312 if (! (*insn_data
[d
->icode
].operand
[1].predicate
) (op1
, mode1
))
15313 op1
= copy_to_mode_reg (mode1
, op1
);
15315 pat
= GEN_FCN (d
->icode
) (op0
, op1
, op2
);
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */
static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
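/* For example, for a V4SI vector type the valid selectors are 0..3; an
   out-of-range or non-constant selector is diagnosed and element 0 is used
   so that expansion can continue.  */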
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  machine_mode tmode, mode0;
  tree arg0, arg1;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  /* Call get_element_number to validate arg1 if it is a constant.  */
  if (TREE_CODE (arg1) == INTEGER_CST)
    (void) get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, op1);

  return target;
}
15445 /* Expand the builtin in EXP and store the result in TARGET. Store
15446 true in *EXPANDEDP if we found a builtin to expand. */
15448 altivec_expand_builtin (tree exp
, rtx target
, bool *expandedp
)
15450 const struct builtin_description
*d
;
15452 enum insn_code icode
;
15453 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15454 tree arg0
, arg1
, arg2
;
15456 machine_mode tmode
, mode0
;
15457 enum rs6000_builtins fcode
15458 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15460 if (rs6000_overloaded_builtin_p (fcode
))
15463 error ("unresolved overload for Altivec builtin %qF", fndecl
);
15465 /* Given it is invalid, just generate a normal call. */
15466 return expand_call (exp
, target
, false);
15469 target
= altivec_expand_ld_builtin (exp
, target
, expandedp
);
15473 target
= altivec_expand_st_builtin (exp
, target
, expandedp
);
15477 target
= altivec_expand_dst_builtin (exp
, target
, expandedp
);
15485 case ALTIVEC_BUILTIN_STVX_V2DF
:
15486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op
, exp
);
15487 case ALTIVEC_BUILTIN_STVX_V2DI
:
15488 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op
, exp
);
15489 case ALTIVEC_BUILTIN_STVX_V4SF
:
15490 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op
, exp
);
15491 case ALTIVEC_BUILTIN_STVX
:
15492 case ALTIVEC_BUILTIN_STVX_V4SI
:
15493 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op
, exp
);
15494 case ALTIVEC_BUILTIN_STVX_V8HI
:
15495 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op
, exp
);
15496 case ALTIVEC_BUILTIN_STVX_V16QI
:
15497 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op
, exp
);
15498 case ALTIVEC_BUILTIN_STVEBX
:
15499 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
15500 case ALTIVEC_BUILTIN_STVEHX
:
15501 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
15502 case ALTIVEC_BUILTIN_STVEWX
:
15503 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
15504 case ALTIVEC_BUILTIN_STVXL_V2DF
:
15505 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df
, exp
);
15506 case ALTIVEC_BUILTIN_STVXL_V2DI
:
15507 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di
, exp
);
15508 case ALTIVEC_BUILTIN_STVXL_V4SF
:
15509 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf
, exp
);
15510 case ALTIVEC_BUILTIN_STVXL
:
15511 case ALTIVEC_BUILTIN_STVXL_V4SI
:
15512 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si
, exp
);
15513 case ALTIVEC_BUILTIN_STVXL_V8HI
:
15514 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi
, exp
);
15515 case ALTIVEC_BUILTIN_STVXL_V16QI
:
15516 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi
, exp
);
15518 case ALTIVEC_BUILTIN_STVLX
:
15519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
15520 case ALTIVEC_BUILTIN_STVLXL
:
15521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
15522 case ALTIVEC_BUILTIN_STVRX
:
15523 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
15524 case ALTIVEC_BUILTIN_STVRXL
:
15525 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
15527 case P9V_BUILTIN_STXVL
:
15528 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl
, exp
);
15530 case VSX_BUILTIN_STXVD2X_V1TI
:
15531 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti
, exp
);
15532 case VSX_BUILTIN_STXVD2X_V2DF
:
15533 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
15534 case VSX_BUILTIN_STXVD2X_V2DI
:
15535 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
15536 case VSX_BUILTIN_STXVW4X_V4SF
:
15537 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
15538 case VSX_BUILTIN_STXVW4X_V4SI
:
15539 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
15540 case VSX_BUILTIN_STXVW4X_V8HI
:
15541 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
15542 case VSX_BUILTIN_STXVW4X_V16QI
:
15543 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
15545 /* For the following on big endian, it's ok to use any appropriate
15546 unaligned-supporting store, so use a generic expander. For
15547 little-endian, the exact element-reversing instruction must
15549 case VSX_BUILTIN_ST_ELEMREV_V2DF
:
15551 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2df
15552 : CODE_FOR_vsx_st_elemrev_v2df
);
15553 return altivec_expand_stv_builtin (code
, exp
);
15555 case VSX_BUILTIN_ST_ELEMREV_V2DI
:
15557 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2di
15558 : CODE_FOR_vsx_st_elemrev_v2di
);
15559 return altivec_expand_stv_builtin (code
, exp
);
15561 case VSX_BUILTIN_ST_ELEMREV_V4SF
:
15563 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4sf
15564 : CODE_FOR_vsx_st_elemrev_v4sf
);
15565 return altivec_expand_stv_builtin (code
, exp
);
15567 case VSX_BUILTIN_ST_ELEMREV_V4SI
:
15569 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4si
15570 : CODE_FOR_vsx_st_elemrev_v4si
);
15571 return altivec_expand_stv_builtin (code
, exp
);
15573 case VSX_BUILTIN_ST_ELEMREV_V8HI
:
15575 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v8hi
15576 : CODE_FOR_vsx_st_elemrev_v8hi
);
15577 return altivec_expand_stv_builtin (code
, exp
);
15579 case VSX_BUILTIN_ST_ELEMREV_V16QI
:
15581 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v16qi
15582 : CODE_FOR_vsx_st_elemrev_v16qi
);
15583 return altivec_expand_stv_builtin (code
, exp
);
15586 case ALTIVEC_BUILTIN_MFVSCR
:
15587 icode
= CODE_FOR_altivec_mfvscr
;
15588 tmode
= insn_data
[icode
].operand
[0].mode
;
15591 || GET_MODE (target
) != tmode
15592 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15593 target
= gen_reg_rtx (tmode
);
15595 pat
= GEN_FCN (icode
) (target
);
15601 case ALTIVEC_BUILTIN_MTVSCR
:
15602 icode
= CODE_FOR_altivec_mtvscr
;
15603 arg0
= CALL_EXPR_ARG (exp
, 0);
15604 op0
= expand_normal (arg0
);
15605 mode0
= insn_data
[icode
].operand
[0].mode
;
15607 /* If we got invalid arguments bail out before generating bad rtl. */
15608 if (arg0
== error_mark_node
)
15611 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15612 op0
= copy_to_mode_reg (mode0
, op0
);
15614 pat
= GEN_FCN (icode
) (op0
);
15619 case ALTIVEC_BUILTIN_DSSALL
:
15620 emit_insn (gen_altivec_dssall ());
15623 case ALTIVEC_BUILTIN_DSS
:
15624 icode
= CODE_FOR_altivec_dss
;
15625 arg0
= CALL_EXPR_ARG (exp
, 0);
15627 op0
= expand_normal (arg0
);
15628 mode0
= insn_data
[icode
].operand
[0].mode
;
15630 /* If we got invalid arguments bail out before generating bad rtl. */
15631 if (arg0
== error_mark_node
)
15634 if (TREE_CODE (arg0
) != INTEGER_CST
15635 || TREE_INT_CST_LOW (arg0
) & ~0x3)
15637 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15641 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15642 op0
= copy_to_mode_reg (mode0
, op0
);
15644 emit_insn (gen_altivec_dss (op0
));
15647 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
15648 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
15649 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
15650 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
15651 case VSX_BUILTIN_VEC_INIT_V2DF
:
15652 case VSX_BUILTIN_VEC_INIT_V2DI
:
15653 case VSX_BUILTIN_VEC_INIT_V1TI
:
15654 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
15656 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
15657 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
15658 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
15659 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
15660 case VSX_BUILTIN_VEC_SET_V2DF
:
15661 case VSX_BUILTIN_VEC_SET_V2DI
:
15662 case VSX_BUILTIN_VEC_SET_V1TI
:
15663 return altivec_expand_vec_set_builtin (exp
);
15665 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
15666 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
15667 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
15668 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
15669 case VSX_BUILTIN_VEC_EXT_V2DF
:
15670 case VSX_BUILTIN_VEC_EXT_V2DI
:
15671 case VSX_BUILTIN_VEC_EXT_V1TI
:
15672 return altivec_expand_vec_ext_builtin (exp
, target
);
15674 case P9V_BUILTIN_VEXTRACT4B
:
15675 case P9V_BUILTIN_VEC_VEXTRACT4B
:
15676 arg1
= CALL_EXPR_ARG (exp
, 1);
15679 /* Generate a normal call if it is invalid. */
15680 if (arg1
== error_mark_node
)
15681 return expand_call (exp
, target
, false);
15683 if (TREE_CODE (arg1
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1
) > 12)
15685 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15686 return expand_call (exp
, target
, false);
15690 case P9V_BUILTIN_VINSERT4B
:
15691 case P9V_BUILTIN_VINSERT4B_DI
:
15692 case P9V_BUILTIN_VEC_VINSERT4B
:
15693 arg2
= CALL_EXPR_ARG (exp
, 2);
15696 /* Generate a normal call if it is invalid. */
15697 if (arg2
== error_mark_node
)
15698 return expand_call (exp
, target
, false);
15700 if (TREE_CODE (arg2
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2
) > 12)
15702 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15703 return expand_call (exp
, target
, false);
15709 /* Fall through. */
15712 /* Expand abs* operations. */
15714 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
15715 if (d
->code
== fcode
)
15716 return altivec_expand_abs_builtin (d
->icode
, exp
, target
);
15718 /* Expand the AltiVec predicates. */
15719 d
= bdesc_altivec_preds
;
15720 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
15721 if (d
->code
== fcode
)
15722 return altivec_expand_predicate_builtin (d
->icode
, exp
, target
);
15724 /* LV* are funky. We initialized them differently. */
15727 case ALTIVEC_BUILTIN_LVSL
:
15728 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
15729 exp
, target
, false);
15730 case ALTIVEC_BUILTIN_LVSR
:
15731 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr
,
15732 exp
, target
, false);
15733 case ALTIVEC_BUILTIN_LVEBX
:
15734 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx
,
15735 exp
, target
, false);
15736 case ALTIVEC_BUILTIN_LVEHX
:
15737 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx
,
15738 exp
, target
, false);
15739 case ALTIVEC_BUILTIN_LVEWX
:
15740 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx
,
15741 exp
, target
, false);
15742 case ALTIVEC_BUILTIN_LVXL_V2DF
:
15743 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df
,
15744 exp
, target
, false);
15745 case ALTIVEC_BUILTIN_LVXL_V2DI
:
15746 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di
,
15747 exp
, target
, false);
15748 case ALTIVEC_BUILTIN_LVXL_V4SF
:
15749 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf
,
15750 exp
, target
, false);
15751 case ALTIVEC_BUILTIN_LVXL
:
15752 case ALTIVEC_BUILTIN_LVXL_V4SI
:
15753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si
,
15754 exp
, target
, false);
15755 case ALTIVEC_BUILTIN_LVXL_V8HI
:
15756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi
,
15757 exp
, target
, false);
15758 case ALTIVEC_BUILTIN_LVXL_V16QI
:
15759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi
,
15760 exp
, target
, false);
15761 case ALTIVEC_BUILTIN_LVX_V2DF
:
15762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op
,
15763 exp
, target
, false);
15764 case ALTIVEC_BUILTIN_LVX_V2DI
:
15765 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op
,
15766 exp
, target
, false);
15767 case ALTIVEC_BUILTIN_LVX_V4SF
:
15768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op
,
15769 exp
, target
, false);
15770 case ALTIVEC_BUILTIN_LVX
:
15771 case ALTIVEC_BUILTIN_LVX_V4SI
:
15772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op
,
15773 exp
, target
, false);
15774 case ALTIVEC_BUILTIN_LVX_V8HI
:
15775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op
,
15776 exp
, target
, false);
15777 case ALTIVEC_BUILTIN_LVX_V16QI
:
15778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op
,
15779 exp
, target
, false);
15780 case ALTIVEC_BUILTIN_LVLX
:
15781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx
,
15782 exp
, target
, true);
15783 case ALTIVEC_BUILTIN_LVLXL
:
15784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl
,
15785 exp
, target
, true);
15786 case ALTIVEC_BUILTIN_LVRX
:
15787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx
,
15788 exp
, target
, true);
15789 case ALTIVEC_BUILTIN_LVRXL
:
15790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl
,
15791 exp
, target
, true);
15792 case VSX_BUILTIN_LXVD2X_V1TI
:
15793 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti
,
15794 exp
, target
, false);
15795 case VSX_BUILTIN_LXVD2X_V2DF
:
15796 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df
,
15797 exp
, target
, false);
15798 case VSX_BUILTIN_LXVD2X_V2DI
:
15799 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di
,
15800 exp
, target
, false);
15801 case VSX_BUILTIN_LXVW4X_V4SF
:
15802 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf
,
15803 exp
, target
, false);
15804 case VSX_BUILTIN_LXVW4X_V4SI
:
15805 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si
,
15806 exp
, target
, false);
15807 case VSX_BUILTIN_LXVW4X_V8HI
:
15808 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi
,
15809 exp
, target
, false);
15810 case VSX_BUILTIN_LXVW4X_V16QI
:
15811 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi
,
15812 exp
, target
, false);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
                               : CODE_FOR_vsx_ld_elemrev_v2df);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
                               : CODE_FOR_vsx_ld_elemrev_v2di);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
                               : CODE_FOR_vsx_ld_elemrev_v4sf);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
                               : CODE_FOR_vsx_ld_elemrev_v4si);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
                               : CODE_FOR_vsx_ld_elemrev_v8hi);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
                               : CODE_FOR_vsx_ld_elemrev_v16qi);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
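    /* Illustration (not part of the original source): these entries back
       vec_xl and the __builtin_vsx_ld_elemrev_* functions, which must hand
       back elements in array order on either endianness.  Assuming
       <altivec.h>:

         vector double
         load_pair (const double *p)
         {
           return vec_xl (0, p);	// element 0 is p[0] on BE and on LE
         }

       On big endian any unaligned-capable vector load gives that layout
       directly; on little endian the element-reversing instruction is
       required, which is why the insn code is chosen on BYTES_BIG_ENDIAN
       above.  */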
    /* Fall through.  */

    /* XL_BE  We initialized them to always load in big endian order.  */
    case VSX_BUILTIN_XL_BE_V2DI:
      {
        enum insn_code code = CODE_FOR_vsx_load_v2di;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SI:
      {
        enum insn_code code = CODE_FOR_vsx_load_v4si;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V8HI:
      {
        enum insn_code code = CODE_FOR_vsx_load_v8hi;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V16QI:
      {
        enum insn_code code = CODE_FOR_vsx_load_v16qi;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V2DF:
      {
        enum insn_code code = CODE_FOR_vsx_load_v2df;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SF:
      {
        enum insn_code code = CODE_FOR_vsx_load_v4sf;
        return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */

static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of %s must be a constant",
             "__builtin_paired_predicate");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of %qs is out of range",
             "__builtin_paired_predicate");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mpaired");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
           == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
           == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
           == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
           "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
           == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mhard-float", "-mlong-double-128");
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else
    error ("builtin function %qs is not supported with the current options",
           name);
}
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
                     tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn_code
        = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
      switch (fn_code)
        {
        case RS6000_BUILTIN_NANQ:
        case RS6000_BUILTIN_NANSQ:
          {
            tree type = TREE_TYPE (TREE_TYPE (fndecl));
            const char *str = c_getstr (*args);
            int quiet = fn_code == RS6000_BUILTIN_NANQ;
            REAL_VALUE_TYPE real;

            if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
              return build_real (type, real);
            return NULL_TREE;
          }
        case RS6000_BUILTIN_INFQ:
        case RS6000_BUILTIN_HUGE_VALQ:
          {
            tree type = TREE_TYPE (TREE_TYPE (fndecl));
            REAL_VALUE_TYPE inf;
            real_inf (&inf);
            return build_real (type, inf);
          }
        default:
          break;
        }
    }
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
  return NULL_TREE;
#endif
}
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

static bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
                          ? get_insn_name ((int) icode)
                          : "nothing";

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
             fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Generic solution to prevent gimple folding of code without a LHS.  */
  if (!gimple_call_lhs (stmt))
    return false;

  switch (fn_code)
    {
    /* Flavors of vec_add.  We deliberately don't expand
       P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
       TImode, resulting in much poorer code generation.  */
    case ALTIVEC_BUILTIN_VADDUBM:
    case ALTIVEC_BUILTIN_VADDUHM:
    case ALTIVEC_BUILTIN_VADDUWM:
    case P8V_BUILTIN_VADDUDM:
    case ALTIVEC_BUILTIN_VADDFP:
    case VSX_BUILTIN_XVADDDP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
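      /* Illustration (not part of the original source): after this fold,
         assuming <altivec.h>,

           vector float add4 (vector float a, vector float b)
           {
             return vec_add (a, b);	// ALTIVEC_BUILTIN_VADDFP
           }

         carries an ordinary PLUS_EXPR in GIMPLE, so later passes can
         constant-fold, reassociate or combine it like any vector add.  */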
    /* Flavors of vec_sub.  We deliberately don't expand
       P8V_BUILTIN_VSUBUQM.  */
    case ALTIVEC_BUILTIN_VSUBUBM:
    case ALTIVEC_BUILTIN_VSUBUHM:
    case ALTIVEC_BUILTIN_VSUBUWM:
    case P8V_BUILTIN_VSUBUDM:
    case ALTIVEC_BUILTIN_VSUBFP:
    case VSX_BUILTIN_XVSUBDP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    case VSX_BUILTIN_XVMULSP:
    case VSX_BUILTIN_XVMULDP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Even element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULESB:
    case ALTIVEC_BUILTIN_VMULESH:
    /* Even element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Odd element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULOSB:
    case ALTIVEC_BUILTIN_VMULOSH:
    /* Odd element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_div (Integer).  */
    case VSX_BUILTIN_DIV_V2DI:
    case VSX_BUILTIN_UDIV_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_div (Float).  */
    case VSX_BUILTIN_XVDIVSP:
    case VSX_BUILTIN_XVDIVDP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
        gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_insert_before (gsi, g, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
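      /* Illustration (not part of the original source): vec_andc (a, b)
         computes a & ~b, so the fold above needs two GIMPLE statements,
         one for the complement and one for the AND:

           _1 = ~b_2(D);
           _3 = a_4(D) & _1;

         A temporary is created because a GIMPLE assignment can only carry
         a single operation on its right-hand side.  */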
    /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
        gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_insert_before (gsi, g, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
        gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_insert_before (gsi, g, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
        gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_insert_before (gsi, g, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
            && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
          return false;
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
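      /* Illustration (not part of the original source): the integral-type
         guard above means that, assuming <altivec.h>,

           vector signed int abs4 (vector signed int a)
           {
             return vec_abs (a);	// ALTIVEC_BUILTIN_ABS_V4SI
           }

         is only rewritten to ABS_EXPR when signed overflow is defined to
         wrap (e.g. -fwrapv); otherwise ABS_EXPR of INT_MIN would be
         undefined in GIMPLE even though the hardware instruction is well
         defined, so the call is left for the RTL expander.  */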
    /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
        gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_insert_before (gsi, g, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
            && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
          return false;
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple_seq stmts = NULL;
        /* Convert arg0 to unsigned.  */
        tree arg0_unsigned
          = gimple_build (&stmts, VIEW_CONVERT_EXPR,
                          unsigned_type_for (TREE_TYPE (arg0)), arg0);
        tree res
          = gimple_build (&stmts, RSHIFT_EXPR,
                          TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
        /* Convert result back to the lhs type.  */
        res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        update_call_from_tree (gsi, res);
        return true;
      }
    default:
      break;
    }

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
             fn_code, fn_name1, fn_name2);

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                       machine_mode mode ATTRIBUTE_UNUSED,
                       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
                           ? get_insn_name ((int) icode)
                           : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
        {
        default:                   name3 = "unknown";   break;
        case RS6000_BTC_SPECIAL:   name3 = "special";   break;
        case RS6000_BTC_UNARY:     name3 = "unary";     break;
        case RS6000_BTC_BINARY:    name3 = "binary";    break;
        case RS6000_BTC_TERNARY:   name3 = "ternary";   break;
        case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
        case RS6000_BTC_ABS:       name3 = "abs";       break;
        case RS6000_BTC_DST:       name3 = "dst";       break;
        }

      fprintf (stderr,
               "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
               (name1) ? name1 : "---", fcode,
               (name2) ? name2 : "---", (int) icode,
               name3,
               func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
                                           ? CODE_FOR_bpermd_di
                                           : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
                                           target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
                                            ? CODE_FOR_rs6000_mftb_di
                                            : CODE_FOR_rs6000_mftb_si),
                                           target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
        int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
                     : (int) CODE_FOR_altivec_lvsl_direct);
        machine_mode tmode = insn_data[icode].operand[0].mode;
        machine_mode mode = insn_data[icode].operand[1].mode;
        tree arg;
        rtx op, addr, pat;

        gcc_assert (TARGET_ALTIVEC);

        arg = CALL_EXPR_ARG (exp, 0);
        gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
        op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
        addr = memory_address (mode, op);
        if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
          op = addr;
        else
          {
            /* For the load case need to negate the address.  */
            op = gen_reg_rtx (GET_MODE (addr));
            emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
          }
        op = gen_rtx_MEM (mode, op);

        if (target == 0
            || GET_MODE (target) != tmode
            || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
          target = gen_reg_rtx (tmode);

        pat = GEN_FCN (icode) (target, op);
        if (!pat)
          return 0;
        emit_insn (pat);

        return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
         constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
        {
          exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
                                 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
        }
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
              || attr == RS6000_BTC_BINARY
              || attr == RS6000_BTC_TERNARY
              || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (d->icode, target);

  gcc_unreachable ();
}
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
             (TARGET_PAIRED_FLOAT) ? ", paired"  : "",
             (TARGET_ALTIVEC)      ? ", altivec" : "",
             (TARGET_VSX)          ? ", vsx"     : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
                                       : "__vector long long",
                                       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
                                       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
                                       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
                                        intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
                                                 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
                                                unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
                                                unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                                ? "__vector unsigned long"
                                                : "__vector unsigned long long",
                                                unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
                                                TYPE_QUAL_CONST));

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
                                           intTI_type_node, 1);
      unsigned_V1TI_type_node
        = rs6000_vector_type ("__vector unsigned __int128",
                              unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;

  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM double-double
     format that uses a pair of doubles, depending on the switches and
     defaults.

     We do not enable the actual __float128 keyword unless the user explicitly
     asks for it, because the library support is not yet complete.

     If we don't support for either 128-bit IBM double double or IEEE 128-bit
     floating point, we need make sure the type is non-zero or else self-test
     fails during bootstrap.

     We don't register a built-in type for __ibm128 if the type is the same as
     long double.  Instead we add a #define for __ibm128 in
     rs6000_cpu_cpp_builtins to long double.  */
  if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
    {
      ibm128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ibm128_float_type_node, IFmode);
      layout_type (ibm128_float_type_node);

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
                                              "__ibm128");
    }
  else
    ibm128_float_type_node = long_double_type_node;

  if (TARGET_FLOAT128_KEYWORD)
    {
      ieee128_float_type_node = float128_type_node;
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
                                              "__float128");
    }
  else if (TARGET_FLOAT128_TYPE)
    {
      ieee128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ieee128_float_type_node, KFmode);
      layout_type (ieee128_float_type_node);

      /* If we are not exporting the __float128/_Float128 keywords, we need a
         keyword to get the types created.  Use __ieee128 as the dummy
         keyword.  */
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
                                              "__ieee128");
    }
  else
    ieee128_float_type_node = long_double_type_node;
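  /* Illustration (not part of the original source): once these types are
     registered, user code on a suitably configured compiler can write

       __ibm128   d = 1.0;	// IBM double-double format (IFmode)
       __float128 q = 1.0Q;	// IEEE binary128 (KFmode), needs -mfloat128

     with __ieee128 serving as the internal spelling used to create the
     IEEE type when the __float128 keyword itself is not enabled.  */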
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
                                             bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
                                            bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
                                            bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                            ? "__vector __bool long"
                                            : "__vector __bool long long",
                                            bool_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
                                             pixel_type_node, 8);

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = build_function_type_list (ieee128_float_type_node,
                                    const_str_type_node, NULL_TREE);
  def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
  def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);

  ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
  def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
  def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
                                 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
                                      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
                                      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
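  /* Illustration (not part of the original source): the two builtins just
     defined read the time base register directly, e.g.

       unsigned long long t0 = __builtin_ppc_get_timebase ();
       // ... code being measured ...
       unsigned long long t1 = __builtin_ppc_get_timebase ();
       unsigned long long ticks = t1 - t0;

     __builtin_ppc_mftb () reads the same register but is only 32 bits wide
     on 32-bit targets, matching the DImode/SImode choice above.  */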
  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node, double_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
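  /* Illustration (not part of the original source): these are the documented
     PowerPC CPU test builtins, e.g.

       __builtin_cpu_init ();
       if (__builtin_cpu_is ("power9"))
         run_power9_kernel ();		// hypothetical user function
       else if (__builtin_cpu_supports ("vsx"))
         run_vsx_kernel ();		// hypothetical user function

     where the string arguments are matched against the runtime CPU
     description provided by the C library.  */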
  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF &&
      (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}

/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
                                integer_type_node,
                                V2SF_type_node,
                                V2SF_type_node,
                                NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
                        (float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
                                                           long_integer_type_node,
                                                           pcfloat_type_node,
                                                           NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
                              V2SF_type_node,
                              long_integer_type_node,
                              pcfloat_type_node,
                              NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
               PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
               PAIRED_BUILTIN_STX);

  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
                     d->name);
          continue;
        }

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);

      if (TARGET_DEBUG_BUILTIN)
        fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
                 (int)i, get_insn_name (d->icode), (int)d->icode,
                 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
        {
        case E_V2SFmode:
          type = int_ftype_int_v2sf_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
                                                TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
                                integer_type_node, opaque_V4SI_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SI_type_node,
                                V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DI_type_node,
                                V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
                                opaque_V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
                                V16QI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);

  tree void_ftype_v16qi_pvoid_long
    = build_function_type_list (void_type_node,
                                V16QI_type_node, pvoid_type_node,
                                long_integer_type_node, NULL_TREE);

  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
                                V8HI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V8HI_type_node,
                                V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V16QI_type_node,
                                V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SF_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DF_type_node,
                                V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
                                pcvoid_type_node, integer_type_node,
                                integer_type_node, NULL_TREE);
17265 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si
, ALTIVEC_BUILTIN_MTVSCR
);
17266 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void
, ALTIVEC_BUILTIN_MFVSCR
);
17267 def_builtin ("__builtin_altivec_dssall", void_ftype_void
, ALTIVEC_BUILTIN_DSSALL
);
17268 def_builtin ("__builtin_altivec_dss", void_ftype_int
, ALTIVEC_BUILTIN_DSS
);
17269 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSL
);
17270 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSR
);
17271 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEBX
);
17272 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEHX
);
17273 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEWX
);
17274 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVXL
);
17275 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid
,
17276 ALTIVEC_BUILTIN_LVXL_V2DF
);
17277 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid
,
17278 ALTIVEC_BUILTIN_LVXL_V2DI
);
17279 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid
,
17280 ALTIVEC_BUILTIN_LVXL_V4SF
);
17281 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid
,
17282 ALTIVEC_BUILTIN_LVXL_V4SI
);
17283 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid
,
17284 ALTIVEC_BUILTIN_LVXL_V8HI
);
17285 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid
,
17286 ALTIVEC_BUILTIN_LVXL_V16QI
);
17287 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVX
);
17288 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid
,
17289 ALTIVEC_BUILTIN_LVX_V2DF
);
17290 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid
,
17291 ALTIVEC_BUILTIN_LVX_V2DI
);
17292 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid
,
17293 ALTIVEC_BUILTIN_LVX_V4SF
);
17294 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid
,
17295 ALTIVEC_BUILTIN_LVX_V4SI
);
17296 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid
,
17297 ALTIVEC_BUILTIN_LVX_V8HI
);
17298 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid
,
17299 ALTIVEC_BUILTIN_LVX_V16QI
);
17300 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVX
);
17301 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid
,
17302 ALTIVEC_BUILTIN_STVX_V2DF
);
17303 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid
,
17304 ALTIVEC_BUILTIN_STVX_V2DI
);
17305 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid
,
17306 ALTIVEC_BUILTIN_STVX_V4SF
);
17307 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid
,
17308 ALTIVEC_BUILTIN_STVX_V4SI
);
17309 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid
,
17310 ALTIVEC_BUILTIN_STVX_V8HI
);
17311 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid
,
17312 ALTIVEC_BUILTIN_STVX_V16QI
);
17313 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVEWX
);
17314 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVXL
);
17315 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid
,
17316 ALTIVEC_BUILTIN_STVXL_V2DF
);
17317 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid
,
17318 ALTIVEC_BUILTIN_STVXL_V2DI
);
17319 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid
,
17320 ALTIVEC_BUILTIN_STVXL_V4SF
);
17321 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid
,
17322 ALTIVEC_BUILTIN_STVXL_V4SI
);
17323 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid
,
17324 ALTIVEC_BUILTIN_STVXL_V8HI
);
17325 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid
,
17326 ALTIVEC_BUILTIN_STVXL_V16QI
);
17327 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVEBX
);
17328 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid
, ALTIVEC_BUILTIN_STVEHX
);
17329 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LD
);
17330 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDE
);
17331 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDL
);
17332 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSL
);
17333 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSR
);
17334 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEBX
);
17335 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEHX
);
17336 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEWX
);
17337 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_ST
);
17338 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STE
);
17339 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STL
);
17340 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEWX
);
17341 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEBX
);
17342 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEHX
);
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SI);

  def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V8HI);
  def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V4SI);
  def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V2DI);
  def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V4SF);
  def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V2DF);
  def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_XL_BE_V16QI);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
		   VSX_BUILTIN_LD_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
		   VSX_BUILTIN_LD_ELEMREV_V16QI);
      def_builtin ("__builtin_vsx_st_elemrev_v8hi",
		   void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_st_elemrev_v16qi",
		   void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
    }
  else
    {
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
    }
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_XST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  if (TARGET_P9_VECTOR)
    def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
		 P9V_BUILTIN_STXVL);
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    {
      HOST_WIDE_INT mask = d->mask;

      /* It is expected that these dst built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */
      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
		     d->name);
	  continue;
	}

      def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
    }
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      machine_mode mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	{
	  /* Cannot define builtin if the instruction is disabled.  */
	  gcc_assert (d->icode != CODE_FOR_nothing);
	  mode1 = insn_data[d->icode].operand[1].mode;
	}

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V2DImode:
	  type = int_ftype_int_v2di_v2di;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);
      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V2DImode:
	  type = v2di_ftype_v2di;
	  break;
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
	= build_function_type_list (V1TI_type_node,
				    long_integer_type_node, pcvoid_type_node,
				    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
	= build_function_type_list (void_type_node,
				    V1TI_type_node, long_integer_type_node,
				    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
		   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
		   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
					NULL_TREE, NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
					intTI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree gpr_type_node;
      tree rettype;
      tree argtype;

      /* It is expected that these htm built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */

      if (TARGET_32BIT && TARGET_POWERPC64)
	gpr_type_node = long_long_unsigned_type_node;
      else
	gpr_type_node = long_unsigned_type_node;

      if (attr & RS6000_BTC_SPR)
	{
	  rettype = gpr_type_node;
	  argtype = gpr_type_node;
	}
      else if (d->code == HTM_BUILTIN_TABORTDC
	       || d->code == HTM_BUILTIN_TABORTDCI)
	{
	  rettype = unsigned_type_node;
	  argtype = gpr_type_node;
	}
      else
	{
	  rettype = unsigned_type_node;
	  argtype = unsigned_type_node;
	}

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (d->name == 0)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
		     (long unsigned) i);
	  continue;
	}

      op[nopnds++] = (void_func) ? void_type_node : rettype;

      if (attr_args == RS6000_BTC_UNARY)
	op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}
      else if (attr_args == RS6000_BTC_TERNARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}

      switch (nopnds)
	{
	case 1:
	  type = build_function_type_list (op[0], NULL_TREE);
	  break;
	case 2:
	  type = build_function_type_list (op[0], op[1], NULL_TREE);
	  break;
	case 3:
	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
	  break;
	case 4:
	  type = build_function_type_list (op[0], op[1], op[2], op[3],
					   NULL_TREE);
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
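
/* For illustration (not part of the original comments): two builtins whose
   return/argument modes and signedness flags match, e.g. mode[] =
   { V4SImode, V4SImode, V4SImode, VOIDmode } with all uns_p[] zero, hash to
   the same slot and therefore share one function-type tree; flipping any
   uns_p[] entry yields a distinct key.  */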
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */

static tree
builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
		       machine_mode mode_arg1, machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;
  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
    case MISC_BUILTIN_CDTBCD:
    case MISC_BUILTIN_CBCDTD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
    case MISC_BUILTIN_ADDG6S:
    case MISC_BUILTIN_DIVWEU:
    case MISC_BUILTIN_DIVWEUO:
    case MISC_BUILTIN_DIVDEU:
    case MISC_BUILTIN_DIVDEUO:
    case VSX_BUILTIN_UDIV_V2DI:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMINUW:
    case P8V_BUILTIN_VMAXUD:
    case P8V_BUILTIN_VMINUD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDSP:
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
    case MISC_BUILTIN_UNPACK_TD:
    case MISC_BUILTIN_UNPACK_V1TI:
      h.uns_p[0] = 1;
      break;

      /* unsigned arguments for 128-bit pack instructions.  */
    case MISC_BUILTIN_PACK_TD:
    case MISC_BUILTIN_PACK_V1TI:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned second arguments (vector shift right).  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      h.uns_p[2] = 1;
      break;

    default:
      break;
    }
  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error (input_location,
		 "internal error: builtin function %qs had an unexpected "
		 "return type %qs", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error (input_location,
		     "internal error: builtin function %qs, argument %d "
		     "had unexpected argument type %qs", name, i,
		     GET_MODE_NAME (m));
    }

  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *h2 = h;
      *found = h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return (*found)->type;
}
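
/* For illustration (hypothetical call, not from the original source):
   builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode, code, name)
   yields a "v4si f (v4si, v4si)" function type; if CODE is one of the
   unsigned cases in the switch above, the corresponding unsigned vector
   types are used instead.  */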
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */

  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple no-argument operators.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
	  continue;
	}
      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (!opaque_ftype_opaque)
	    opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
	  type = opaque_ftype_opaque;
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
			 (long unsigned) i);
	      continue;
	    }
	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr,
			 "rs6000_builtin, skip no-argument %s (no code)\n",
			 d->name);
	      continue;
	    }
	  mode0 = insn_data[icode].operand[0].mode;
	  if (mode0 == V2SImode)
	    {
	      /* code for paired single */
	      if (! (type = v2si_ftype))
		type = v2si_ftype
		  = build_function_type_list (opaque_V2SI_type_node,
					      NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
					  d->code, d->name);
	}
      def_builtin (d->name, type, d->code);
    }
}
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
	{
	  set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
	  set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
	  set_optab_libfunc (ne_optab, mode, "__gcc_qne");
	  set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
	  set_optab_libfunc (ge_optab, mode, "__gcc_qge");
	  set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
	  set_optab_libfunc (le_optab, mode, "__gcc_qle");
	  set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

	  set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
	  set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
	  set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
	  set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
	  set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
	  set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
	  set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
	  set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
	}
    }
}
/* Set up IEEE 128-bit floating point routines.  Use different names if the
   arguments can be passed in a vector register.  The historical PowerPC
   implementation of IEEE 128-bit floating point used _q_<op> for the names,
   so continue to use that if we aren't using vector registers to pass IEEE
   128-bit floating point.  */

static void
init_float128_ieee (machine_mode mode)
{
  if (FLOAT128_VECTOR_P (mode))
    {
      set_optab_libfunc (add_optab, mode, "__addkf3");
      set_optab_libfunc (sub_optab, mode, "__subkf3");
      set_optab_libfunc (neg_optab, mode, "__negkf2");
      set_optab_libfunc (smul_optab, mode, "__mulkf3");
      set_optab_libfunc (sdiv_optab, mode, "__divkf3");
      set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
      set_optab_libfunc (abs_optab, mode, "__abstkf2");

      set_optab_libfunc (eq_optab, mode, "__eqkf2");
      set_optab_libfunc (ne_optab, mode, "__nekf2");
      set_optab_libfunc (gt_optab, mode, "__gtkf2");
      set_optab_libfunc (ge_optab, mode, "__gekf2");
      set_optab_libfunc (lt_optab, mode, "__ltkf2");
      set_optab_libfunc (le_optab, mode, "__lekf2");
      set_optab_libfunc (unord_optab, mode, "__unordkf2");

      set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
      set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
      set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
      set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");

      set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");

      set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");

      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");

      set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
      set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
      set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
      set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");

      set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
      set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
      set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
      set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_q_add");
      set_optab_libfunc (sub_optab, mode, "_q_sub");
      set_optab_libfunc (neg_optab, mode, "_q_neg");
      set_optab_libfunc (smul_optab, mode, "_q_mul");
      set_optab_libfunc (sdiv_optab, mode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");

      set_optab_libfunc (eq_optab, mode, "_q_feq");
      set_optab_libfunc (ne_optab, mode, "_q_fne");
      set_optab_libfunc (gt_optab, mode, "_q_fgt");
      set_optab_libfunc (ge_optab, mode, "_q_fge");
      set_optab_libfunc (lt_optab, mode, "_q_flt");
      set_optab_libfunc (le_optab, mode, "_q_fle");

      set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
    }
}
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  /* AIX/Darwin/64-bit Linux quad floating point routines.  */
  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
	init_float128_ibm (TFmode);

      /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
      else
	init_float128_ieee (TFmode);
    }
}
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */

void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
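
/* For illustration (not from the original comments): with DOT == 1 and CCREG
   == cr0 this emits one PARALLEL [(set cr0 (compare SRC 0)) (clobber DST)];
   with DOT == 2 the clobber becomes (set DST SRC); for a CC register other
   than cr0 a plain move is emitted followed by a separate compare.  */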
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   not zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */

bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
	nb = n;
      else
	nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
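
/* For illustration (not from the original comments): MASK == 0x00ffff00 in
   SImode is a single stretch of ones from bit 8 through bit 23, so the
   function returns true with *E == 8 and *B == 23; MASK == 0x0ff00ff0 has
   two stretches and returns false.  */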
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
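
/* For illustration (not from the original comments): an AND with 0x7fff in
   DImode has ne == 0 and nb == 14, so the first case applies and the
   template becomes "rldicl %0,%1,0,49" (keep the low 15 bits); an SImode
   mask such as 0x00ffff00 falls through to "rlwinm %0,%1,0,8,23".  */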
/* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
   rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
   shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);
  int sh = -1;

  if (CONST_INT_P (XEXP (shift, 1)))
    {
      sh = INTVAL (XEXP (shift, 1));
      if (sh < 0 || sh >= n)
	return false;
    }

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rld*.  */
  if (mode == DImode && code == ROTATE)
    return (nb == 63 || ne == 0 || ne == sh);

  /* SImode rotates need rlw*.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Variable shifts are only okay for rotates.  */
  if (sh < 0)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlw*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rld*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (nb == 63 || ne == 0 || ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for a shift with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (64 - INTVAL (operands[2]));
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2cl. %0,%1,%2,%3";
      return "rld%I2cl %0,%1,%2,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rld%I2cr. %0,%1,%2,%3";
      return "rld%I2cr %0,%1,%2,%3";
    }

  if (mode == DImode
      && GET_CODE (operands[4]) != LSHIFTRT
      && CONST_INT_P (operands[2])
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2c. %0,%1,%2,%3";
      return "rld%I2c %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      /* This insn can also be a 64-bit rotate with mask that really makes
	 it just a shift right (with mask); the %h below are to adjust for
	 that situation (shift count is >= 32 in that case).  */
      if (dot)
	return "rlw%I2nm. %0,%1,%h2,%3,%4";
      return "rlw%I2nm %0,%1,%h2,%3,%4";
    }

  gcc_unreachable ();
}
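
/* For illustration (not from the original comments): (x << 3) & 0xf8 in
   SImode has nb == 7 and ne == 3, so the last case applies with %3 == 24 and
   %4 == 28, giving "rlwinm %0,%1,3,24,28".  */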
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
   rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
   ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);

  int sh = INTVAL (XEXP (shift, 1));
  if (sh < 0 || sh >= n)
    return false;

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rldimi.  */
  if (mode == DImode && code == ROTATE)
    return (ne == sh);

  /* SImode rotates need rlwimi.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlwimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rldimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for an insert with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  /* Prefer rldimi because rlwimi is cracked.  */
  if (TARGET_POWERPC64
      && (!dot || mode == DImode)
      && GET_CODE (operands[4]) != LSHIFTRT
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldimi. %0,%1,%2,%3";
      return "rldimi %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwimi. %0,%1,%2,%3,%4";
      return "rlwimi %0,%1,%2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
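
/* For illustration (not from the original comments): C == 0x00ff00ff has two
   stretches of ones, so the single-mask test fails; bit1 == 0x1,
   bit2 == 0x100, bit3 == 0x10000, and val + bit3 - bit2 == 0x00ffffff, which
   is a single stretch, so the AND can be done in two instructions.  */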
/* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
   If EXPAND is true, split rotate-and-mask instructions we generate to
   their constituent parts as well (this is used during expand); if DOT
   is 1, make the last insn a record-form instruction clobbering the
   destination GPR and setting the CC reg (from operands[3]); if 2, set
   that GPR as well as the CC reg.  */

void
rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
{
  gcc_assert (!(expand && dot));

  unsigned HOST_WIDE_INT val = INTVAL (operands[2]);

  /* If it is one stretch of ones, it is DImode; shift left, mask, then
     shift right.  This generates better code than doing the masks without
     shifts, or shifting first right and then left.  */
  int nb, ne;
  if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
    {
      gcc_assert (mode == DImode);

      int shift = 63 - nb;
      if (expand)
	{
	  rtx tmp1 = gen_reg_rtx (DImode);
	  rtx tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
	  emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
	  emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
	}
      else
	{
	  rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
	  tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
	  emit_move_insn (operands[0], tmp);
	  tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
	  rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
	}
      return;
    }

  /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
     that does the rest.  */
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;

  unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
  unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;

  gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));

  /* Two "no-rotate"-and-mask instructions, for SImode.  */
  if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
    {
      gcc_assert (mode == SImode);

      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
      emit_move_insn (reg, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  gcc_assert (mode == DImode);

  /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
     insns; we have to do the first in SImode, because it wraps.  */
  if (mask2 <= 0xffffffff
      && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
    {
      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
			     GEN_INT (mask1));
      rtx reg_low = gen_lowpart (SImode, reg);
      emit_move_insn (reg_low, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  /* Two rld* insns: rotate, clear the hole in the middle (which now is
     at the top end), rotate back and clear the other hole.  */
  int right = exact_log2 (bit3);
  int left = 64 - right;

  /* Rotate the mask too.  */
  mask1 = (mask1 >> right) | ((bit2 - 1) << left);

  if (expand)
    {
      rtx tmp1 = gen_reg_rtx (DImode);
      rtx tmp2 = gen_reg_rtx (DImode);
      rtx tmp3 = gen_reg_rtx (DImode);
      emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
      emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
      emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
      emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
    }
  else
    {
      rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
      emit_move_insn (operands[0], tmp);
      tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
    }
}
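
/* For illustration (not from the original comments): an SImode AND with
   0x00ff00ff takes the "two no-rotate" path above, using
   mask1 == 0xffff00ff (a wrap-around rlwinm mask) followed by
   mask2 == 0x00ffffff.  */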
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
/* Return the mode to be used for memory when a secondary memory
   location is needed.  For SDmode values we need to use DDmode, in
   all other cases we can use the same mode.  */

machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;
  return mode;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was used.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
	return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;

  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
19271 /* Helper function for rs6000_secondary_reload to determine whether the memory
19272    address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19273    needs reloading.  Return negative if the memory is not handled by the memory
19274    helper functions and to try a different reload method, 0 if no additional
19275    instructions are needed, and positive to give the extra cost for the
19279 rs6000_secondary_reload_memory (rtx addr,
19280                                 enum reg_class rclass,
19283   int extra_cost = 0;
19284   rtx reg, and_arg, plus_arg0, plus_arg1;
19285   addr_mask_type addr_mask;
19286   const char *type = NULL;
19287   const char *fail_msg = NULL;
19289   if (GPR_REG_CLASS_P (rclass))
19290     addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19292   else if (rclass == FLOAT_REGS)
19293     addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19295   else if (rclass == ALTIVEC_REGS)
19296     addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19298   /* For the combined VSX_REGS, turn off Altivec AND -16.  */
19299   else if (rclass == VSX_REGS)
19300     addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19301                  & ~RELOAD_REG_AND_M16);
19303   /* If the register allocator hasn't made up its mind yet on the register
19304      class to use, settle on defaults to use.  */
19305   else if (rclass == NO_REGS)
19307       addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19308                    & ~RELOAD_REG_AND_M16);
19310       if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19311         addr_mask &= ~(RELOAD_REG_INDEXED
19312                        | RELOAD_REG_PRE_INCDEC
19313                        | RELOAD_REG_PRE_MODIFY);
19319   /* If the register isn't valid in this register class, just return now.  */
19320   if ((addr_mask & RELOAD_REG_VALID) == 0)
19322       if (TARGET_DEBUG_ADDR)
19325                  "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19326                  "not valid in class\n",
19327                  GET_MODE_NAME (mode), reg_class_names[rclass]);
19334   switch (GET_CODE (addr))
19336     /* Does the register class support auto update forms for this mode?  We
19337        don't need a scratch register, since the powerpc only supports
19338        PRE_INC, PRE_DEC, and PRE_MODIFY.  */
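      /* For example, an update form such as (mem:DF (pre_dec (reg 1))) both
         accesses the memory and decrements r1 by 8; if the reload register
         class cannot use the update form directly, the adjustment is done as
         a separate add or subtract and the extra cost accounts for that (the
         DFmode example is illustrative).  */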
19341       reg = XEXP (addr, 0);
19342       if (!base_reg_operand (addr, GET_MODE (reg)))
19344           fail_msg = "no base register #1";
19348       else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19356       reg = XEXP (addr, 0);
19357       plus_arg1 = XEXP (addr, 1);
19358       if (!base_reg_operand (reg, GET_MODE (reg))
19359           || GET_CODE (plus_arg1) != PLUS
19360           || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19362           fail_msg = "bad PRE_MODIFY";
19366       else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19373       /* Do we need to simulate AND -16 to clear the bottom address bits used
19374          in VMX load/stores?  Only allow the AND for vector sizes.  */
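      /* For example, the Altivec lvx/stvx patterns describe their address as
         (and (plus (reg) (reg)) (const_int -16)); ANDing with -16 clears the
         low four address bits so the access is 16-byte aligned, which is why
         only 16-byte (vector) modes are allowed to use this form.  */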
19376       and_arg = XEXP (addr, 0);
19377       if (GET_MODE_SIZE (mode) != 16
19378           || GET_CODE (XEXP (addr, 1)) != CONST_INT
19379           || INTVAL (XEXP (addr, 1)) != -16)
19381           fail_msg = "bad Altivec AND #1";
19385       if (rclass != ALTIVEC_REGS)
19387           if (legitimate_indirect_address_p (and_arg, false))
19390           else if (legitimate_indexed_address_p (and_arg, false))
19395               fail_msg = "bad Altivec AND #2";
19403     /* If this is an indirect address, make sure it is a base register.  */
19406       if (!legitimate_indirect_address_p (addr, false))
19413     /* If this is an indexed address, make sure the register class can handle
19414        indexed addresses for this mode.  */
19416       plus_arg0 = XEXP (addr, 0);
19417       plus_arg1 = XEXP (addr, 1);
19419       /* (plus (plus (reg) (constant)) (constant)) is generated during
19420          push_reload processing, so handle it now.  */
19421       if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19423           if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19430       /* (plus (plus (reg) (constant)) (reg)) is also generated during
19431          push_reload processing, so handle it now.  */
19432       else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19434           if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19437               type = "indexed #2";
19441       else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19443           fail_msg = "no base register #2";
19447       else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19449           if ((addr_mask & RELOAD_REG_INDEXED) == 0
19450               || !legitimate_indexed_address_p (addr, false))
19457       else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19458                && CONST_INT_P (plus_arg1))
19460           if (!quad_address_offset_p (INTVAL (plus_arg1)))
19463               type = "vector d-form offset";
19467       /* Make sure the register class can handle offset addresses.  */
19468       else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19470           if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19473               type = "offset #2";
19479           fail_msg = "bad PLUS";
19486       /* Quad offsets are restricted and can't handle normal addresses.  */
19487       if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19490           type = "vector d-form lo_sum";
19493       else if (!legitimate_lo_sum_address_p (mode, addr, false))
19495           fail_msg = "bad LO_SUM";
19499       if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19506     /* Static addresses need to create a TOC entry.  */
19510       if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19513           type = "vector d-form lo_sum #2";
19519           extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19523     /* TOC references look like offsetable memory.  */
19525       if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19527           fail_msg = "bad UNSPEC";
19531       else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19534           type = "vector d-form lo_sum #3";
19537       else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19540           type = "toc reference";
19546       fail_msg = "bad address";
19551   if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19553       if (extra_cost < 0)
19555                  "rs6000_secondary_reload_memory error: mode = %s, "
19556                  "class = %s, addr_mask = '%s', %s\n",
19557                  GET_MODE_NAME (mode),
19558                  reg_class_names[rclass],
19559                  rs6000_debug_addr_mask (addr_mask, false),
19560                  (fail_msg != NULL) ? fail_msg : "<bad address>");
19564                  "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19565                  "addr_mask = '%s', extra cost = %d, %s\n",
19566                  GET_MODE_NAME (mode),
19567                  reg_class_names[rclass],
19568                  rs6000_debug_addr_mask (addr_mask, false),
19570                  (type) ? type : "<none>");
19578 /* Helper function for rs6000_secondary_reload to return true if a move to a
19579    different register class is really a simple move.  */
19582 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19583                                      enum rs6000_reg_type from_type,
19586   int size = GET_MODE_SIZE (mode);
19588   /* Add support for various direct moves available.  In this function, we only
19589      look at cases where we don't need any extra registers, and one or more
19590      simple move insns are issued.  Originally small integers are not allowed
19591      in FPR/VSX registers.  Single precision binary floating is not a simple
19592      move because we need to convert to the single precision memory layout.
19593      The 4-byte SDmode can be moved.  TDmode values are disallowed since they
19594      need special direct move handling, which we do not support yet.  */
19595   if (TARGET_DIRECT_MOVE
19596       && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19597           || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19599       if (TARGET_POWERPC64)
19601           /* ISA 2.07: MTVSRD or MFVSRD.  */
19605           /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
19606           if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19610       /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
19611       if (TARGET_P8_VECTOR)
19613           if (mode == SImode)
19616           if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19620       /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
19621       if (mode == SDmode)
19625   /* Power6+: MFTGPR or MFFGPR.  */
19626   else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19627            && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19628                || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19631   /* Move to/from SPR.  */
19632   else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19633            && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19634                || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19640 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19641    special direct moves that involve allocating an extra register, return the
19642    insn code of the helper function if there is such a function or
19643    CODE_FOR_nothing if not.  */
19646 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19647                                      enum rs6000_reg_type from_type,
19649                                      secondary_reload_info *sri,
19653   enum insn_code icode = CODE_FOR_nothing;
19655   int size = GET_MODE_SIZE (mode);
19657   if (TARGET_POWERPC64 && size == 16)
19659       /* Handle moving 128-bit values from GPRs to VSX registers on
19660          ISA 2.07 (power8, power9) when running in 64-bit mode using
19661          XXPERMDI to glue the two 64-bit values back together.  */
19662       if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19664           cost = 3;                     /* 2 mtvsrd's, 1 xxpermdi.  */
19665           icode = reg_addr[mode].reload_vsx_gpr;
19668       /* Handle moving 128-bit values from VSX registers to GPRs on
19669          ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19670          bottom 64-bit value.  */
19671       else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19673           cost = 3;                     /* 2 mfvsrd's, 1 xxpermdi.  */
19674           icode = reg_addr[mode].reload_gpr_vsx;
19678   else if (TARGET_POWERPC64 && mode == SFmode)
19680       if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19682           cost = 3;                     /* xscvdpspn, mfvsrd, and.  */
19683           icode = reg_addr[mode].reload_gpr_vsx;
19686       else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19688           cost = 2;                     /* mtvsrz, xscvspdpn.  */
19689           icode = reg_addr[mode].reload_vsx_gpr;
19693   else if (!TARGET_POWERPC64 && size == 8)
19695       /* Handle moving 64-bit values from GPRs to floating point registers on
19696          ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19697          32-bit values back together.  Altivec register classes must be handled
19698          specially since a different instruction is used, and the secondary
19699          reload support requires a single instruction class in the scratch
19700          register constraint.  However, right now TFmode is not allowed in
19701          Altivec registers, so the pattern will never match.  */
19702       if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19704           cost = 3;                     /* 2 mtvsrwz's, 1 fmrgow.  */
19705           icode = reg_addr[mode].reload_fpr_gpr;
19709   if (icode != CODE_FOR_nothing)
19714       sri->icode = icode;
19715       sri->extra_cost = cost;
19722 /* Return whether a move between two register classes can be done either
19723    directly (simple move) or via a pattern that uses a single extra temporary
19724    (using ISA 2.07's direct move in this case).  */
19727 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19728                               enum rs6000_reg_type from_type,
19730                               secondary_reload_info *sri,
19733   /* Fall back to load/store reloads if either type is not a register.  */
19734   if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19737   /* If we haven't allocated registers yet, assume the move can be done for the
19738      standard register types.  */
19739   if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19740       || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19741       || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19744   /* Moves within the same set of registers are a simple move for non-specialized
19746   if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19749   /* Check whether a simple move can be done directly.  */
19750   if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19754       sri->icode = CODE_FOR_nothing;
19755       sri->extra_cost = 0;
19760   /* Now check if we can do it in a few steps.  */
19761   return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19765 /* Inform reload about cases where moving X with a mode MODE to a register in
19766 RCLASS requires an extra scratch or immediate register. Return the class
19767 needed for the immediate register.
19769 For VSX and Altivec, we may need a register to convert sp+offset into
19772 For misaligned 64-bit gpr loads and stores we need a register to
19773 convert an offset address to indirect. */
19776 rs6000_secondary_reload (bool in_p
,
19778 reg_class_t rclass_i
,
19780 secondary_reload_info
*sri
)
19782 enum reg_class rclass
= (enum reg_class
) rclass_i
;
19783 reg_class_t ret
= ALL_REGS
;
19784 enum insn_code icode
;
19785 bool default_p
= false;
19786 bool done_p
= false;
19788 /* Allow subreg of memory before/during reload. */
19789 bool memory_p
= (MEM_P (x
)
19790 || (!reload_completed
&& GET_CODE (x
) == SUBREG
19791 && MEM_P (SUBREG_REG (x
))));
19793 sri
->icode
= CODE_FOR_nothing
;
19794 sri
->t_icode
= CODE_FOR_nothing
;
19795 sri
->extra_cost
= 0;
19797 ? reg_addr
[mode
].reload_load
19798 : reg_addr
[mode
].reload_store
);
19800 if (REG_P (x
) || register_operand (x
, mode
))
19802 enum rs6000_reg_type to_type
= reg_class_to_reg_type
[(int)rclass
];
19803 bool altivec_p
= (rclass
== ALTIVEC_REGS
);
19804 enum rs6000_reg_type from_type
= register_to_reg_type (x
, &altivec_p
);
19807 std::swap (to_type
, from_type
);
19809 /* Can we do a direct move of some sort? */
19810 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
, sri
,
19813 icode
= (enum insn_code
)sri
->icode
;
19820 /* Make sure 0.0 is not reloaded or forced into memory. */
19821 if (x
== CONST0_RTX (mode
) && VSX_REG_CLASS_P (rclass
))
19828   /* If this is a scalar floating point value and we want to load it into the
19829      traditional Altivec registers, do it via a move through a traditional
19830      floating point register, unless we have D-form addressing.  Also make sure
19831      that non-zero constants use an FPR.  */
19832 if (!done_p
&& reg_addr
[mode
].scalar_in_vmx_p
19833 && !mode_supports_vmx_dform (mode
)
19834 && (rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
)
19835 && (memory_p
|| (GET_CODE (x
) == CONST_DOUBLE
)))
19842 /* Handle reload of load/stores if we have reload helper functions. */
19843 if (!done_p
&& icode
!= CODE_FOR_nothing
&& memory_p
)
19845 int extra_cost
= rs6000_secondary_reload_memory (XEXP (x
, 0), rclass
,
19848 if (extra_cost
>= 0)
19852 if (extra_cost
> 0)
19854 sri
->extra_cost
= extra_cost
;
19855 sri
->icode
= icode
;
19860 /* Handle unaligned loads and stores of integer registers. */
19861 if (!done_p
&& TARGET_POWERPC64
19862 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
19864 && GET_MODE_SIZE (GET_MODE (x
)) >= UNITS_PER_WORD
)
19866 rtx addr
= XEXP (x
, 0);
19867 rtx off
= address_offset (addr
);
19869 if (off
!= NULL_RTX
)
19871 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
19872 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
19874 /* We need a secondary reload when our legitimate_address_p
19875 says the address is good (as otherwise the entire address
19876 will be reloaded), and the offset is not a multiple of
19877 four or we have an address wrap. Address wrap will only
19878 occur for LO_SUMs since legitimate_offset_address_p
19879 rejects addresses for 16-byte mems that will wrap. */
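              /* Worked example: for a TImode access, extra is 8, so a LO_SUM
                 offset of 0x7ffc gives (0x7ffc ^ 0x8000) = 0xfffc, which is
                 >= 0x10000 - 8 = 0xfff8, i.e. the second doubleword of the
                 access would wrap the 16-bit displacement and a secondary
                 reload is required; an offset like 0x7fe2 instead fails only
                 the multiple-of-four test.  */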
19880 if (GET_CODE (addr
) == LO_SUM
19881 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19882 && ((offset
& 3) != 0
19883 || ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
))
19884 : (offset
+ 0x8000 < 0x10000 - extra
/* legitimate_address_p */
19885 && (offset
& 3) != 0))
19887 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19889 sri
->icode
= ((TARGET_32BIT
) ? CODE_FOR_reload_si_load
19890 : CODE_FOR_reload_di_load
);
19892 sri
->icode
= ((TARGET_32BIT
) ? CODE_FOR_reload_si_store
19893 : CODE_FOR_reload_di_store
);
19894 sri
->extra_cost
= 2;
19905 if (!done_p
&& !TARGET_POWERPC64
19906 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
19908 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
19910 rtx addr
= XEXP (x
, 0);
19911 rtx off
= address_offset (addr
);
19913 if (off
!= NULL_RTX
)
19915 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
19916 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
19918 /* We need a secondary reload when our legitimate_address_p
19919 says the address is good (as otherwise the entire address
19920 will be reloaded), and we have a wrap.
19922 legitimate_lo_sum_address_p allows LO_SUM addresses to
19923 have any offset so test for wrap in the low 16 bits.
19925 legitimate_offset_address_p checks for the range
19926 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19927 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19928 [0x7ff4,0x7fff] respectively, so test for the
19929 intersection of these ranges, [0x7ffc,0x7fff] and
19930 [0x7ff4,0x7ff7] respectively.
19932 Note that the address we see here may have been
19933 manipulated by legitimize_reload_address. */
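              /* Worked example: for a DImode access, extra is 4, so a plain
                 offset of 0x7ffe gives 0x7ffe - (0x8000 - 4) = 2, which is
                 < UNITS_PER_WORD; the second word would need offset 0x8002
                 and wrap the 16-bit displacement, so the indirect-form
                 secondary reload below is used.  */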
19934 if (GET_CODE (addr
) == LO_SUM
19935 ? ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
19936 : offset
- (0x8000 - extra
) < UNITS_PER_WORD
)
19939 sri
->icode
= CODE_FOR_reload_si_load
;
19941 sri
->icode
= CODE_FOR_reload_si_store
;
19942 sri
->extra_cost
= 2;
19957 ret
= default_secondary_reload (in_p
, x
, rclass
, mode
, sri
);
19959 gcc_assert (ret
!= ALL_REGS
);
19961 if (TARGET_DEBUG_ADDR
)
19964 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19966 reg_class_names
[ret
],
19967 in_p
? "true" : "false",
19968 reg_class_names
[rclass
],
19969 GET_MODE_NAME (mode
));
19971 if (reload_completed
)
19972 fputs (", after reload", stderr
);
19975 fputs (", done_p not set", stderr
);
19978 fputs (", default secondary reload", stderr
);
19980 if (sri
->icode
!= CODE_FOR_nothing
)
19981 fprintf (stderr
, ", reload func = %s, extra cost = %d",
19982 insn_data
[sri
->icode
].name
, sri
->extra_cost
);
19984 else if (sri
->extra_cost
> 0)
19985 fprintf (stderr
, ", extra cost = %d", sri
->extra_cost
);
19987 fputs ("\n", stderr
);
19994 /* Better tracing for rs6000_secondary_reload_inner. */
19997 rs6000_secondary_reload_trace (int line
, rtx reg
, rtx mem
, rtx scratch
,
20002 gcc_assert (reg
!= NULL_RTX
&& mem
!= NULL_RTX
&& scratch
!= NULL_RTX
);
20004 fprintf (stderr
, "rs6000_secondary_reload_inner:%d, type = %s\n", line
,
20005 store_p
? "store" : "load");
20008 set
= gen_rtx_SET (mem
, reg
);
20010 set
= gen_rtx_SET (reg
, mem
);
20012 clobber
= gen_rtx_CLOBBER (VOIDmode
, scratch
);
20013 debug_rtx (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set
, clobber
)));
20016 static void rs6000_secondary_reload_fail (int, rtx
, rtx
, rtx
, bool)
20017 ATTRIBUTE_NORETURN
;
20020 rs6000_secondary_reload_fail (int line
, rtx reg
, rtx mem
, rtx scratch
,
20023 rs6000_secondary_reload_trace (line
, reg
, mem
, scratch
, store_p
);
20024 gcc_unreachable ();
20027 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20028 reload helper functions. These were identified in
20029 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20030 reload, it calls the insns:
20031 reload_<RELOAD:mode>_<P:mptrsize>_store
20032 reload_<RELOAD:mode>_<P:mptrsize>_load
20034 which in turn calls this function, to do whatever is necessary to create
20035 valid addresses. */
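/* As an illustration (not from the original comments): reloading a value
   whose address is (mem:DF (plus (reg:DI 1) (const_int 264))) into a register
   class that cannot do reg+offset addressing makes this function copy the
   address into the scratch register and rewrite the access as
   (mem:DF (reg:DI <scratch>)) before emitting the actual load or store.  */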
20038 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
20040 int regno
= true_regnum (reg
);
20041 machine_mode mode
= GET_MODE (reg
);
20042 addr_mask_type addr_mask
;
20045 rtx op_reg
, op0
, op1
;
20050 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
|| !MEM_P (mem
)
20051 || !base_reg_operand (scratch
, GET_MODE (scratch
)))
20052 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20054 if (IN_RANGE (regno
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
))
20055 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_GPR
];
20057 else if (IN_RANGE (regno
, FIRST_FPR_REGNO
, LAST_FPR_REGNO
))
20058 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
];
20060 else if (IN_RANGE (regno
, FIRST_ALTIVEC_REGNO
, LAST_ALTIVEC_REGNO
))
20061 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
];
20064 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20066 /* Make sure the mode is valid in this register class. */
20067 if ((addr_mask
& RELOAD_REG_VALID
) == 0)
20068 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20070 if (TARGET_DEBUG_ADDR
)
20071 rs6000_secondary_reload_trace (__LINE__
, reg
, mem
, scratch
, store_p
);
20073 new_addr
= addr
= XEXP (mem
, 0);
20074 switch (GET_CODE (addr
))
20076 /* Does the register class support auto update forms for this mode? If
20077 not, do the update now. We don't need a scratch register, since the
20078 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20081 op_reg
= XEXP (addr
, 0);
20082 if (!base_reg_operand (op_reg
, Pmode
))
20083 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20085 if ((addr_mask
& RELOAD_REG_PRE_INCDEC
) == 0)
20087 emit_insn (gen_add2_insn (op_reg
, GEN_INT (GET_MODE_SIZE (mode
))));
20093 op0
= XEXP (addr
, 0);
20094 op1
= XEXP (addr
, 1);
20095 if (!base_reg_operand (op0
, Pmode
)
20096 || GET_CODE (op1
) != PLUS
20097 || !rtx_equal_p (op0
, XEXP (op1
, 0)))
20098 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20100 if ((addr_mask
& RELOAD_REG_PRE_MODIFY
) == 0)
20102 emit_insn (gen_rtx_SET (op0
, op1
));
20107 /* Do we need to simulate AND -16 to clear the bottom address bits used
20108 in VMX load/stores? */
20110 op0
= XEXP (addr
, 0);
20111 op1
= XEXP (addr
, 1);
20112 if ((addr_mask
& RELOAD_REG_AND_M16
) == 0)
20114 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
20117 else if (GET_CODE (op1
) == PLUS
)
20119 emit_insn (gen_rtx_SET (scratch
, op1
));
20124 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20126 and_op
= gen_rtx_AND (GET_MODE (scratch
), op_reg
, op1
);
20127 cc_clobber
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (CCmode
));
20128 rv
= gen_rtvec (2, gen_rtx_SET (scratch
, and_op
), cc_clobber
);
20129 emit_insn (gen_rtx_PARALLEL (VOIDmode
, rv
));
20130 new_addr
= scratch
;
20134 /* If this is an indirect address, make sure it is a base register. */
20137 if (!base_reg_operand (addr
, GET_MODE (addr
)))
20139 emit_insn (gen_rtx_SET (scratch
, addr
));
20140 new_addr
= scratch
;
20144 /* If this is an indexed address, make sure the register class can handle
20145 indexed addresses for this mode. */
20147 op0
= XEXP (addr
, 0);
20148 op1
= XEXP (addr
, 1);
20149 if (!base_reg_operand (op0
, Pmode
))
20150 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20152 else if (int_reg_operand (op1
, Pmode
))
20154 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
20156 emit_insn (gen_rtx_SET (scratch
, addr
));
20157 new_addr
= scratch
;
20161 else if (mode_supports_vsx_dform_quad (mode
) && CONST_INT_P (op1
))
20163 if (((addr_mask
& RELOAD_REG_QUAD_OFFSET
) == 0)
20164 || !quad_address_p (addr
, mode
, false))
20166 emit_insn (gen_rtx_SET (scratch
, addr
));
20167 new_addr
= scratch
;
20171 /* Make sure the register class can handle offset addresses. */
20172 else if (rs6000_legitimate_offset_address_p (mode
, addr
, false, true))
20174 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
20176 emit_insn (gen_rtx_SET (scratch
, addr
));
20177 new_addr
= scratch
;
20182 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20187 op0
= XEXP (addr
, 0);
20188 op1
= XEXP (addr
, 1);
20189 if (!base_reg_operand (op0
, Pmode
))
20190 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20192 else if (int_reg_operand (op1
, Pmode
))
20194 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
20196 emit_insn (gen_rtx_SET (scratch
, addr
));
20197 new_addr
= scratch
;
20201 /* Quad offsets are restricted and can't handle normal addresses. */
20202 else if (mode_supports_vsx_dform_quad (mode
))
20204 emit_insn (gen_rtx_SET (scratch
, addr
));
20205 new_addr
= scratch
;
20208 /* Make sure the register class can handle offset addresses. */
20209 else if (legitimate_lo_sum_address_p (mode
, addr
, false))
20211 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
20213 emit_insn (gen_rtx_SET (scratch
, addr
));
20214 new_addr
= scratch
;
20219 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20226 rs6000_emit_move (scratch
, addr
, Pmode
);
20227 new_addr
= scratch
;
20231 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20234 /* Adjust the address if it changed. */
20235 if (addr
!= new_addr
)
20237 mem
= replace_equiv_address_nv (mem
, new_addr
);
20238 if (TARGET_DEBUG_ADDR
)
20239 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20242 /* Now create the move. */
20244 emit_insn (gen_rtx_SET (mem
, reg
));
20246 emit_insn (gen_rtx_SET (reg
, mem
));
20251 /* Convert reloads involving 64-bit gprs and misaligned offset
20252 addressing, or multiple 32-bit gprs and offsets that are too large,
20253 to use indirect addressing. */
20256 rs6000_secondary_reload_gpr (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
20258 int regno
= true_regnum (reg
);
20259 enum reg_class rclass
;
20261 rtx scratch_or_premodify
= scratch
;
20263 if (TARGET_DEBUG_ADDR
)
20265 fprintf (stderr
, "\nrs6000_secondary_reload_gpr, type = %s\n",
20266 store_p
? "store" : "load");
20267 fprintf (stderr
, "reg:\n");
20269 fprintf (stderr
, "mem:\n");
20271 fprintf (stderr
, "scratch:\n");
20272 debug_rtx (scratch
);
20275 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
20276 gcc_assert (GET_CODE (mem
) == MEM
);
20277 rclass
= REGNO_REG_CLASS (regno
);
20278 gcc_assert (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
);
20279 addr
= XEXP (mem
, 0);
20281 if (GET_CODE (addr
) == PRE_MODIFY
)
20283 gcc_assert (REG_P (XEXP (addr
, 0))
20284 && GET_CODE (XEXP (addr
, 1)) == PLUS
20285 && XEXP (XEXP (addr
, 1), 0) == XEXP (addr
, 0));
20286 scratch_or_premodify
= XEXP (addr
, 0);
20287 if (!HARD_REGISTER_P (scratch_or_premodify
))
20288 /* If we have a pseudo here then reload will have arranged
20289 to have it replaced, but only in the original insn.
20290 Use the replacement here too. */
20291 scratch_or_premodify
= find_replacement (&XEXP (addr
, 0));
20293 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20294 expressions from the original insn, without unsharing them.
20295 Any RTL that points into the original insn will of course
20296 have register replacements applied. That is why we don't
20297 need to look for replacements under the PLUS. */
20298 addr
= XEXP (addr
, 1);
20300 gcc_assert (GET_CODE (addr
) == PLUS
|| GET_CODE (addr
) == LO_SUM
);
20302 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
20304 mem
= replace_equiv_address_nv (mem
, scratch_or_premodify
);
20306 /* Now create the move. */
20308 emit_insn (gen_rtx_SET (mem
, reg
));
20310 emit_insn (gen_rtx_SET (reg
, mem
));
20315 /* Given an rtx X being reloaded into a reg required to be
20316 in class CLASS, return the class of reg to actually use.
20317 In general this is just CLASS; but on some machines
20318 in some cases it is preferable to use a more restrictive class.
20320 On the RS/6000, we have to return NO_REGS when we want to reload a
20321 floating-point CONST_DOUBLE to force it to be copied to memory.
20323 We also don't want to reload integer values into floating-point
20324 registers if we can at all help it. In fact, this can
20325 cause reload to die, if it tries to generate a reload of CTR
20326 into a FP register and discovers it doesn't have the memory location
20329 ??? Would it be a good idea to have reload do the converse, that is
20330 try to reload floating modes into FP registers if possible?
20333 static enum reg_class
20334 rs6000_preferred_reload_class (rtx x
, enum reg_class rclass
)
20336 machine_mode mode
= GET_MODE (x
);
20337 bool is_constant
= CONSTANT_P (x
);
20339 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20340 reload class for it. */
20341 if ((rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
20342 && (reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
] & RELOAD_REG_VALID
) == 0)
20345 if ((rclass
== FLOAT_REGS
|| rclass
== VSX_REGS
)
20346 && (reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
] & RELOAD_REG_VALID
) == 0)
20349 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20350 the reloading of address expressions using PLUS into floating point
20352 if (TARGET_VSX
&& VSX_REG_CLASS_P (rclass
) && GET_CODE (x
) != PLUS
)
20356 /* Zero is always allowed in all VSX registers. */
20357 if (x
== CONST0_RTX (mode
))
20360 /* If this is a vector constant that can be formed with a few Altivec
20361 instructions, we want altivec registers. */
20362 if (GET_CODE (x
) == CONST_VECTOR
&& easy_vector_constant (x
, mode
))
20363 return ALTIVEC_REGS
;
20365 /* If this is an integer constant that can easily be loaded into
20366 vector registers, allow it. */
20367 if (CONST_INT_P (x
))
20369 HOST_WIDE_INT value
= INTVAL (x
);
20371 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20372 2.06 can generate it in the Altivec registers with
20376 if (TARGET_P8_VECTOR
)
20378 else if (rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
20379 return ALTIVEC_REGS
;
20384 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20385 a sign extend in the Altivec registers. */
20386 if (IN_RANGE (value
, -128, 127) && TARGET_P9_VECTOR
20387 && (rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
))
20388 return ALTIVEC_REGS
;
20391 /* Force constant to memory. */
20395 /* D-form addressing can easily reload the value. */
20396 if (mode_supports_vmx_dform (mode
)
20397 || mode_supports_vsx_dform_quad (mode
))
20400 /* If this is a scalar floating point value and we don't have D-form
20401 addressing, prefer the traditional floating point registers so that we
20402 can use D-form (register+offset) addressing. */
20403 if (rclass
== VSX_REGS
20404 && (mode
== SFmode
|| GET_MODE_SIZE (mode
) == 8))
20407 /* Prefer the Altivec registers if Altivec is handling the vector
20408 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20410 if (VECTOR_UNIT_ALTIVEC_P (mode
) || VECTOR_MEM_ALTIVEC_P (mode
)
20411 || mode
== V1TImode
)
20412 return ALTIVEC_REGS
;
20417 if (is_constant
|| GET_CODE (x
) == PLUS
)
20419 if (reg_class_subset_p (GENERAL_REGS
, rclass
))
20420 return GENERAL_REGS
;
20421 if (reg_class_subset_p (BASE_REGS
, rclass
))
20426 if (GET_MODE_CLASS (mode
) == MODE_INT
&& rclass
== NON_SPECIAL_REGS
)
20427 return GENERAL_REGS
;
20432 /* Debug version of rs6000_preferred_reload_class. */
20433 static enum reg_class
20434 rs6000_debug_preferred_reload_class (rtx x
, enum reg_class rclass
)
20436 enum reg_class ret
= rs6000_preferred_reload_class (x
, rclass
);
20439 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20441 reg_class_names
[ret
], reg_class_names
[rclass
],
20442 GET_MODE_NAME (GET_MODE (x
)));
20448 /* If we are copying between FP or AltiVec registers and anything else, we need
20449 a memory location. The exception is when we are targeting ppc64 and the
20450 move to/from fpr to gpr instructions are available. Also, under VSX, you
20451 can copy vector registers from the FP register set to the Altivec register
20452 set and vice versa. */
20455 rs6000_secondary_memory_needed (enum reg_class from_class
,
20456 enum reg_class to_class
,
20459 enum rs6000_reg_type from_type
, to_type
;
20460 bool altivec_p
= ((from_class
== ALTIVEC_REGS
)
20461 || (to_class
== ALTIVEC_REGS
));
20463 /* If a simple/direct move is available, we don't need secondary memory */
20464 from_type
= reg_class_to_reg_type
[(int)from_class
];
20465 to_type
= reg_class_to_reg_type
[(int)to_class
];
20467 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
,
20468 (secondary_reload_info
*)0, altivec_p
))
20471 /* If we have a floating point or vector register class, we need to use
20472 memory to transfer the data. */
20473 if (IS_FP_VECT_REG_TYPE (from_type
) || IS_FP_VECT_REG_TYPE (to_type
))
20479 /* Debug version of rs6000_secondary_memory_needed. */
20481 rs6000_debug_secondary_memory_needed (enum reg_class from_class
,
20482 enum reg_class to_class
,
20485 bool ret
= rs6000_secondary_memory_needed (from_class
, to_class
, mode
);
20488 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20489 "to_class = %s, mode = %s\n",
20490 ret
? "true" : "false",
20491 reg_class_names
[from_class
],
20492 reg_class_names
[to_class
],
20493 GET_MODE_NAME (mode
));
20498 /* Return the register class of a scratch register needed to copy IN into
20499 or out of a register in RCLASS in MODE. If it can be done directly,
20500 NO_REGS is returned. */
20502 static enum reg_class
20503 rs6000_secondary_reload_class (enum reg_class rclass
, machine_mode mode
,
20508 if (TARGET_ELF
|| (DEFAULT_ABI
== ABI_DARWIN
20510 && MACHOPIC_INDIRECT
20514 /* We cannot copy a symbolic operand directly into anything
20515 other than BASE_REGS for TARGET_ELF. So indicate that a
20516 register from BASE_REGS is needed as an intermediate
20519 On Darwin, pic addresses require a load from memory, which
20520 needs a base register. */
20521 if (rclass
!= BASE_REGS
20522 && (GET_CODE (in
) == SYMBOL_REF
20523 || GET_CODE (in
) == HIGH
20524 || GET_CODE (in
) == LABEL_REF
20525 || GET_CODE (in
) == CONST
))
20529 if (GET_CODE (in
) == REG
)
20531 regno
= REGNO (in
);
20532 if (regno
>= FIRST_PSEUDO_REGISTER
)
20534 regno
= true_regnum (in
);
20535 if (regno
>= FIRST_PSEUDO_REGISTER
)
20539 else if (GET_CODE (in
) == SUBREG
)
20541 regno
= true_regnum (in
);
20542 if (regno
>= FIRST_PSEUDO_REGISTER
)
20548 /* If we have VSX register moves, prefer moving scalar values between
20549 Altivec registers and GPR by going via an FPR (and then via memory)
20550 instead of reloading the secondary memory address for Altivec moves. */
20552 && GET_MODE_SIZE (mode
) < 16
20553 && !mode_supports_vmx_dform (mode
)
20554 && (((rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
)
20555 && (regno
>= 0 && ALTIVEC_REGNO_P (regno
)))
20556 || ((rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
)
20557 && (regno
>= 0 && INT_REGNO_P (regno
)))))
20560 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20562 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
20563 || (regno
>= 0 && INT_REGNO_P (regno
)))
20566 /* Constants, memory, and VSX registers can go into VSX registers (both the
20567 traditional floating point and the altivec registers). */
20568 if (rclass
== VSX_REGS
20569 && (regno
== -1 || VSX_REGNO_P (regno
)))
20572 /* Constants, memory, and FP registers can go into FP registers. */
20573 if ((regno
== -1 || FP_REGNO_P (regno
))
20574 && (rclass
== FLOAT_REGS
|| rclass
== NON_SPECIAL_REGS
))
20575 return (mode
!= SDmode
|| lra_in_progress
) ? NO_REGS
: GENERAL_REGS
;
20577 /* Memory, and AltiVec registers can go into AltiVec registers. */
20578 if ((regno
== -1 || ALTIVEC_REGNO_P (regno
))
20579 && rclass
== ALTIVEC_REGS
)
20582 /* We can copy among the CR registers. */
20583 if ((rclass
== CR_REGS
|| rclass
== CR0_REGS
)
20584 && regno
>= 0 && CR_REGNO_P (regno
))
20587 /* Otherwise, we need GENERAL_REGS. */
20588 return GENERAL_REGS
;
20591 /* Debug version of rs6000_secondary_reload_class. */
20592 static enum reg_class
20593 rs6000_debug_secondary_reload_class (enum reg_class rclass
,
20594 machine_mode mode
, rtx in
)
20596 enum reg_class ret
= rs6000_secondary_reload_class (rclass
, mode
, in
);
20598 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20599 "mode = %s, input rtx:\n",
20600 reg_class_names
[ret
], reg_class_names
[rclass
],
20601 GET_MODE_NAME (mode
));
20607 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20610 rs6000_cannot_change_mode_class (machine_mode from
,
20612 enum reg_class rclass
)
20614 unsigned from_size
= GET_MODE_SIZE (from
);
20615 unsigned to_size
= GET_MODE_SIZE (to
);
20617 if (from_size
!= to_size
)
20619 enum reg_class xclass
= (TARGET_VSX
) ? VSX_REGS
: FLOAT_REGS
;
20621 if (reg_classes_intersect_p (xclass
, rclass
))
20623 unsigned to_nregs
= hard_regno_nregs
[FIRST_FPR_REGNO
][to
];
20624 unsigned from_nregs
= hard_regno_nregs
[FIRST_FPR_REGNO
][from
];
20625 bool to_float128_vector_p
= FLOAT128_VECTOR_P (to
);
20626 bool from_float128_vector_p
= FLOAT128_VECTOR_P (from
);
20628 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20629 single register under VSX because the scalar part of the register
20630 is in the upper 64-bits, and not the lower 64-bits. Types like
20631 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20632 IEEE floating point can't overlap, and neither can small
20635 if (to_float128_vector_p
&& from_float128_vector_p
)
20638 else if (to_float128_vector_p
|| from_float128_vector_p
)
20641 /* TDmode in floating-mode registers must always go into a register
20642 pair with the most significant word in the even-numbered register
20643 to match ISA requirements. In little-endian mode, this does not
20644 match subreg numbering, so we cannot allow subregs. */
20645 if (!BYTES_BIG_ENDIAN
&& (to
== TDmode
|| from
== TDmode
))
20648 if (from_size
< 8 || to_size
< 8)
20651 if (from_size
== 8 && (8 * to_nregs
) != to_size
)
20654 if (to_size
== 8 && (8 * from_nregs
) != from_size
)
20663 /* Since the VSX register set includes traditional floating point registers
20664 and altivec registers, just check for the size being different instead of
20665 trying to check whether the modes are vector modes. Otherwise it won't
20666 allow say DF and DI to change classes. For types like TFmode and TDmode
20667 that take 2 64-bit registers, rather than a single 128-bit register, don't
20668 allow subregs of those types to other 128 bit types. */
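   /* For example, a subreg between DFmode and DImode (both 8 bytes, one
      register) is allowed, while a subreg between TFmode (two 8-byte
      registers) and a 16-byte mode such as V2DImode (one VSX register) is
      rejected, since the two sides occupy a different number of registers.  */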
20669 if (TARGET_VSX
&& VSX_REG_CLASS_P (rclass
))
20671 unsigned num_regs
= (from_size
+ 15) / 16;
20672 if (hard_regno_nregs
[FIRST_FPR_REGNO
][to
] > num_regs
20673 || hard_regno_nregs
[FIRST_FPR_REGNO
][from
] > num_regs
)
20676 return (from_size
!= 8 && from_size
!= 16);
20679 if (TARGET_ALTIVEC
&& rclass
== ALTIVEC_REGS
20680 && (ALTIVEC_VECTOR_MODE (from
) + ALTIVEC_VECTOR_MODE (to
)) == 1)
20686 /* Debug version of rs6000_cannot_change_mode_class. */
20688 rs6000_debug_cannot_change_mode_class (machine_mode from
,
20690 enum reg_class rclass
)
20692 bool ret
= rs6000_cannot_change_mode_class (from
, to
, rclass
);
20695 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20696 "to = %s, rclass = %s\n",
20697 ret
? "true" : "false",
20698 GET_MODE_NAME (from
), GET_MODE_NAME (to
),
20699 reg_class_names
[rclass
]);
20704 /* Return a string to do a move operation of 128 bits of data. */
20707 rs6000_output_move_128bit (rtx operands
[])
20709 rtx dest
= operands
[0];
20710 rtx src
= operands
[1];
20711 machine_mode mode
= GET_MODE (dest
);
20714 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
20715 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
20719 dest_regno
= REGNO (dest
);
20720 dest_gpr_p
= INT_REGNO_P (dest_regno
);
20721 dest_fp_p
= FP_REGNO_P (dest_regno
);
20722 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
20723 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
20728 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
20733 src_regno
= REGNO (src
);
20734 src_gpr_p
= INT_REGNO_P (src_regno
);
20735 src_fp_p
= FP_REGNO_P (src_regno
);
20736 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
20737 src_vsx_p
= src_fp_p
| src_vmx_p
;
20742 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
20745 /* Register moves. */
20746 if (dest_regno
>= 0 && src_regno
>= 0)
20753 if (TARGET_DIRECT_MOVE_128
&& src_vsx_p
)
20754 return (WORDS_BIG_ENDIAN
20755 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20756 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20758 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
20762 else if (TARGET_VSX
&& dest_vsx_p
)
20765 return "xxlor %x0,%x1,%x1";
20767 else if (TARGET_DIRECT_MOVE_128
&& src_gpr_p
)
20768 return (WORDS_BIG_ENDIAN
20769 ? "mtvsrdd %x0,%1,%L1"
20770 : "mtvsrdd %x0,%L1,%1");
20772 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
20776 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
20777 return "vor %0,%1,%1";
20779 else if (dest_fp_p
&& src_fp_p
)
20784 else if (dest_regno
>= 0 && MEM_P (src
))
20788 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20794 else if (TARGET_ALTIVEC
&& dest_vmx_p
20795 && altivec_indexed_or_indirect_operand (src
, mode
))
20796 return "lvx %0,%y1";
20798 else if (TARGET_VSX
&& dest_vsx_p
)
20800 if (mode_supports_vsx_dform_quad (mode
)
20801 && quad_address_p (XEXP (src
, 0), mode
, true))
20802 return "lxv %x0,%1";
20804 else if (TARGET_P9_VECTOR
)
20805 return "lxvx %x0,%y1";
20807 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20808 return "lxvw4x %x0,%y1";
20811 return "lxvd2x %x0,%y1";
20814 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
20815 return "lvx %0,%y1";
20817 else if (dest_fp_p
)
20822 else if (src_regno
>= 0 && MEM_P (dest
))
20826 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20827 return "stq %1,%0";
20832 else if (TARGET_ALTIVEC
&& src_vmx_p
20833 && altivec_indexed_or_indirect_operand (src
, mode
))
20834 return "stvx %1,%y0";
20836 else if (TARGET_VSX
&& src_vsx_p
)
20838 if (mode_supports_vsx_dform_quad (mode
)
20839 && quad_address_p (XEXP (dest
, 0), mode
, true))
20840 return "stxv %x1,%0";
20842 else if (TARGET_P9_VECTOR
)
20843 return "stxvx %x1,%y0";
20845 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20846 return "stxvw4x %x1,%y0";
20849 return "stxvd2x %x1,%y0";
20852 else if (TARGET_ALTIVEC
&& src_vmx_p
)
20853 return "stvx %1,%y0";
20860 else if (dest_regno
>= 0
20861 && (GET_CODE (src
) == CONST_INT
20862 || GET_CODE (src
) == CONST_WIDE_INT
20863 || GET_CODE (src
) == CONST_DOUBLE
20864 || GET_CODE (src
) == CONST_VECTOR
))
20869 else if ((dest_vmx_p
&& TARGET_ALTIVEC
)
20870 || (dest_vsx_p
&& TARGET_VSX
))
20871 return output_vec_const_move (operands
);
20874 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest
, src
));
20877 /* Validate a 128-bit move. */
20879 rs6000_move_128bit_ok_p (rtx operands
[])
20881 machine_mode mode
= GET_MODE (operands
[0]);
20882 return (gpc_reg_operand (operands
[0], mode
)
20883 || gpc_reg_operand (operands
[1], mode
));
20886 /* Return true if a 128-bit move needs to be split. */
20888 rs6000_split_128bit_ok_p (rtx operands
[])
20890 if (!reload_completed
)
20893 if (!gpr_or_gpr_p (operands
[0], operands
[1]))
20896 if (quad_load_store_p (operands
[0], operands
[1]))
20903 /* Given a comparison operation, return the bit number in CCR to test. We
20904 know this is a valid comparison.
20906 SCC_P is 1 if this is for an scc. That means that %D will have been
20907 used instead of %C, so the bits will be in different places.
20909 Return -1 if OP isn't a valid comparison for some reason. */
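/* For example, for a comparison kept in CR field 2 the base_bit computed
   below is 4 * 2 = 8; GT then tests bit 9 (base_bit + 1) and EQ tests bit 10
   (base_bit + 2).  With SCC_P set, conditions handled by negation (such as
   NE) have already had a cror move the interesting bit into the "unordered"
   slot, so they test base_bit + 3 instead.  */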
20912 ccr_bit (rtx op, int scc_p)
20914   enum rtx_code code = GET_CODE (op);
20915   machine_mode cc_mode;
20920   if (!COMPARISON_P (op))
20923   reg = XEXP (op, 0);
20925   gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20927   cc_mode = GET_MODE (reg);
20928   cc_regnum = REGNO (reg);
20929   base_bit = 4 * (cc_regnum - CR0_REGNO);
20931   validate_condition_mode (code, cc_mode);
20933   /* When generating a sCOND operation, only positive conditions are
20936               || code == EQ || code == GT || code == LT || code == UNORDERED
20937               || code == GTU || code == LTU);
20942       return scc_p ? base_bit + 3 : base_bit + 2;
20944       return base_bit + 2;
20945     case GT:  case GTU:  case UNLE:
20946       return base_bit + 1;
20947     case LT:  case LTU:  case UNGE:
20949     case ORDERED:  case UNORDERED:
20950       return base_bit + 3;
20953       /* If scc, we will have done a cror to put the bit in the
20954          unordered position.  So test that bit.  For integer, this is ! LT
20955          unless this is an scc insn.  */
20956       return scc_p ? base_bit + 3 : base_bit;
20959       return scc_p ? base_bit + 3 : base_bit + 1;
20962       gcc_unreachable ();
20966 /* Return the GOT register. */
20969 rs6000_got_register (rtx value ATTRIBUTE_UNUSED
)
20971 /* The second flow pass currently (June 1999) can't update
20972 regs_ever_live without disturbing other parts of the compiler, so
20973 update it here to make the prolog/epilogue code happy. */
20974 if (!can_create_pseudo_p ()
20975 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))
20976 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM
, true);
20978 crtl
->uses_pic_offset_table
= 1;
20980 return pic_offset_table_rtx
;
20983 static rs6000_stack_t stack_info
;
20985 /* Function to init struct machine_function.
20986 This will be called, via a pointer variable,
20987 from push_function_context. */
20989 static struct machine_function
*
20990 rs6000_init_machine_status (void)
20992 stack_info
.reload_completed
= 0;
20993 return ggc_cleared_alloc
<machine_function
> ();
20996 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20998 /* Write out a function code label. */
21001 rs6000_output_function_entry (FILE *file
, const char *fname
)
21003 if (fname
[0] != '.')
21005 switch (DEFAULT_ABI
)
21008 gcc_unreachable ();
21014 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
21024 RS6000_OUTPUT_BASENAME (file
, fname
);
21027 /* Print an operand. Recognize special options, documented below. */
21030 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21031 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21033 #define SMALL_DATA_RELOC "sda21"
21034 #define SMALL_DATA_REG 0
21038 print_operand (FILE *file
, rtx x
, int code
)
21041 unsigned HOST_WIDE_INT uval
;
21045 /* %a is output_address. */
21047 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21051 /* Like 'J' but get to the GT bit only. */
21052 gcc_assert (REG_P (x
));
21054 /* Bit 1 is GT bit. */
21055 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
21057 /* Add one for shift count in rlinm for scc. */
21058 fprintf (file
, "%d", i
+ 1);
21062 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21065 output_operand_lossage ("invalid %%e value");
21070 if ((uval
& 0xffff) == 0 && uval
!= 0)
21075 /* X is a CR register. Print the number of the EQ bit of the CR */
21076 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21077 output_operand_lossage ("invalid %%E value");
21079 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
21083 /* X is a CR register. Print the shift count needed to move it
21084 to the high-order four bits. */
21085 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21086 output_operand_lossage ("invalid %%f value");
21088 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
21092 /* Similar, but print the count for the rotate in the opposite
21094 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21095 output_operand_lossage ("invalid %%F value");
21097 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
21101 /* X is a constant integer. If it is negative, print "m",
21102 otherwise print "z". This is to make an aze or ame insn. */
21103 if (GET_CODE (x
) != CONST_INT
)
21104 output_operand_lossage ("invalid %%G value");
21105 else if (INTVAL (x
) >= 0)
21112 /* If constant, output low-order five bits. Otherwise, write
21115 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
21117 print_operand (file
, x
, 0);
21121 /* If constant, output low-order six bits. Otherwise, write
21124 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
21126 print_operand (file
, x
, 0);
21130 /* Print `i' if this is a constant, else nothing. */
21136 /* Write the bit number in CCR for jump. */
21137 i
= ccr_bit (x
, 0);
21139 output_operand_lossage ("invalid %%j code");
21141 fprintf (file
, "%d", i
);
21145 /* Similar, but add one for shift count in rlinm for scc and pass
21146 scc flag to `ccr_bit'. */
21147 i
= ccr_bit (x
, 1);
21149 output_operand_lossage ("invalid %%J code");
21151 /* If we want bit 31, write a shift count of zero, not 32. */
21152 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21156 /* X must be a constant. Write the 1's complement of the
21159 output_operand_lossage ("invalid %%k value");
21161 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
21165 /* X must be a symbolic constant on ELF. Write an
21166 expression suitable for an 'addi' that adds in the low 16
21167 bits of the MEM. */
21168 if (GET_CODE (x
) == CONST
)
21170 if (GET_CODE (XEXP (x
, 0)) != PLUS
21171 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
21172 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
21173 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
21174 output_operand_lossage ("invalid %%K value");
21176 print_operand_address (file
, x
);
21177 fputs ("@l", file
);
21180 /* %l is output_asm_label. */
21183 /* Write second word of DImode or DFmode reference. Works on register
21184 or non-indexed memory only. */
21186 fputs (reg_names
[REGNO (x
) + 1], file
);
21187 else if (MEM_P (x
))
21189 machine_mode mode
= GET_MODE (x
);
21190 /* Handle possible auto-increment. Since it is pre-increment and
21191 we have already done it, we can just use an offset of word. */
21192 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21193 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21194 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21196 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21197 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21200 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
,
21204 if (small_data_operand (x
, GET_MODE (x
)))
21205 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21206 reg_names
[SMALL_DATA_REG
]);
21211 /* Write the number of elements in the vector times 4. */
21212 if (GET_CODE (x
) != PARALLEL
)
21213 output_operand_lossage ("invalid %%N value");
21215 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
21219 /* Similar, but subtract 1 first. */
21220 if (GET_CODE (x
) != PARALLEL
)
21221 output_operand_lossage ("invalid %%O value");
21223 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
21227 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21230 || (i
= exact_log2 (INTVAL (x
))) < 0)
21231 output_operand_lossage ("invalid %%p value");
21233 fprintf (file
, "%d", i
);
21237 /* The operand must be an indirect memory reference. The result
21238 is the register name. */
21239 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
21240 || REGNO (XEXP (x
, 0)) >= 32)
21241 output_operand_lossage ("invalid %%P value");
21243 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
21247 /* This outputs the logical code corresponding to a boolean
21248 expression. The expression may have one or both operands
21249 negated (if one, only the first one). For condition register
21250 logical operations, it will also treat the negated
21251 CR codes as NOTs, but not handle NOTs of them. */
21253         const char *const *t = 0;
21255         enum rtx_code code = GET_CODE (x);
21256         static const char * const tbl[3][3] = {
21257           { "and", "andc", "nor" },
21258           { "or", "orc", "nand" },
21259           { "xor", "eqv", "xor" } };
21263 else if (code
== IOR
)
21265 else if (code
== XOR
)
21268 output_operand_lossage ("invalid %%q value");
21270 if (GET_CODE (XEXP (x
, 0)) != NOT
)
21274 if (GET_CODE (XEXP (x
, 1)) == NOT
)
21285 if (! TARGET_MFCRF
)
21291 /* X is a CR register. Print the mask for `mtcrf'. */
21292 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21293 output_operand_lossage ("invalid %%R value");
21295 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
21299 /* Low 5 bits of 32 - value */
21301 output_operand_lossage ("invalid %%s value");
21303 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
21307 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21308 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
21310 /* Bit 3 is OV bit. */
21311 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
21313 /* If we want bit 31, write a shift count of zero, not 32. */
21314 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21318 /* Print the symbolic name of a branch target register. */
21319 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
21320 && REGNO (x
) != CTR_REGNO
))
21321 output_operand_lossage ("invalid %%T value");
21322 else if (REGNO (x
) == LR_REGNO
)
21323 fputs ("lr", file
);
21325 fputs ("ctr", file
);
21329 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21330 for use in unsigned operand. */
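      /* For example, 0x12340000 prints as 0x1234 (the high half shifted down)
         and 0x5678 prints as 0x5678; exactly one of the two 16-bit halves is
         expected to be non-zero here.  */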
21333 output_operand_lossage ("invalid %%u value");
21338       if ((uval & 0xffff) == 0)
21341       fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21345 /* High-order 16 bits of constant for use in signed operand. */
21347 output_operand_lossage ("invalid %%v value");
21349 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
21350 (INTVAL (x
) >> 16) & 0xffff);
21354 /* Print `u' if this has an auto-increment or auto-decrement. */
21356 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
21357 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
21358 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
21363 /* Print the trap code for this operand. */
21364 switch (GET_CODE (x
))
21367 fputs ("eq", file
); /* 4 */
21370 fputs ("ne", file
); /* 24 */
21373 fputs ("lt", file
); /* 16 */
21376 fputs ("le", file
); /* 20 */
21379 fputs ("gt", file
); /* 8 */
21382 fputs ("ge", file
); /* 12 */
21385 fputs ("llt", file
); /* 2 */
21388 fputs ("lle", file
); /* 6 */
21391 fputs ("lgt", file
); /* 1 */
21394 fputs ("lge", file
); /* 5 */
21397 gcc_unreachable ();
21402 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21405 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
21406 ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
21408 print_operand (file
, x
, 0);
21412 /* X is a FPR or Altivec register used in a VSX context. */
21413 if (GET_CODE (x
) != REG
|| !VSX_REGNO_P (REGNO (x
)))
21414 output_operand_lossage ("invalid %%x value");
21417 int reg
= REGNO (x
);
21418 int vsx_reg
= (FP_REGNO_P (reg
)
21420 : reg
- FIRST_ALTIVEC_REGNO
+ 32);
21422 #ifdef TARGET_REGNAMES
21423 if (TARGET_REGNAMES
)
21424 fprintf (file
, "%%vs%d", vsx_reg
);
21427 fprintf (file
, "%d", vsx_reg
);
21433 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
21434 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
21435 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
21440 /* Like 'L', for third word of TImode/PTImode */
21442 fputs (reg_names
[REGNO (x
) + 2], file
);
21443 else if (MEM_P (x
))
21445 machine_mode mode
= GET_MODE (x
);
21446 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21447 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21448 output_address (mode
, plus_constant (Pmode
,
21449 XEXP (XEXP (x
, 0), 0), 8));
21450 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21451 output_address (mode
, plus_constant (Pmode
,
21452 XEXP (XEXP (x
, 0), 0), 8));
21454 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 8), 0));
21455 if (small_data_operand (x
, GET_MODE (x
)))
21456 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21457 reg_names
[SMALL_DATA_REG
]);
21462 /* X is a SYMBOL_REF. Write out the name preceded by a
21463 period and without any trailing data in brackets. Used for function
21464 names. If we are configured for System V (or the embedded ABI) on
21465 the PowerPC, do not emit the period, since those systems do not use
21466 TOCs and the like. */
21467 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
21469 /* For macho, check to see if we need a stub. */
21472 const char *name
= XSTR (x
, 0);
21474 if (darwin_emit_branch_islands
21475 && MACHOPIC_INDIRECT
21476 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
21477 name
= machopic_indirection_name (x
, /*stub_p=*/true);
21479 assemble_name (file
, name
);
21481 else if (!DOT_SYMBOLS
)
21482 assemble_name (file
, XSTR (x
, 0));
21484 rs6000_output_function_entry (file
, XSTR (x
, 0));
21488 /* Like 'L', for last word of TImode/PTImode. */
21490 fputs (reg_names
[REGNO (x
) + 3], file
);
21491 else if (MEM_P (x
))
21493 machine_mode mode
= GET_MODE (x
);
21494 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21495 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21496 output_address (mode
, plus_constant (Pmode
,
21497 XEXP (XEXP (x
, 0), 0), 12));
21498 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21499 output_address (mode
, plus_constant (Pmode
,
21500 XEXP (XEXP (x
, 0), 0), 12));
21502 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 12), 0));
21503 if (small_data_operand (x
, GET_MODE (x
)))
21504 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21505 reg_names
[SMALL_DATA_REG
]);
21509 /* Print AltiVec memory operand. */
21514 gcc_assert (MEM_P (x
));
21518 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x
))
21519 && GET_CODE (tmp
) == AND
21520 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
21521 && INTVAL (XEXP (tmp
, 1)) == -16)
21522 tmp
= XEXP (tmp
, 0);
21523 else if (VECTOR_MEM_VSX_P (GET_MODE (x
))
21524 && GET_CODE (tmp
) == PRE_MODIFY
)
21525 tmp
= XEXP (tmp
, 1);
21527 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
21530 if (GET_CODE (tmp
) != PLUS
21531 || !REG_P (XEXP (tmp
, 0))
21532 || !REG_P (XEXP (tmp
, 1)))
21534 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21538 if (REGNO (XEXP (tmp
, 0)) == 0)
21539 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
21540 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
21542 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
21543 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
21550 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
21551 else if (MEM_P (x
))
21553 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21554 know the width from the mode. */
21555 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
21556 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
21557 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21558 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21559 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
21560 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21561 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21562 output_address (GET_MODE (x
), XEXP (XEXP (x
, 0), 1));
21564 output_address (GET_MODE (x
), XEXP (x
, 0));
21568 if (toc_relative_expr_p (x
, false, &tocrel_base_oac
, &tocrel_offset_oac
))
21569 /* This hack along with a corresponding hack in
21570 rs6000_output_addr_const_extra arranges to output addends
21571 where the assembler expects to find them. eg.
21572 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21573 without this hack would be output as "x@toc+4". We
21575 output_addr_const (file
, CONST_CAST_RTX (tocrel_base_oac
));
21577 output_addr_const (file
, x
);
21582 if (const char *name
= get_some_local_dynamic_name ())
21583 assemble_name (file
, name
);
21585 output_operand_lossage ("'%%&' used without any "
21586 "local dynamic TLS references");
21590 output_operand_lossage ("invalid %%xn code");
21594 /* Print the address of an operand. */
21597 print_operand_address (FILE *file
, rtx x
)
21600 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
21601 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
21602 || GET_CODE (x
) == LABEL_REF
)
21604 output_addr_const (file
, x
);
21605 if (small_data_operand (x
, GET_MODE (x
)))
21606 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21607 reg_names
[SMALL_DATA_REG
]);
21609 gcc_assert (!TARGET_TOC
);
21611 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
21612 && REG_P (XEXP (x
, 1)))
21614 if (REGNO (XEXP (x
, 0)) == 0)
21615 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
21616 reg_names
[ REGNO (XEXP (x
, 0)) ]);
21618 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
21619 reg_names
[ REGNO (XEXP (x
, 1)) ]);
21621 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
21622 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
21623 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
21624 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
21626 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
21627 && CONSTANT_P (XEXP (x
, 1)))
21629 fprintf (file
, "lo16(");
21630 output_addr_const (file
, XEXP (x
, 1));
21631 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
21635 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
21636 && CONSTANT_P (XEXP (x
, 1)))
21638 output_addr_const (file
, XEXP (x
, 1));
21639 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
21642 else if (toc_relative_expr_p (x
, false, &tocrel_base_oac
, &tocrel_offset_oac
))
21644 /* This hack along with a corresponding hack in
21645 rs6000_output_addr_const_extra arranges to output addends
21646 where the assembler expects to find them. eg.
21648 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21649 without this hack would be output as "x@toc+8@l(9)". We
21650 want "x+8@toc@l(9)". */
21651 output_addr_const (file
, CONST_CAST_RTX (tocrel_base_oac
));
21652 if (GET_CODE (x
) == LO_SUM
)
21653 fprintf (file
, "@l(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
21655 fprintf (file
, "(%s)", reg_names
[REGNO (XVECEXP (tocrel_base_oac
, 0, 1))]);
21658 gcc_unreachable ();
21661 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21664 rs6000_output_addr_const_extra (FILE *file
, rtx x
)
21666 if (GET_CODE (x
) == UNSPEC
)
21667 switch (XINT (x
, 1))
21669 case UNSPEC_TOCREL
:
21670 gcc_checking_assert (GET_CODE (XVECEXP (x
, 0, 0)) == SYMBOL_REF
21671 && REG_P (XVECEXP (x
, 0, 1))
21672 && REGNO (XVECEXP (x
, 0, 1)) == TOC_REGISTER
);
21673 output_addr_const (file
, XVECEXP (x
, 0, 0));
21674 if (x
== tocrel_base_oac
&& tocrel_offset_oac
!= const0_rtx
)
21676 if (INTVAL (tocrel_offset_oac
) >= 0)
21677 fprintf (file
, "+");
21678 output_addr_const (file
, CONST_CAST_RTX (tocrel_offset_oac
));
21680 if (!TARGET_AIX
|| (TARGET_ELF
&& TARGET_MINIMAL_TOC
))
21683 assemble_name (file
, toc_label_name
);
21686 else if (TARGET_ELF
)
21687 fputs ("@toc", file
);
21691 case UNSPEC_MACHOPIC_OFFSET
:
21692 output_addr_const (file
, XVECEXP (x
, 0, 0));
21694 machopic_output_function_base_name (file
);
21701 /* Target hook for assembling integer objects. The PowerPC version has
21702 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21703 is defined. It also needs to handle DI-mode objects on 64-bit
21707 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
21709 #ifdef RELOCATABLE_NEEDS_FIXUP
21710 /* Special handling for SI values. */
21711 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
21713 static int recurse
= 0;
21715 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21716 the .fixup section. Since the TOC section is already relocated, we
21717 don't need to mark it here. We used to skip the text section, but it
21718 should never be valid for relocated addresses to be placed in the text
21720 if (DEFAULT_ABI
== ABI_V4
21721 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
21722 && in_section
!= toc_section
21724 && !CONST_SCALAR_INT_P (x
)
21730 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
21732 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
21733 fprintf (asm_out_file
, "\t.long\t(");
21734 output_addr_const (asm_out_file
, x
);
21735 fprintf (asm_out_file
, ")@fixup\n");
21736 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
21737 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
21738 fprintf (asm_out_file
, "\t.long\t");
21739 assemble_name (asm_out_file
, buf
);
21740 fprintf (asm_out_file
, "\n\t.previous\n");
21744 /* Remove initial .'s to turn a -mcall-aixdesc function
21745 address into the address of the descriptor, not the function
21747 else if (GET_CODE (x
) == SYMBOL_REF
21748 && XSTR (x
, 0)[0] == '.'
21749 && DEFAULT_ABI
== ABI_AIX
)
21751 const char *name
= XSTR (x
, 0);
21752 while (*name
== '.')
21755 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
21759 #endif /* RELOCATABLE_NEEDS_FIXUP */
21760 return default_assemble_integer (x
, size
, aligned_p
);
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
        NULL, "protected", "hidden", "internal"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
              (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
          || code == UNLT || code == UNLE || code == UNGT || code == UNGE
          || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
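
/* Worked example (illustration only): for a CCFPmode compare with NaNs
   honored, GE must reverse to UNLT rather than LT, because both GE and LT
   are false when either operand is a NaN; only the *_maybe_unordered
   reversal keeps the negated test equivalent to "not taken".  */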
21811 /* Generate a compare for CODE. Return a brand-new rtx that
21812 represents the result of the compare. */
21815 rs6000_generate_compare (rtx cmp
, machine_mode mode
)
21817 machine_mode comp_mode
;
21818 rtx compare_result
;
21819 enum rtx_code code
= GET_CODE (cmp
);
21820 rtx op0
= XEXP (cmp
, 0);
21821 rtx op1
= XEXP (cmp
, 1);
21823 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21824 comp_mode
= CCmode
;
21825 else if (FLOAT_MODE_P (mode
))
21826 comp_mode
= CCFPmode
;
21827 else if (code
== GTU
|| code
== LTU
21828 || code
== GEU
|| code
== LEU
)
21829 comp_mode
= CCUNSmode
;
21830 else if ((code
== EQ
|| code
== NE
)
21831 && unsigned_reg_p (op0
)
21832 && (unsigned_reg_p (op1
)
21833 || (CONST_INT_P (op1
) && INTVAL (op1
) != 0)))
21834 /* These are unsigned values, perhaps there will be a later
21835 ordering compare that can be shared with this one. */
21836 comp_mode
= CCUNSmode
;
21838 comp_mode
= CCmode
;
21840 /* If we have an unsigned compare, make sure we don't have a signed value as
21842 if (comp_mode
== CCUNSmode
&& GET_CODE (op1
) == CONST_INT
21843 && INTVAL (op1
) < 0)
21845 op0
= copy_rtx_if_shared (op0
);
21846 op1
= force_reg (GET_MODE (op0
), op1
);
21847 cmp
= gen_rtx_fmt_ee (code
, GET_MODE (cmp
), op0
, op1
);
21850 /* First, the compare. */
21851 compare_result
= gen_reg_rtx (comp_mode
);
21853 /* IEEE 128-bit support in VSX registers when we do not have hardware
21855 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21857 rtx libfunc
= NULL_RTX
;
21858 bool check_nan
= false;
21865 libfunc
= optab_libfunc (eq_optab
, mode
);
21870 libfunc
= optab_libfunc (ge_optab
, mode
);
21875 libfunc
= optab_libfunc (le_optab
, mode
);
21880 libfunc
= optab_libfunc (unord_optab
, mode
);
21881 code
= (code
== UNORDERED
) ? NE
: EQ
;
21887 libfunc
= optab_libfunc (ge_optab
, mode
);
21888 code
= (code
== UNGE
) ? GE
: GT
;
21894 libfunc
= optab_libfunc (le_optab
, mode
);
21895 code
= (code
== UNLE
) ? LE
: LT
;
21901 libfunc
= optab_libfunc (eq_optab
, mode
);
	  code = (code == UNEQ) ? EQ : NE;
21906 gcc_unreachable ();
21909 gcc_assert (libfunc
);
21912 dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21913 SImode
, op0
, mode
, op1
, mode
);
21915 /* The library signals an exception for signalling NaNs, so we need to
21916 handle isgreater, etc. by first checking isordered. */
21919 rtx ne_rtx
, normal_dest
, unord_dest
;
21920 rtx unord_func
= optab_libfunc (unord_optab
, mode
);
21921 rtx join_label
= gen_label_rtx ();
21922 rtx join_ref
= gen_rtx_LABEL_REF (VOIDmode
, join_label
);
21923 rtx unord_cmp
= gen_reg_rtx (comp_mode
);
21926 /* Test for either value being a NaN. */
21927 gcc_assert (unord_func
);
21928 unord_dest
= emit_library_call_value (unord_func
, NULL_RTX
, LCT_CONST
,
21929 SImode
, op0
, mode
, op1
, mode
);
21931 /* Set value (0) if either value is a NaN, and jump to the join
21933 dest
= gen_reg_rtx (SImode
);
21934 emit_move_insn (dest
, const1_rtx
);
21935 emit_insn (gen_rtx_SET (unord_cmp
,
21936 gen_rtx_COMPARE (comp_mode
, unord_dest
,
21939 ne_rtx
= gen_rtx_NE (comp_mode
, unord_cmp
, const0_rtx
);
21940 emit_jump_insn (gen_rtx_SET (pc_rtx
,
21941 gen_rtx_IF_THEN_ELSE (VOIDmode
, ne_rtx
,
21945 /* Do the normal comparison, knowing that the values are not
21947 normal_dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21948 SImode
, op0
, mode
, op1
, mode
);
21950 emit_insn (gen_cstoresi4 (dest
,
21951 gen_rtx_fmt_ee (code
, SImode
, normal_dest
,
21953 normal_dest
, const0_rtx
));
      /* Join NaN and non-NaN paths.  Compare dest against 0.  */
21956 emit_label (join_label
);
21960 emit_insn (gen_rtx_SET (compare_result
,
21961 gen_rtx_COMPARE (comp_mode
, dest
, const0_rtx
)));
21966 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21967 CLOBBERs to match cmptf_internal2 pattern. */
21968 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
21969 && FLOAT128_IBM_P (GET_MODE (op0
))
21970 && TARGET_HARD_FLOAT
)
21971 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
21973 gen_rtx_SET (compare_result
,
21974 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
21975 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21976 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21977 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21978 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21979 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21980 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21981 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21982 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21983 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
21984 else if (GET_CODE (op1
) == UNSPEC
21985 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
21987 rtx op1b
= XVECEXP (op1
, 0, 0);
21988 comp_mode
= CCEQmode
;
21989 compare_result
= gen_reg_rtx (CCEQmode
);
21991 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
21993 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
21996 emit_insn (gen_rtx_SET (compare_result
,
21997 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
22000 /* Some kinds of FP comparisons need an OR operation;
22001 under flag_finite_math_only we don't bother. */
22002 if (FLOAT_MODE_P (mode
)
22003 && (!FLOAT128_IEEE_P (mode
) || TARGET_FLOAT128_HW
)
22004 && !flag_finite_math_only
22005 && (code
== LE
|| code
== GE
22006 || code
== UNEQ
|| code
== LTGT
22007 || code
== UNGT
|| code
== UNLT
))
22009 enum rtx_code or1
, or2
;
22010 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
22011 rtx or_result
= gen_reg_rtx (CCEQmode
);
22015 case LE
: or1
= LT
; or2
= EQ
; break;
22016 case GE
: or1
= GT
; or2
= EQ
; break;
22017 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
22018 case LTGT
: or1
= LT
; or2
= GT
; break;
22019 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
22020 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
22021 default: gcc_unreachable ();
22023 validate_condition_mode (or1
, comp_mode
);
22024 validate_condition_mode (or2
, comp_mode
);
22025 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
22026 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
22027 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
22028 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
22030 emit_insn (gen_rtx_SET (or_result
, compare2_rtx
));
22032 compare_result
= or_result
;
22036 validate_condition_mode (code
, GET_MODE (compare_result
));
22038 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
			  const_tree type1,
			  const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
    }

  return NULL;
}
22089 /* Expand floating point conversion to/from __float128 and __ibm128. */
22092 rs6000_expand_float128_convert (rtx dest
, rtx src
, bool unsigned_p
)
22094 machine_mode dest_mode
= GET_MODE (dest
);
22095 machine_mode src_mode
= GET_MODE (src
);
22096 convert_optab cvt
= unknown_optab
;
22097 bool do_move
= false;
22098 rtx libfunc
= NULL_RTX
;
22100 typedef rtx (*rtx_2func_t
) (rtx
, rtx
);
22101 rtx_2func_t hw_convert
= (rtx_2func_t
)0;
22105 rtx_2func_t from_df
;
22106 rtx_2func_t from_sf
;
22107 rtx_2func_t from_si_sign
;
22108 rtx_2func_t from_si_uns
;
22109 rtx_2func_t from_di_sign
;
22110 rtx_2func_t from_di_uns
;
22113 rtx_2func_t to_si_sign
;
22114 rtx_2func_t to_si_uns
;
22115 rtx_2func_t to_di_sign
;
22116 rtx_2func_t to_di_uns
;
22117 } hw_conversions
[2] = {
    /* conversions to/from KFmode */
22120 gen_extenddfkf2_hw
, /* KFmode <- DFmode. */
22121 gen_extendsfkf2_hw
, /* KFmode <- SFmode. */
22122 gen_float_kfsi2_hw
, /* KFmode <- SImode (signed). */
22123 gen_floatuns_kfsi2_hw
, /* KFmode <- SImode (unsigned). */
22124 gen_float_kfdi2_hw
, /* KFmode <- DImode (signed). */
22125 gen_floatuns_kfdi2_hw
, /* KFmode <- DImode (unsigned). */
22126 gen_trunckfdf2_hw
, /* DFmode <- KFmode. */
22127 gen_trunckfsf2_hw
, /* SFmode <- KFmode. */
22128 gen_fix_kfsi2_hw
, /* SImode <- KFmode (signed). */
22129 gen_fixuns_kfsi2_hw
, /* SImode <- KFmode (unsigned). */
22130 gen_fix_kfdi2_hw
, /* DImode <- KFmode (signed). */
22131 gen_fixuns_kfdi2_hw
, /* DImode <- KFmode (unsigned). */
    /* conversions to/from TFmode */
22136 gen_extenddftf2_hw
, /* TFmode <- DFmode. */
22137 gen_extendsftf2_hw
, /* TFmode <- SFmode. */
22138 gen_float_tfsi2_hw
, /* TFmode <- SImode (signed). */
22139 gen_floatuns_tfsi2_hw
, /* TFmode <- SImode (unsigned). */
22140 gen_float_tfdi2_hw
, /* TFmode <- DImode (signed). */
22141 gen_floatuns_tfdi2_hw
, /* TFmode <- DImode (unsigned). */
22142 gen_trunctfdf2_hw
, /* DFmode <- TFmode. */
22143 gen_trunctfsf2_hw
, /* SFmode <- TFmode. */
22144 gen_fix_tfsi2_hw
, /* SImode <- TFmode (signed). */
22145 gen_fixuns_tfsi2_hw
, /* SImode <- TFmode (unsigned). */
22146 gen_fix_tfdi2_hw
, /* DImode <- TFmode (signed). */
22147 gen_fixuns_tfdi2_hw
, /* DImode <- TFmode (unsigned). */
22151 if (dest_mode
== src_mode
)
22152 gcc_unreachable ();
22154 /* Eliminate memory operations. */
22156 src
= force_reg (src_mode
, src
);
22160 rtx tmp
= gen_reg_rtx (dest_mode
);
22161 rs6000_expand_float128_convert (tmp
, src
, unsigned_p
);
22162 rs6000_emit_move (dest
, tmp
, dest_mode
);
22166 /* Convert to IEEE 128-bit floating point. */
22167 if (FLOAT128_IEEE_P (dest_mode
))
22169 if (dest_mode
== KFmode
)
22171 else if (dest_mode
== TFmode
)
22174 gcc_unreachable ();
22180 hw_convert
= hw_conversions
[kf_or_tf
].from_df
;
22185 hw_convert
= hw_conversions
[kf_or_tf
].from_sf
;
22191 if (FLOAT128_IBM_P (src_mode
))
22200 cvt
= ufloat_optab
;
22201 hw_convert
= hw_conversions
[kf_or_tf
].from_si_uns
;
22205 cvt
= sfloat_optab
;
22206 hw_convert
= hw_conversions
[kf_or_tf
].from_si_sign
;
22213 cvt
= ufloat_optab
;
22214 hw_convert
= hw_conversions
[kf_or_tf
].from_di_uns
;
22218 cvt
= sfloat_optab
;
22219 hw_convert
= hw_conversions
[kf_or_tf
].from_di_sign
;
22224 gcc_unreachable ();
22228 /* Convert from IEEE 128-bit floating point. */
22229 else if (FLOAT128_IEEE_P (src_mode
))
22231 if (src_mode
== KFmode
)
22233 else if (src_mode
== TFmode
)
22236 gcc_unreachable ();
22242 hw_convert
= hw_conversions
[kf_or_tf
].to_df
;
22247 hw_convert
= hw_conversions
[kf_or_tf
].to_sf
;
22253 if (FLOAT128_IBM_P (dest_mode
))
22263 hw_convert
= hw_conversions
[kf_or_tf
].to_si_uns
;
22268 hw_convert
= hw_conversions
[kf_or_tf
].to_si_sign
;
22276 hw_convert
= hw_conversions
[kf_or_tf
].to_di_uns
;
22281 hw_convert
= hw_conversions
[kf_or_tf
].to_di_sign
;
22286 gcc_unreachable ();
22290 /* Both IBM format. */
22291 else if (FLOAT128_IBM_P (dest_mode
) && FLOAT128_IBM_P (src_mode
))
22295 gcc_unreachable ();
22297 /* Handle conversion between TFmode/KFmode. */
22299 emit_move_insn (dest
, gen_lowpart (dest_mode
, src
));
22301 /* Handle conversion if we have hardware support. */
22302 else if (TARGET_FLOAT128_HW
&& hw_convert
)
22303 emit_insn ((hw_convert
) (dest
, src
));
22305 /* Call an external function to do the conversion. */
22306 else if (cvt
!= unknown_optab
)
22308 libfunc
= convert_optab_libfunc (cvt
, dest_mode
, src_mode
);
22309 gcc_assert (libfunc
!= NULL_RTX
);
22311 dest2
= emit_library_call_value (libfunc
, dest
, LCT_CONST
, dest_mode
,
22314 gcc_assert (dest2
!= NULL_RTX
);
22315 if (!rtx_equal_p (dest
, dest2
))
22316 emit_move_insn (dest
, dest2
);
22320 gcc_unreachable ();
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}

/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    return op1;

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
  else
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));

  return scratch;
}
22356 rs6000_emit_sCOND (machine_mode mode
, rtx operands
[])
22359 machine_mode op_mode
;
22360 enum rtx_code cond_code
;
22361 rtx result
= operands
[0];
22363 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
22364 cond_code
= GET_CODE (condition_rtx
);
22366 if (cond_code
== NE
22367 || cond_code
== GE
|| cond_code
== LE
22368 || cond_code
== GEU
|| cond_code
== LEU
22369 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
22371 rtx not_result
= gen_reg_rtx (CCEQmode
);
22372 rtx not_op
, rev_cond_rtx
;
22373 machine_mode cc_mode
;
22375 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
22377 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
22378 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
22379 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
22380 emit_insn (gen_rtx_SET (not_result
, not_op
));
22381 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
22384 op_mode
= GET_MODE (XEXP (operands
[1], 0));
22385 if (op_mode
== VOIDmode
)
22386 op_mode
= GET_MODE (XEXP (operands
[1], 1));
22388 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
22390 PUT_MODE (condition_rtx
, DImode
);
22391 convert_move (result
, condition_rtx
, 0);
22395 PUT_MODE (condition_rtx
, SImode
);
22396 emit_insn (gen_rtx_SET (result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
22414 /* Return the string to output a conditional branch to LABEL, which is
22415 the operand template of the label, or NULL if the branch is really a
22416 conditional return.
22418 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22419 condition code register and its mode specifies what kind of
22420 comparison we made.
22422 REVERSED is nonzero if we should reverse the sense of the comparison.
22424 INSN is the insn. */
22427 output_cbranch (rtx op
, const char *label
, int reversed
, rtx_insn
*insn
)
22429 static char string
[64];
22430 enum rtx_code code
= GET_CODE (op
);
22431 rtx cc_reg
= XEXP (op
, 0);
22432 machine_mode mode
= GET_MODE (cc_reg
);
22433 int cc_regno
= REGNO (cc_reg
) - CR0_REGNO
;
22434 int need_longbranch
= label
!= NULL
&& get_attr_length (insn
) == 8;
22435 int really_reversed
= reversed
^ need_longbranch
;
22441 validate_condition_mode (code
, mode
);
22443 /* Work out which way this really branches. We could use
22444 reverse_condition_maybe_unordered here always but this
22445 makes the resulting assembler clearer. */
22446 if (really_reversed
)
22448 /* Reversal of FP compares takes care -- an ordered compare
22449 becomes an unordered compare and vice versa. */
22450 if (mode
== CCFPmode
)
22451 code
= reverse_condition_maybe_unordered (code
);
22453 code
= reverse_condition (code
);
22458 /* Not all of these are actually distinct opcodes, but
22459 we distinguish them for clarity of the resulting assembler. */
22460 case NE
: case LTGT
:
22461 ccode
= "ne"; break;
22462 case EQ
: case UNEQ
:
22463 ccode
= "eq"; break;
22465 ccode
= "ge"; break;
22466 case GT
: case GTU
: case UNGT
:
22467 ccode
= "gt"; break;
22469 ccode
= "le"; break;
22470 case LT
: case LTU
: case UNLT
:
22471 ccode
= "lt"; break;
22472 case UNORDERED
: ccode
= "un"; break;
22473 case ORDERED
: ccode
= "nu"; break;
22474 case UNGE
: ccode
= "nl"; break;
22475 case UNLE
: ccode
= "ng"; break;
22477 gcc_unreachable ();
22480 /* Maybe we have a guess as to how likely the branch is. */
22482 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
22483 if (note
!= NULL_RTX
)
22485 /* PROB is the difference from 50%. */
22486 int prob
= profile_probability::from_reg_br_prob_note (XINT (note
, 0))
22487 .to_reg_br_prob_base () - REG_BR_PROB_BASE
/ 2;
22489 /* Only hint for highly probable/improbable branches on newer cpus when
22490 we have real profile data, as static prediction overrides processor
22491 dynamic prediction. For older cpus we may as well always hint, but
22492 assume not taken for branches that are very close to 50% as a
22493 mispredicted taken branch is more expensive than a
22494 mispredicted not-taken branch. */
22495 if (rs6000_always_hint
22496 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
22497 && (profile_status_for_fn (cfun
) != PROFILE_GUESSED
)
22498 && br_prob_note_reliable_p (note
)))
22500 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
22501 && ((prob
> 0) ^ need_longbranch
))
22509 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
22511 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
22513 /* We need to escape any '%' characters in the reg_names string.
22514 Assume they'd only be the first character.... */
22515 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
22517 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
22521 /* If the branch distance was too far, we may have to use an
22522 unconditional branch to go the distance. */
22523 if (need_longbranch
)
22524 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
22526 s
+= sprintf (s
, ",%s", label
);
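
  /* Examples (illustration only): for an EQ test against cr0 with a reliable
     "likely taken" profile note on a CPU that wants hints, the string built
     here is roughly "beq+ 0,.L25"; if the label is beyond the reach of the
     16-bit conditional-branch displacement, the condition is reversed and
     the result is roughly "bne 0,$+8" followed by "\n\tb .L25".  */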
22532 /* Return insn for VSX or Altivec comparisons. */
22535 rs6000_emit_vector_compare_inner (enum rtx_code code
, rtx op0
, rtx op1
)
22538 machine_mode mode
= GET_MODE (op0
);
22546 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
22557 mask
= gen_reg_rtx (mode
);
22558 emit_insn (gen_rtx_SET (mask
, gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
22565 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22566 DMODE is expected destination mode. This is a recursive function. */
22569 rs6000_emit_vector_compare (enum rtx_code rcode
,
22571 machine_mode dmode
)
22574 bool swap_operands
= false;
22575 bool try_again
= false;
22577 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
22578 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
22580 /* See if the comparison works as is. */
22581 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22589 swap_operands
= true;
22594 swap_operands
= true;
22602 /* Invert condition and try again.
22603 e.g., A != B becomes ~(A==B). */
22605 enum rtx_code rev_code
;
22606 enum insn_code nor_code
;
22609 rev_code
= reverse_condition_maybe_unordered (rcode
);
22610 if (rev_code
== UNKNOWN
)
22613 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
22614 if (nor_code
== CODE_FOR_nothing
)
22617 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
22621 mask
= gen_reg_rtx (dmode
);
22622 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
22630 /* Try GT/GTU/LT/LTU OR EQ */
22633 enum insn_code ior_code
;
22634 enum rtx_code new_code
;
22655 gcc_unreachable ();
22658 ior_code
= optab_handler (ior_optab
, dmode
);
22659 if (ior_code
== CODE_FOR_nothing
)
22662 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
22666 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
22670 mask
= gen_reg_rtx (dmode
);
22671 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
22682 std::swap (op0
, op1
);
22684 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22689 /* You only get two chances. */
22693 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22694 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22695 operands for the relation operation COND. */
22698 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
22699 rtx cond
, rtx cc_op0
, rtx cc_op1
)
22701 machine_mode dest_mode
= GET_MODE (dest
);
22702 machine_mode mask_mode
= GET_MODE (cc_op0
);
22703 enum rtx_code rcode
= GET_CODE (cond
);
22704 machine_mode cc_mode
= CCmode
;
22707 bool invert_move
= false;
22709 if (VECTOR_UNIT_NONE_P (dest_mode
))
22712 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
22713 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
22717 /* Swap operands if we can, and fall back to doing the operation as
22718 specified, and doing a NOR to invert the test. */
22724 /* Invert condition and try again.
22725 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22726 invert_move
= true;
22727 rcode
= reverse_condition_maybe_unordered (rcode
);
22728 if (rcode
== UNKNOWN
)
22734 if (GET_MODE_CLASS (mask_mode
) == MODE_VECTOR_INT
)
22736 /* Invert condition to avoid compound test. */
22737 invert_move
= true;
22738 rcode
= reverse_condition (rcode
);
22746 /* Mark unsigned tests with CCUNSmode. */
22747 cc_mode
= CCUNSmode
;
22749 /* Invert condition to avoid compound test if necessary. */
22750 if (rcode
== GEU
|| rcode
== LEU
)
22752 invert_move
= true;
22753 rcode
= reverse_condition (rcode
);
22761 /* Get the vector mask for the given relational operations. */
22762 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
22768 std::swap (op_true
, op_false
);
22770 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22771 if (GET_MODE_CLASS (dest_mode
) == MODE_VECTOR_INT
22772 && (GET_CODE (op_true
) == CONST_VECTOR
22773 || GET_CODE (op_false
) == CONST_VECTOR
))
22775 rtx constant_0
= CONST0_RTX (dest_mode
);
22776 rtx constant_m1
= CONSTM1_RTX (dest_mode
);
22778 if (op_true
== constant_m1
&& op_false
== constant_0
)
22780 emit_move_insn (dest
, mask
);
22784 else if (op_true
== constant_0
&& op_false
== constant_m1
)
22786 emit_insn (gen_rtx_SET (dest
, gen_rtx_NOT (dest_mode
, mask
)));
22790 /* If we can't use the vector comparison directly, perhaps we can use
22791 the mask for the true or false fields, instead of loading up a
22793 if (op_true
== constant_m1
)
22796 if (op_false
== constant_0
)
22800 if (!REG_P (op_true
) && !SUBREG_P (op_true
))
22801 op_true
= force_reg (dest_mode
, op_true
);
22803 if (!REG_P (op_false
) && !SUBREG_P (op_false
))
22804 op_false
= force_reg (dest_mode
, op_false
);
22806 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
22807 CONST0_RTX (dest_mode
));
22808 emit_insn (gen_rtx_SET (dest
,
22809 gen_rtx_IF_THEN_ELSE (dest_mode
,
22816 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22817 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22818 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22819 hardware has no such operation. */
22822 rs6000_emit_p9_fp_minmax (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22824 enum rtx_code code
= GET_CODE (op
);
22825 rtx op0
= XEXP (op
, 0);
22826 rtx op1
= XEXP (op
, 1);
22827 machine_mode compare_mode
= GET_MODE (op0
);
22828 machine_mode result_mode
= GET_MODE (dest
);
22829 bool max_p
= false;
22831 if (result_mode
!= compare_mode
)
22834 if (code
== GE
|| code
== GT
)
22836 else if (code
== LE
|| code
== LT
)
22841 if (rtx_equal_p (op0
, true_cond
) && rtx_equal_p (op1
, false_cond
))
22844 else if (rtx_equal_p (op1
, true_cond
) && rtx_equal_p (op0
, false_cond
))
22850 rs6000_emit_minmax (dest
, max_p
? SMAX
: SMIN
, op0
, op1
);
22854 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22855 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22856 operands of the last comparison is nonzero/true, FALSE_COND if it is
22857 zero/false. Return 0 if the hardware has no such operation. */
22860 rs6000_emit_p9_fp_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22862 enum rtx_code code
= GET_CODE (op
);
22863 rtx op0
= XEXP (op
, 0);
22864 rtx op1
= XEXP (op
, 1);
22865 machine_mode result_mode
= GET_MODE (dest
);
22870 if (!can_create_pseudo_p ())
22883 code
= swap_condition (code
);
22884 std::swap (op0
, op1
);
22891 /* Generate: [(parallel [(set (dest)
22892 (if_then_else (op (cmp1) (cmp2))
22895 (clobber (scratch))])]. */
22897 compare_rtx
= gen_rtx_fmt_ee (code
, CCFPmode
, op0
, op1
);
22898 cmove_rtx
= gen_rtx_SET (dest
,
22899 gen_rtx_IF_THEN_ELSE (result_mode
,
22904 clobber_rtx
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (V2DImode
));
22905 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
22906 gen_rtvec (2, cmove_rtx
, clobber_rtx
)));
22911 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22912 operands of the last comparison is nonzero/true, FALSE_COND if it
22913 is zero/false. Return 0 if the hardware has no such operation. */
22916 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22918 enum rtx_code code
= GET_CODE (op
);
22919 rtx op0
= XEXP (op
, 0);
22920 rtx op1
= XEXP (op
, 1);
22921 machine_mode compare_mode
= GET_MODE (op0
);
22922 machine_mode result_mode
= GET_MODE (dest
);
22924 bool is_against_zero
;
22926 /* These modes should always match. */
22927 if (GET_MODE (op1
) != compare_mode
22928 /* In the isel case however, we can use a compare immediate, so
22929 op1 may be a small constant. */
22930 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
22932 if (GET_MODE (true_cond
) != result_mode
)
22934 if (GET_MODE (false_cond
) != result_mode
)
22937 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22938 if (TARGET_P9_MINMAX
22939 && (compare_mode
== SFmode
|| compare_mode
== DFmode
)
22940 && (result_mode
== SFmode
|| result_mode
== DFmode
))
22942 if (rs6000_emit_p9_fp_minmax (dest
, op
, true_cond
, false_cond
))
22945 if (rs6000_emit_p9_fp_cmove (dest
, op
, true_cond
, false_cond
))
22949 /* Don't allow using floating point comparisons for integer results for
22951 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
22954 /* First, work out if the hardware can do this at all, or
22955 if it's too slow.... */
22956 if (!FLOAT_MODE_P (compare_mode
))
22959 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
22963 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
22965 /* A floating-point subtract might overflow, underflow, or produce
22966 an inexact result, thus changing the floating-point flags, so it
22967 can't be generated if we care about that. It's safe if one side
22968 of the construct is zero, since then no subtract will be
22970 if (SCALAR_FLOAT_MODE_P (compare_mode
)
22971 && flag_trapping_math
&& ! is_against_zero
)
22974 /* Eliminate half of the comparisons by switching operands, this
22975 makes the remaining code simpler. */
22976 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
22977 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
22979 code
= reverse_condition_maybe_unordered (code
);
22981 true_cond
= false_cond
;
22985 /* UNEQ and LTGT take four instructions for a comparison with zero,
22986 it'll probably be faster to use a branch here too. */
22987 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
22990 /* We're going to try to implement comparisons by performing
22991 a subtract, then comparing against zero. Unfortunately,
22992 Inf - Inf is NaN which is not zero, and so if we don't
22993 know that the operand is finite and the comparison
22994 would treat EQ different to UNORDERED, we can't do it. */
22995 if (HONOR_INFINITIES (compare_mode
)
22996 && code
!= GT
&& code
!= UNGE
22997 && (GET_CODE (op1
) != CONST_DOUBLE
22998 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1
)))
22999 /* Constructs of the form (a OP b ? a : b) are safe. */
23000 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
23001 || (! rtx_equal_p (op0
, true_cond
)
23002 && ! rtx_equal_p (op1
, true_cond
))))
23005 /* At this point we know we can use fsel. */
23007 /* Reduce the comparison to a comparison against zero. */
23008 if (! is_against_zero
)
23010 temp
= gen_reg_rtx (compare_mode
);
23011 emit_insn (gen_rtx_SET (temp
, gen_rtx_MINUS (compare_mode
, op0
, op1
)));
23013 op1
= CONST0_RTX (compare_mode
);
23016 /* If we don't care about NaNs we can reduce some of the comparisons
23017 down to faster ones. */
23018 if (! HONOR_NANS (compare_mode
))
23024 true_cond
= false_cond
;
23037 /* Now, reduce everything down to a GE. */
23044 temp
= gen_reg_rtx (compare_mode
);
23045 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23050 temp
= gen_reg_rtx (compare_mode
);
23051 emit_insn (gen_rtx_SET (temp
, gen_rtx_ABS (compare_mode
, op0
)));
23056 temp
= gen_reg_rtx (compare_mode
);
23057 emit_insn (gen_rtx_SET (temp
,
23058 gen_rtx_NEG (compare_mode
,
23059 gen_rtx_ABS (compare_mode
, op0
))));
23064 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23065 temp
= gen_reg_rtx (result_mode
);
23066 emit_insn (gen_rtx_SET (temp
,
23067 gen_rtx_IF_THEN_ELSE (result_mode
,
23068 gen_rtx_GE (VOIDmode
,
23070 true_cond
, false_cond
)));
23071 false_cond
= true_cond
;
23074 temp
= gen_reg_rtx (compare_mode
);
23075 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23080 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23081 temp
= gen_reg_rtx (result_mode
);
23082 emit_insn (gen_rtx_SET (temp
,
23083 gen_rtx_IF_THEN_ELSE (result_mode
,
23084 gen_rtx_GE (VOIDmode
,
23086 true_cond
, false_cond
)));
23087 true_cond
= false_cond
;
23090 temp
= gen_reg_rtx (compare_mode
);
23091 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23096 gcc_unreachable ();
23099 emit_insn (gen_rtx_SET (dest
,
23100 gen_rtx_IF_THEN_ELSE (result_mode
,
23101 gen_rtx_GE (VOIDmode
,
23103 true_cond
, false_cond
)));
23107 /* Same as above, but for ints (isel). */
23110 rs6000_emit_int_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
23112 rtx condition_rtx
, cr
;
23113 machine_mode mode
= GET_MODE (dest
);
23114 enum rtx_code cond_code
;
23115 rtx (*isel_func
) (rtx
, rtx
, rtx
, rtx
, rtx
);
23118 if (mode
!= SImode
&& (!TARGET_POWERPC64
|| mode
!= DImode
))
23121 /* We still have to do the compare, because isel doesn't do a
23122 compare, it just looks at the CRx bits set by a previous compare
23124 condition_rtx
= rs6000_generate_compare (op
, mode
);
23125 cond_code
= GET_CODE (condition_rtx
);
23126 cr
= XEXP (condition_rtx
, 0);
23127 signedp
= GET_MODE (cr
) == CCmode
;
23129 isel_func
= (mode
== SImode
23130 ? (signedp
? gen_isel_signed_si
: gen_isel_unsigned_si
)
23131 : (signedp
? gen_isel_signed_di
: gen_isel_unsigned_di
));
23135 case LT
: case GT
: case LTU
: case GTU
: case EQ
:
23136 /* isel handles these directly. */
23140 /* We need to swap the sense of the comparison. */
23142 std::swap (false_cond
, true_cond
);
23143 PUT_CODE (condition_rtx
, reverse_condition (cond_code
));
23148 false_cond
= force_reg (mode
, false_cond
);
23149 if (true_cond
!= const0_rtx
)
23150 true_cond
= force_reg (mode
, true_cond
);
23152 emit_insn (isel_func (dest
, condition_rtx
, true_cond
, false_cond
, cr
));
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
		  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* Split a signbit operation on 64-bit machines with direct move.  Also allow
   for the value to come from memory or if it is already loaded into a GPR.  */

void
rs6000_split_signbit (rtx dest, rtx src)
{
  machine_mode d_mode = GET_MODE (dest);
  machine_mode s_mode = GET_MODE (src);
  rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
  rtx shift_reg = dest_di;

  gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);

  if (MEM_P (src))
    {
      rtx mem = (WORDS_BIG_ENDIAN
		 ? adjust_address (src, DImode, 0)
		 : adjust_address (src, DImode, 8));
      emit_insn (gen_rtx_SET (dest_di, mem));
    }

  else
    {
      unsigned int r = reg_or_subregno (src);

      if (INT_REGNO_P (r))
	shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));

      else
	{
	  /* Generate the special mfvsrd instruction to get it in a GPR.  */
	  gcc_assert (VSX_REGNO_P (r));
	  if (s_mode == KFmode)
	    emit_insn (gen_signbitkf2_dm2 (dest_di, src));
	  else
	    emit_insn (gen_signbittf2_dm2 (dest_di, src));
	}
    }

  emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_load_lockedqi;
      break;
    case HImode:
      fn = gen_load_lockedhi;
      break;
    case SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    case TImode:
    case PTImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_store_conditionalqi;
      break;
    case HImode:
      fn = gen_store_conditionalhi;
      break;
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    case TImode:
    case PTImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
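
/* Illustrative sketch (not emitted verbatim): the atomic splitters pair the
   two helpers above into a retry loop that at the assembly level looks
   roughly like

	.L1:	lwarx  rT,0,rADDR	# load-locked
		...			# compute the new value in rN
		stwcx. rN,0,rADDR	# store-conditional sets CR0
		bne-   .L1		# reservation lost: retry

   When lbarx/lharx and stbcx./sthcx. are available, the byte and halfword
   forms are used instead of the word forms.  */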
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
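
/* Worked example (illustration only): for a HImode access at address
   0x1002, the aligned SImode word covers 0x1000..0x1003 and
   shift = (0x1002 << 3) & 0x10 = 16.  On a little-endian target the
   halfword therefore occupies bits 16..31 of the word and
   mask = 0xffff << 16; on a big-endian target the shift is XORed with
   0x10, giving 0, because bytes 2..3 are the least significant ones
   there.  */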
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}

/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
23462 /* Expand an atomic compare and swap operation. */
23465 rs6000_expand_atomic_compare_and_swap (rtx operands
[])
23467 rtx boolval
, retval
, mem
, oldval
, newval
, cond
;
23468 rtx label1
, label2
, x
, mask
, shift
;
23469 machine_mode mode
, orig_mode
;
23470 enum memmodel mod_s
, mod_f
;
23473 boolval
= operands
[0];
23474 retval
= operands
[1];
23476 oldval
= operands
[3];
23477 newval
= operands
[4];
23478 is_weak
= (INTVAL (operands
[5]) != 0);
23479 mod_s
= memmodel_base (INTVAL (operands
[6]));
23480 mod_f
= memmodel_base (INTVAL (operands
[7]));
23481 orig_mode
= mode
= GET_MODE (mem
);
23483 mask
= shift
= NULL_RTX
;
23484 if (mode
== QImode
|| mode
== HImode
)
23486 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23487 lwarx and shift/mask operations. With power8, we need to do the
23488 comparison in SImode, but the store is still done in QI/HImode. */
23489 oldval
= convert_modes (SImode
, mode
, oldval
, 1);
23491 if (!TARGET_SYNC_HI_QI
)
23493 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23495 /* Shift and mask OLDVAL into position with the word. */
23496 oldval
= expand_simple_binop (SImode
, ASHIFT
, oldval
, shift
,
23497 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23499 /* Shift and mask NEWVAL into position within the word. */
23500 newval
= convert_modes (SImode
, mode
, newval
, 1);
23501 newval
= expand_simple_binop (SImode
, ASHIFT
, newval
, shift
,
23502 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23505 /* Prepare to adjust the return value. */
23506 retval
= gen_reg_rtx (SImode
);
23509 else if (reg_overlap_mentioned_p (retval
, oldval
))
23510 oldval
= copy_to_reg (oldval
);
23512 if (mode
!= TImode
&& !reg_or_short_operand (oldval
, mode
))
23513 oldval
= copy_to_mode_reg (mode
, oldval
);
23515 if (reg_overlap_mentioned_p (retval
, newval
))
23516 newval
= copy_to_reg (newval
);
23518 mem
= rs6000_pre_atomic_barrier (mem
, mod_s
);
23523 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23524 emit_label (XEXP (label1
, 0));
23526 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23528 emit_load_locked (mode
, retval
, mem
);
23532 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
23533 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23535 cond
= gen_reg_rtx (CCmode
);
23536 /* If we have TImode, synthesize a comparison. */
23537 if (mode
!= TImode
)
23538 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
23541 rtx xor1_result
= gen_reg_rtx (DImode
);
23542 rtx xor2_result
= gen_reg_rtx (DImode
);
23543 rtx or_result
= gen_reg_rtx (DImode
);
23544 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
23545 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
23546 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
23547 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
23549 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
23550 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
23551 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
23552 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
23555 emit_insn (gen_rtx_SET (cond
, x
));
23557 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23558 emit_unlikely_jump (x
, label2
);
23562 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
23564 emit_store_conditional (orig_mode
, cond
, mem
, x
);
23568 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23569 emit_unlikely_jump (x
, label1
);
23572 if (!is_mm_relaxed (mod_f
))
23573 emit_label (XEXP (label2
, 0));
23575 rs6000_post_atomic_barrier (mod_s
);
23577 if (is_mm_relaxed (mod_f
))
23578 emit_label (XEXP (label2
, 0));
23581 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
23582 else if (mode
!= GET_MODE (operands
[1]))
23583 convert_move (operands
[1], retval
, 1);
23585 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23586 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
23587 emit_insn (gen_rtx_SET (boolval
, x
));
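
/* Illustrative sketch (not emitted verbatim): for a word-sized strong
   compare-and-swap, the RTL built above corresponds roughly to

	.L1:	lwarx   rRET,0,rMEM
		cmpw    cr0,rRET,rOLD
		bne-    cr0,.L2
		stwcx.  rNEW,0,rMEM
		bne-    cr0,.L1
	.L2:	isync			# or lwsync/sync, per the memory model

   with CR0 holding EQ on success and NE on failure, as noted above.  */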
23590 /* Expand an atomic exchange operation. */
23593 rs6000_expand_atomic_exchange (rtx operands
[])
23595 rtx retval
, mem
, val
, cond
;
23597 enum memmodel model
;
23598 rtx label
, x
, mask
, shift
;
23600 retval
= operands
[0];
23603 model
= memmodel_base (INTVAL (operands
[3]));
23604 mode
= GET_MODE (mem
);
23606 mask
= shift
= NULL_RTX
;
23607 if (!TARGET_SYNC_HI_QI
&& (mode
== QImode
|| mode
== HImode
))
23609 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23611 /* Shift and mask VAL into position with the word. */
23612 val
= convert_modes (SImode
, mode
, val
, 1);
23613 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23614 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23616 /* Prepare to adjust the return value. */
23617 retval
= gen_reg_rtx (SImode
);
23621 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23623 label
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23624 emit_label (XEXP (label
, 0));
23626 emit_load_locked (mode
, retval
, mem
);
23630 x
= rs6000_mask_atomic_subword (retval
, val
, mask
);
23632 cond
= gen_reg_rtx (CCmode
);
23633 emit_store_conditional (mode
, cond
, mem
, x
);
23635 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23636 emit_unlikely_jump (x
, label
);
23638 rs6000_post_atomic_barrier (model
);
23641 rs6000_finish_atomic_subword (operands
[0], retval
, shift
);
23644 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23645 to perform. MEM is the memory on which to operate. VAL is the second
23646 operand of the binary operator. BEFORE and AFTER are optional locations to
23647 return the value of MEM either before of after the operation. MODEL_RTX
23648 is a CONST_INT containing the memory model to use. */
23651 rs6000_expand_atomic_op (enum rtx_code code
, rtx mem
, rtx val
,
23652 rtx orig_before
, rtx orig_after
, rtx model_rtx
)
23654 enum memmodel model
= memmodel_base (INTVAL (model_rtx
));
23655 machine_mode mode
= GET_MODE (mem
);
23656 machine_mode store_mode
= mode
;
23657 rtx label
, x
, cond
, mask
, shift
;
23658 rtx before
= orig_before
, after
= orig_after
;
23660 mask
= shift
= NULL_RTX
;
23661 /* On power8, we want to use SImode for the operation. On previous systems,
23662 use the operation in a subword and shift/mask to get the proper byte or
23664 if (mode
== QImode
|| mode
== HImode
)
23666 if (TARGET_SYNC_HI_QI
)
23668 val
= convert_modes (SImode
, mode
, val
, 1);
23670 /* Prepare to adjust the return value. */
23671 before
= gen_reg_rtx (SImode
);
23673 after
= gen_reg_rtx (SImode
);
23678 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23680 /* Shift and mask VAL into position with the word. */
23681 val
= convert_modes (SImode
, mode
, val
, 1);
23682 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23683 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23689 /* We've already zero-extended VAL. That is sufficient to
23690 make certain that it does not affect other bits. */
23695 /* If we make certain that all of the other bits in VAL are
23696 set, that will be sufficient to not affect other bits. */
23697 x
= gen_rtx_NOT (SImode
, mask
);
23698 x
= gen_rtx_IOR (SImode
, x
, val
);
23699 emit_insn (gen_rtx_SET (val
, x
));
23706 /* These will all affect bits outside the field and need
23707 adjustment via MASK within the loop. */
23711 gcc_unreachable ();
23714 /* Prepare to adjust the return value. */
23715 before
= gen_reg_rtx (SImode
);
23717 after
= gen_reg_rtx (SImode
);
23718 store_mode
= mode
= SImode
;
23722 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23724 label
= gen_label_rtx ();
23725 emit_label (label
);
23726 label
= gen_rtx_LABEL_REF (VOIDmode
, label
);
23728 if (before
== NULL_RTX
)
23729 before
= gen_reg_rtx (mode
);
23731 emit_load_locked (mode
, before
, mem
);
23735 x
= expand_simple_binop (mode
, AND
, before
, val
,
23736 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23737 after
= expand_simple_unop (mode
, NOT
, x
, after
, 1);
23741 after
= expand_simple_binop (mode
, code
, before
, val
,
23742 after
, 1, OPTAB_LIB_WIDEN
);
23748 x
= expand_simple_binop (SImode
, AND
, after
, mask
,
23749 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23750 x
= rs6000_mask_atomic_subword (before
, x
, mask
);
23752 else if (store_mode
!= mode
)
23753 x
= convert_modes (store_mode
, mode
, x
, 1);
23755 cond
= gen_reg_rtx (CCmode
);
23756 emit_store_conditional (store_mode
, cond
, mem
, x
);
23758 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23759 emit_unlikely_jump (x
, label
);
23761 rs6000_post_atomic_barrier (model
);
  /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
     then do the calculations in a SImode register.  */
23768 rs6000_finish_atomic_subword (orig_before
, before
, shift
);
23770 rs6000_finish_atomic_subword (orig_after
, after
, shift
);
23772 else if (store_mode
!= mode
)
  /* QImode/HImode on machines with lbarx/lharx where we do the native
     operation and then do the calculations in a SImode register.  */
23777 convert_move (orig_before
, before
, 1);
23779 convert_move (orig_after
, after
, 1);
23781 else if (orig_after
&& after
!= orig_after
)
23782 emit_move_insn (orig_after
, after
);
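
/* Illustrative sketch (not emitted verbatim): for a full-word atomic
   fetch-and-<op>, the loop constructed above is roughly

	.L1:	lwarx   rBEFORE,0,rMEM
		<op>    rAFTER,rBEFORE,rVAL
		stwcx.  rAFTER,0,rMEM
		bne-    cr0,.L1

   BEFORE and AFTER then supply the old and new values for the
   __atomic_fetch_<op> and __atomic_<op>_fetch forms respectively.  */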
23785 /* Emit instructions to move SRC to DST. Called by splitters for
23786 multi-register moves. It will emit at most one instruction for
23787 each register that is accessed; that is, it won't emit li/lis pairs
23788 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23792 rs6000_split_multireg_move (rtx dst
, rtx src
)
23794 /* The register number of the first register being moved. */
23796 /* The mode that is to be moved. */
23798 /* The mode that the move is being done in, and its size. */
23799 machine_mode reg_mode
;
23801 /* The number of registers that will be moved. */
23804 reg
= REG_P (dst
) ? REGNO (dst
) : REGNO (src
);
23805 mode
= GET_MODE (dst
);
23806 nregs
= hard_regno_nregs
[reg
][mode
];
23807 if (FP_REGNO_P (reg
))
23808 reg_mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
:
23809 ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
) ? DFmode
: SFmode
);
23810 else if (ALTIVEC_REGNO_P (reg
))
23811 reg_mode
= V16QImode
;
23813 reg_mode
= word_mode
;
23814 reg_mode_size
= GET_MODE_SIZE (reg_mode
);
23816 gcc_assert (reg_mode_size
* nregs
== GET_MODE_SIZE (mode
));
  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
23826 if (FP_REGNO_P (reg
) && DECIMAL_FLOAT_MODE_P (mode
) && !BYTES_BIG_ENDIAN
)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }
  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
23866 bool used_update
= false;
23867 rtx restore_basereg
= NULL_RTX
;
23869 if (MEM_P (src
) && INT_REGNO_P (reg
))
23873 if (GET_CODE (XEXP (src
, 0)) == PRE_INC
23874 || GET_CODE (XEXP (src
, 0)) == PRE_DEC
)
23877 breg
= XEXP (XEXP (src
, 0), 0);
23878 delta_rtx
= (GET_CODE (XEXP (src
, 0)) == PRE_INC
23879 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src
)))
23880 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src
))));
23881 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23882 src
= replace_equiv_address (src
, breg
);
23884 else if (! rs6000_offsettable_memref_p (src
, reg_mode
))
23886 if (GET_CODE (XEXP (src
, 0)) == PRE_MODIFY
)
23888 rtx basereg
= XEXP (XEXP (src
, 0), 0);
23891 rtx ndst
= simplify_gen_subreg (reg_mode
, dst
, mode
, 0);
23892 emit_insn (gen_rtx_SET (ndst
,
23893 gen_rtx_MEM (reg_mode
,
23895 used_update
= true;
23898 emit_insn (gen_rtx_SET (basereg
,
23899 XEXP (XEXP (src
, 0), 1)));
23900 src
= replace_equiv_address (src
, basereg
);
23904 rtx basereg
= gen_rtx_REG (Pmode
, reg
);
23905 emit_insn (gen_rtx_SET (basereg
, XEXP (src
, 0)));
23906 src
= replace_equiv_address (src
, basereg
);
23910 breg
= XEXP (src
, 0);
23911 if (GET_CODE (breg
) == PLUS
|| GET_CODE (breg
) == LO_SUM
)
23912 breg
= XEXP (breg
, 0);
23914 /* If the base register we are using to address memory is
23915 also a destination reg, then change that register last. */
23917 && REGNO (breg
) >= REGNO (dst
)
23918 && REGNO (breg
) < REGNO (dst
) + nregs
)
23919 j
= REGNO (breg
) - REGNO (dst
);
23921 else if (MEM_P (dst
) && INT_REGNO_P (reg
))
23925 if (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23926 || GET_CODE (XEXP (dst
, 0)) == PRE_DEC
)
23929 breg
= XEXP (XEXP (dst
, 0), 0);
23930 delta_rtx
= (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23931 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst
)))
23932 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst
))));
23934 /* We have to update the breg before doing the store.
23935 Use store with update, if available. */
23939 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23940 emit_insn (TARGET_32BIT
23941 ? (TARGET_POWERPC64
23942 ? gen_movdi_si_update (breg
, breg
, delta_rtx
, nsrc
)
23943 : gen_movsi_update (breg
, breg
, delta_rtx
, nsrc
))
23944 : gen_movdi_di_update (breg
, breg
, delta_rtx
, nsrc
));
23945 used_update
= true;
23948 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23949 dst
= replace_equiv_address (dst
, breg
);
23951 else if (!rs6000_offsettable_memref_p (dst
, reg_mode
)
23952 && GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23954 if (GET_CODE (XEXP (dst
, 0)) == PRE_MODIFY
)
23956 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23959 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23960 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode
,
23963 used_update
= true;
23966 emit_insn (gen_rtx_SET (basereg
,
23967 XEXP (XEXP (dst
, 0), 1)));
23968 dst
= replace_equiv_address (dst
, basereg
);
23972 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23973 rtx offsetreg
= XEXP (XEXP (dst
, 0), 1);
23974 gcc_assert (GET_CODE (XEXP (dst
, 0)) == PLUS
23976 && REG_P (offsetreg
)
23977 && REGNO (basereg
) != REGNO (offsetreg
));
23978 if (REGNO (basereg
) == 0)
23980 rtx tmp
= offsetreg
;
23981 offsetreg
= basereg
;
23984 emit_insn (gen_add3_insn (basereg
, basereg
, offsetreg
));
23985 restore_basereg
= gen_sub3_insn (basereg
, basereg
, offsetreg
);
23986 dst
= replace_equiv_address (dst
, basereg
);
23989 else if (GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23990 gcc_assert (rs6000_offsettable_memref_p (dst
, reg_mode
));
23993 for (i
= 0; i
< nregs
; i
++)
23995 /* Calculate index to next subword. */
24000 /* If compiler already emitted move of first word by
24001 store with update, no need to do anything. */
24002 if (j
== 0 && used_update
)
24005 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
24006 j
* reg_mode_size
),
24007 simplify_gen_subreg (reg_mode
, src
, mode
,
24008 j
* reg_mode_size
)));
24010 if (restore_basereg
!= NULL_RTX
)
24011 emit_insn (restore_basereg
);
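
/* Illustrative sketch only: the same forward/backward ordering decision that
   rs6000_split_multireg_move makes for overlapping register ranges, shown on
   a plain array of word-sized "registers".  The names here are invented for
   the example.  */
#if 0
static void
copy_reg_range (unsigned long *regs, unsigned dst, unsigned src, unsigned nregs)
{
  if (dst > src)
    /* The destination overlaps the tail of the source: copy backwards so no
       source word is overwritten before it has been read.  */
    for (unsigned i = nregs; i-- > 0;)
      regs[dst + i] = regs[src + i];
  else
    for (unsigned i = 0; i < nregs; i++)
      regs[dst + i] = regs[src + i];
}
#endif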
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Determine whether the REG is really used.  */

static bool
save_reg_p (int reg)
{
  /* We need to mark the PIC offset register live for the same conditions
     as it is set up, or otherwise it won't be saved before we clobber it.  */

  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
    {
      /* When calling eh_return, we must return true for all the cases
	 where conditional_register_usage marks the PIC offset reg
	 call used or fixed.  */
      if (TARGET_TOC && TARGET_MINIMAL_TOC
	  && (crtl->calls_eh_return
	      || df_regs_ever_live_p (reg)
	      || !constant_pool_empty_p ()))
	return true;

      if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
	  && flag_pic && crtl->uses_pic_offset_table)
	return true;
    }

  return !call_used_regs[reg] && df_regs_ever_live_p (reg);
}
/* Return the first fixed-point register that is required to be
   saved. 32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
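
/* Illustrative sketch only: how a VRSAVE-style mask is built when "bit n"
   means Vn and bit 0 is the most significant bit of the 32-bit word, as the
   comment above describes.  vr_bit is an invented helper for this example,
   not the macro the port really uses.  */
#if 0
static unsigned int
vr_bit (unsigned int n)		/* n = 0..31 selects V0..V31 */
{
  return 0x80000000u >> n;	/* MSB-is-bit-0 numbering */
}

/* Example: a function keeping V20 and V21 live would request
   vr_bit (20) | vr_bit (21) == 0x00000C00 in VRSAVE.  */
#endif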
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info)
{
  info->world_save_p = 1;
  info->world_save_p
    = (WORLD_SAVE_P (info)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info))
    {
      rtx_insn *insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (CALL_P (insn) && SIBLING_CALL_P (insn))
	  {
	    info->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it. */
      info->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link register too.  */
      info->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info->vrsave_mask == 0)
	info->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
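
/* Illustrative sketch only: the strategy word is a plain bitmask, so callers
   combine and test these flags as below.  The particular combination shown
   is made up for the example, not taken from any real function.  */
#if 0
static int
example_strategy (void)
{
  int strategy = 0;

  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;	/* no out-of-line FPR code */
  strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;		/* use stmw for the GPR save */

  if (!(strategy & REST_INLINE_GPRS))
    ;	/* an out-of-line GPR restore would be selected here */

  return strategy;
}
#endif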
24255 rs6000_savres_strategy (rs6000_stack_t
*info
,
24256 bool using_static_chain_p
)
24260 /* Select between in-line and out-of-line save and restore of regs.
24261 First, all the obvious cases where we don't use out-of-line. */
24262 if (crtl
->calls_eh_return
24263 || cfun
->machine
->ra_need_lr
)
24264 strategy
|= (SAVE_INLINE_FPRS
| REST_INLINE_FPRS
24265 | SAVE_INLINE_GPRS
| REST_INLINE_GPRS
24266 | SAVE_INLINE_VRS
| REST_INLINE_VRS
);
24268 if (info
->first_gp_reg_save
== 32)
24269 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24271 if (info
->first_fp_reg_save
== 64
24272 /* The out-of-line FP routines use double-precision stores;
24273 we can't use those routines if we don't have such stores. */
24274 || (TARGET_HARD_FLOAT
&& !TARGET_DOUBLE_FLOAT
))
24275 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24277 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1)
24278 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24280 /* Define cutoff for using out-of-line functions to save registers. */
24281 if (DEFAULT_ABI
== ABI_V4
|| TARGET_ELF
)
24283 if (!optimize_size
)
24285 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24286 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24287 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24291 /* Prefer out-of-line restore if it will exit. */
24292 if (info
->first_fp_reg_save
> 61)
24293 strategy
|= SAVE_INLINE_FPRS
;
24294 if (info
->first_gp_reg_save
> 29)
24296 if (info
->first_fp_reg_save
== 64)
24297 strategy
|= SAVE_INLINE_GPRS
;
24299 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24301 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
)
24302 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24305 else if (DEFAULT_ABI
== ABI_DARWIN
)
24307 if (info
->first_fp_reg_save
> 60)
24308 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24309 if (info
->first_gp_reg_save
> 29)
24310 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24311 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24315 gcc_checking_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
24316 if ((flag_shrink_wrap_separate
&& optimize_function_for_speed_p (cfun
))
24317 || info
->first_fp_reg_save
> 61)
24318 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24319 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24320 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24323 /* Don't bother to try to save things out-of-line if r11 is occupied
24324 by the static chain. It would require too much fiddling and the
24325 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24326 pointer on Darwin, and AIX uses r1 or r12. */
24327 if (using_static_chain_p
24328 && (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
24329 strategy
|= ((DEFAULT_ABI
== ABI_DARWIN
? 0 : SAVE_INLINE_FPRS
)
24331 | SAVE_INLINE_VRS
);
24333 /* Don't ever restore fixed regs. That means we can't use the
24334 out-of-line register restore functions if a fixed reg is in the
24335 range of regs restored. */
24336 if (!(strategy
& REST_INLINE_FPRS
))
24337 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24340 strategy
|= REST_INLINE_FPRS
;
24344 /* We can only use the out-of-line routines to restore fprs if we've
24345 saved all the registers from first_fp_reg_save in the prologue.
24346 Otherwise, we risk loading garbage. Of course, if we have saved
24347 out-of-line then we know we haven't skipped any fprs. */
24348 if ((strategy
& SAVE_INLINE_FPRS
)
24349 && !(strategy
& REST_INLINE_FPRS
))
24350 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24351 if (!save_reg_p (i
))
24353 strategy
|= REST_INLINE_FPRS
;
24357 /* Similarly, for altivec regs. */
24358 if (!(strategy
& REST_INLINE_VRS
))
24359 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24362 strategy
|= REST_INLINE_VRS
;
24366 if ((strategy
& SAVE_INLINE_VRS
)
24367 && !(strategy
& REST_INLINE_VRS
))
24368 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24369 if (!save_reg_p (i
))
24371 strategy
|= REST_INLINE_VRS
;
24375 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24376 saved is an out-of-line save or restore. Set up the value for
24377 the next test (excluding out-of-line gprs). */
24378 bool lr_save_p
= (info
->lr_save_p
24379 || !(strategy
& SAVE_INLINE_FPRS
)
24380 || !(strategy
& SAVE_INLINE_VRS
)
24381 || !(strategy
& REST_INLINE_FPRS
)
24382 || !(strategy
& REST_INLINE_VRS
));
24384 if (TARGET_MULTIPLE
24385 && !TARGET_POWERPC64
24386 && info
->first_gp_reg_save
< 31
24387 && !(flag_shrink_wrap
24388 && flag_shrink_wrap_separate
24389 && optimize_function_for_speed_p (cfun
)))
24392 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24393 if (save_reg_p (i
))
24397 /* Don't use store multiple if only one reg needs to be
24398 saved. This can occur for example when the ABI_V4 pic reg
24399 (r30) needs to be saved to make calls, but r31 is not
24401 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24404 /* Prefer store multiple for saves over out-of-line
24405 routines, since the store-multiple instruction will
24406 always be smaller. */
24407 strategy
|= SAVE_INLINE_GPRS
| SAVE_MULTIPLE
;
	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; in those cases it is advantageous to use load
	     multiple when available.  */
24418 if (info
->first_fp_reg_save
!= 64 || !lr_save_p
)
24419 strategy
|= REST_INLINE_GPRS
| REST_MULTIPLE
;
24423 /* Using the "exit" out-of-line routine does not improve code size
24424 if using it would require lr to be saved and if only saving one
24426 else if (!lr_save_p
&& info
->first_gp_reg_save
> 29)
24427 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24429 /* Don't ever restore fixed regs. */
24430 if ((strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24431 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24432 if (fixed_reg_p (i
))
24434 strategy
|= REST_INLINE_GPRS
;
24435 strategy
&= ~REST_MULTIPLE
;
24439 /* We can only use load multiple or the out-of-line routines to
24440 restore gprs if we've saved all the registers from
24441 first_gp_reg_save. Otherwise, we risk loading garbage.
24442 Of course, if we have saved out-of-line or used stmw then we know
24443 we haven't skipped any gprs. */
24444 if ((strategy
& (SAVE_INLINE_GPRS
| SAVE_MULTIPLE
)) == SAVE_INLINE_GPRS
24445 && (strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24446 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24447 if (!save_reg_p (i
))
24449 strategy
|= REST_INLINE_GPRS
;
24450 strategy
&= ~REST_MULTIPLE
;
24454 if (TARGET_ELF
&& TARGET_64BIT
)
24456 if (!(strategy
& SAVE_INLINE_FPRS
))
24457 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24458 else if (!(strategy
& SAVE_INLINE_GPRS
)
24459 && info
->first_fp_reg_save
== 64)
24460 strategy
|= SAVE_NOINLINE_GPRS_SAVES_LR
;
24462 else if (TARGET_AIX
&& !(strategy
& REST_INLINE_FPRS
))
24463 strategy
|= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
;
24465 if (TARGET_MACHO
&& !(strategy
& SAVE_INLINE_FPRS
))
24466 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24471 /* Calculate the stack information for the current function. This is
24472 complicated by having two separate calling sequences, the AIX calling
24473 sequence and the V.4 calling sequence.
24475 AIX (and Darwin/Mac OS X) stack frames look like:
24477 SP----> +---------------------------------------+
24478 | back chain to caller | 0 0
24479 +---------------------------------------+
24480 | saved CR | 4 8 (8-11)
24481 +---------------------------------------+
24483 +---------------------------------------+
24484 | reserved for compilers | 12 24
24485 +---------------------------------------+
24486 | reserved for binders | 16 32
24487 +---------------------------------------+
24488 | saved TOC pointer | 20 40
24489 +---------------------------------------+
24490 | Parameter save area (+padding*) (P) | 24 48
24491 +---------------------------------------+
24492 | Alloca space (A) | 24+P etc.
24493 +---------------------------------------+
24494 | Local variable space (L) | 24+P+A
24495 +---------------------------------------+
24496 | Float/int conversion temporary (X) | 24+P+A+L
24497 +---------------------------------------+
24498 | Save area for AltiVec registers (W) | 24+P+A+L+X
24499 +---------------------------------------+
24500 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24501 +---------------------------------------+
24502 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24503 +---------------------------------------+
	| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
	+---------------------------------------+
	| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
	+---------------------------------------+
24508 old SP->| back chain to caller's caller |
24509 +---------------------------------------+
24511 * If the alloca area is present, the parameter save area is
24512 padded so that the former starts 16-byte aligned.
24514 The required alignment for AIX configurations is two words (i.e., 8
24517 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24519 SP----> +---------------------------------------+
24520 | Back chain to caller | 0
24521 +---------------------------------------+
24522 | Save area for CR | 8
24523 +---------------------------------------+
24525 +---------------------------------------+
24526 | Saved TOC pointer | 24
24527 +---------------------------------------+
24528 | Parameter save area (+padding*) (P) | 32
24529 +---------------------------------------+
24530 | Alloca space (A) | 32+P
24531 +---------------------------------------+
24532 | Local variable space (L) | 32+P+A
24533 +---------------------------------------+
24534 | Save area for AltiVec registers (W) | 32+P+A+L
24535 +---------------------------------------+
24536 | AltiVec alignment padding (Y) | 32+P+A+L+W
24537 +---------------------------------------+
24538 | Save area for GP registers (G) | 32+P+A+L+W+Y
24539 +---------------------------------------+
24540 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24541 +---------------------------------------+
24542 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24543 +---------------------------------------+
24545 * If the alloca area is present, the parameter save area is
24546 padded so that the former starts 16-byte aligned.
24548 V.4 stack frames look like:
24550 SP----> +---------------------------------------+
24551 | back chain to caller | 0
24552 +---------------------------------------+
24553 | caller's saved LR | 4
24554 +---------------------------------------+
24555 | Parameter save area (+padding*) (P) | 8
24556 +---------------------------------------+
24557 | Alloca space (A) | 8+P
24558 +---------------------------------------+
24559 | Varargs save area (V) | 8+P+A
24560 +---------------------------------------+
24561 | Local variable space (L) | 8+P+A+V
24562 +---------------------------------------+
24563 | Float/int conversion temporary (X) | 8+P+A+V+L
24564 +---------------------------------------+
24565 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24566 +---------------------------------------+
24567 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24568 +---------------------------------------+
24569 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24570 +---------------------------------------+
24571 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24572 +---------------------------------------+
24573 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24574 +---------------------------------------+
24575 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24576 +---------------------------------------+
24577 old SP->| back chain to caller's caller |
24578 +---------------------------------------+
24580 * If the alloca area is present and the required alignment is
24581 16 bytes, the parameter save area is padded so that the
24582 alloca area starts 16-byte aligned.
24584 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24585 given. (But note below and in sysv4.h that we require only 8 and
	may round up the size of our stack frame anyway.  The historical
24587 reason is early versions of powerpc-linux which didn't properly
24588 align the stack at program startup. A happy side-effect is that
24589 -mno-eabi libraries can be used with -meabi programs.)
24591 The EABI configuration defaults to the V.4 layout. However,
24592 the stack alignment requirements may differ. If -mno-eabi is not
24593 given, the required stack alignment is 8 bytes; if -mno-eabi is
24594 given, the required alignment is 16 bytes. (But see V.4 comment
24597 #ifndef ABI_STACK_BOUNDARY
24598 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
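
/* Illustrative sketch only: the ELFv2 diagram above composes the frame by
   stacking the areas in order on top of a 32-byte fixed header.  The struct
   and function are invented for this example; the real computation lives in
   rs6000_stack_info below.  */
#if 0
struct elfv2_frame_sizes
{
  long parm;		/* P: parameter save area (+ padding)	*/
  long alloca_;		/* A: alloca space			*/
  long locals;		/* L: local variable space		*/
  long vrs;		/* W: AltiVec register save area	*/
  long vr_pad;		/* Y: AltiVec alignment padding		*/
  long gprs;		/* G: GPR save area			*/
  long fprs;		/* F: FPR save area			*/
};

static long
elfv2_total_frame (const struct elfv2_frame_sizes *s)
{
  /* 32 bytes of fixed header (back chain, CR, LR, TOC) precede P.  */
  return 32 + s->parm + s->alloca_ + s->locals
	 + s->vrs + s->vr_pad + s->gprs + s->fprs;
}
#endif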
24601 static rs6000_stack_t
*
24602 rs6000_stack_info (void)
24604 /* We should never be called for thunks, we are not set up for that. */
24605 gcc_assert (!cfun
->is_thunk
);
24607 rs6000_stack_t
*info
= &stack_info
;
24608 int reg_size
= TARGET_32BIT
? 4 : 8;
24613 HOST_WIDE_INT non_fixed_size
;
24614 bool using_static_chain_p
;
24616 if (reload_completed
&& info
->reload_completed
)
24619 memset (info
, 0, sizeof (*info
));
24620 info
->reload_completed
= reload_completed
;
24622 /* Select which calling sequence. */
24623 info
->abi
= DEFAULT_ABI
;
24625 /* Calculate which registers need to be saved & save area size. */
24626 info
->first_gp_reg_save
= first_reg_to_save ();
24627 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24628 even if it currently looks like we won't. Reload may need it to
24629 get at a constant; if so, it will have already created a constant
24630 pool entry for it. */
24631 if (((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
24632 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
24633 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
24634 && crtl
->uses_const_pool
24635 && info
->first_gp_reg_save
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
24636 first_gp
= RS6000_PIC_OFFSET_TABLE_REGNUM
;
24638 first_gp
= info
->first_gp_reg_save
;
24640 info
->gp_size
= reg_size
* (32 - first_gp
);
24642 info
->first_fp_reg_save
= first_fp_reg_to_save ();
24643 info
->fp_size
= 8 * (64 - info
->first_fp_reg_save
);
24645 info
->first_altivec_reg_save
= first_altivec_reg_to_save ();
24646 info
->altivec_size
= 16 * (LAST_ALTIVEC_REGNO
+ 1
24647 - info
->first_altivec_reg_save
);
24649 /* Does this function call anything? */
24650 info
->calls_p
= (!crtl
->is_leaf
|| cfun
->machine
->ra_needs_full_frame
);
24652 /* Determine if we need to save the condition code registers. */
24653 if (save_reg_p (CR2_REGNO
)
24654 || save_reg_p (CR3_REGNO
)
24655 || save_reg_p (CR4_REGNO
))
24657 info
->cr_save_p
= 1;
24658 if (DEFAULT_ABI
== ABI_V4
)
24659 info
->cr_size
= reg_size
;
24662 /* If the current function calls __builtin_eh_return, then we need
24663 to allocate stack space for registers that will hold data for
24664 the exception handler. */
24665 if (crtl
->calls_eh_return
)
24668 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; ++i
)
24671 ehrd_size
= i
* UNITS_PER_WORD
;
24676 /* In the ELFv2 ABI, we also need to allocate space for separate
24677 CR field save areas if the function calls __builtin_eh_return. */
24678 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
24680 /* This hard-codes that we have three call-saved CR fields. */
24681 ehcr_size
= 3 * reg_size
;
24682 /* We do *not* use the regular CR save mechanism. */
24683 info
->cr_save_p
= 0;
24688 /* Determine various sizes. */
24689 info
->reg_size
= reg_size
;
24690 info
->fixed_size
= RS6000_SAVE_AREA
;
24691 info
->vars_size
= RS6000_ALIGN (get_frame_size (), 8);
24692 if (cfun
->calls_alloca
)
24694 RS6000_ALIGN (crtl
->outgoing_args_size
+ info
->fixed_size
,
24695 STACK_BOUNDARY
/ BITS_PER_UNIT
) - info
->fixed_size
;
24697 info
->parm_size
= RS6000_ALIGN (crtl
->outgoing_args_size
,
24698 TARGET_ALTIVEC
? 16 : 8);
24699 if (FRAME_GROWS_DOWNWARD
)
24701 += RS6000_ALIGN (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
,
24702 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
)
24703 - (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
);
24705 if (TARGET_ALTIVEC_ABI
)
24706 info
->vrsave_mask
= compute_vrsave_mask ();
24708 if (TARGET_ALTIVEC_VRSAVE
&& info
->vrsave_mask
)
24709 info
->vrsave_size
= 4;
24711 compute_save_world_info (info
);
24713 /* Calculate the offsets. */
24714 switch (DEFAULT_ABI
)
24718 gcc_unreachable ();
24723 info
->fp_save_offset
= -info
->fp_size
;
24724 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24726 if (TARGET_ALTIVEC_ABI
)
24728 info
->vrsave_save_offset
= info
->gp_save_offset
- info
->vrsave_size
;
24730 /* Align stack so vector save area is on a quadword boundary.
24731 The padding goes above the vectors. */
24732 if (info
->altivec_size
!= 0)
24733 info
->altivec_padding_size
= info
->vrsave_save_offset
& 0xF;
24735 info
->altivec_save_offset
= info
->vrsave_save_offset
24736 - info
->altivec_padding_size
24737 - info
->altivec_size
;
24738 gcc_assert (info
->altivec_size
== 0
24739 || info
->altivec_save_offset
% 16 == 0);
24741 /* Adjust for AltiVec case. */
24742 info
->ehrd_offset
= info
->altivec_save_offset
- ehrd_size
;
24745 info
->ehrd_offset
= info
->gp_save_offset
- ehrd_size
;
24747 info
->ehcr_offset
= info
->ehrd_offset
- ehcr_size
;
24748 info
->cr_save_offset
= reg_size
; /* first word when 64-bit. */
24749 info
->lr_save_offset
= 2*reg_size
;
24753 info
->fp_save_offset
= -info
->fp_size
;
24754 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24755 info
->cr_save_offset
= info
->gp_save_offset
- info
->cr_size
;
24757 if (TARGET_ALTIVEC_ABI
)
24759 info
->vrsave_save_offset
= info
->cr_save_offset
- info
->vrsave_size
;
24761 /* Align stack so vector save area is on a quadword boundary. */
24762 if (info
->altivec_size
!= 0)
24763 info
->altivec_padding_size
= 16 - (-info
->vrsave_save_offset
% 16);
24765 info
->altivec_save_offset
= info
->vrsave_save_offset
24766 - info
->altivec_padding_size
24767 - info
->altivec_size
;
24769 /* Adjust for AltiVec case. */
24770 info
->ehrd_offset
= info
->altivec_save_offset
;
24773 info
->ehrd_offset
= info
->cr_save_offset
;
24775 info
->ehrd_offset
-= ehrd_size
;
24776 info
->lr_save_offset
= reg_size
;
24779 save_align
= (TARGET_ALTIVEC_ABI
|| DEFAULT_ABI
== ABI_DARWIN
) ? 16 : 8;
24780 info
->save_size
= RS6000_ALIGN (info
->fp_size
24782 + info
->altivec_size
24783 + info
->altivec_padding_size
24787 + info
->vrsave_size
,
24790 non_fixed_size
= info
->vars_size
+ info
->parm_size
+ info
->save_size
;
24792 info
->total_size
= RS6000_ALIGN (non_fixed_size
+ info
->fixed_size
,
24793 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
);
24795 /* Determine if we need to save the link register. */
24797 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
24799 && !TARGET_PROFILE_KERNEL
)
24800 || (DEFAULT_ABI
== ABI_V4
&& cfun
->calls_alloca
)
24801 #ifdef TARGET_RELOCATABLE
24802 || (DEFAULT_ABI
== ABI_V4
24803 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
24804 && !constant_pool_empty_p ())
24806 || rs6000_ra_ever_killed ())
24807 info
->lr_save_p
= 1;
24809 using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
24810 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
24811 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
24812 info
->savres_strategy
= rs6000_savres_strategy (info
, using_static_chain_p
);
24814 if (!(info
->savres_strategy
& SAVE_INLINE_GPRS
)
24815 || !(info
->savres_strategy
& SAVE_INLINE_FPRS
)
24816 || !(info
->savres_strategy
& SAVE_INLINE_VRS
)
24817 || !(info
->savres_strategy
& REST_INLINE_GPRS
)
24818 || !(info
->savres_strategy
& REST_INLINE_FPRS
)
24819 || !(info
->savres_strategy
& REST_INLINE_VRS
))
24820 info
->lr_save_p
= 1;
24822 if (info
->lr_save_p
)
24823 df_set_regs_ever_live (LR_REGNO
, true);
24825 /* Determine if we need to allocate any stack frame:
24827 For AIX we need to push the stack if a frame pointer is needed
24828 (because the stack might be dynamically adjusted), if we are
24829 debugging, if we make calls, or if the sum of fp_save, gp_save,
24830 and local variables are more than the space needed to save all
24831 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24832 + 18*8 = 288 (GPR13 reserved).
24834 For V.4 we don't have the stack cushion that AIX uses, but assume
24835 that the debugger can handle stackless frames. */
24840 else if (DEFAULT_ABI
== ABI_V4
)
24841 info
->push_p
= non_fixed_size
!= 0;
24843 else if (frame_pointer_needed
)
24846 else if (TARGET_XCOFF
&& write_symbols
!= NO_DEBUG
)
24850 info
->push_p
= non_fixed_size
> (TARGET_32BIT
? 220 : 288);
24856 debug_stack_info (rs6000_stack_t
*info
)
24858 const char *abi_string
;
24861 info
= rs6000_stack_info ();
24863 fprintf (stderr
, "\nStack information for function %s:\n",
24864 ((current_function_decl
&& DECL_NAME (current_function_decl
))
24865 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl
))
24870 default: abi_string
= "Unknown"; break;
24871 case ABI_NONE
: abi_string
= "NONE"; break;
24872 case ABI_AIX
: abi_string
= "AIX"; break;
24873 case ABI_ELFv2
: abi_string
= "ELFv2"; break;
24874 case ABI_DARWIN
: abi_string
= "Darwin"; break;
24875 case ABI_V4
: abi_string
= "V.4"; break;
24878 fprintf (stderr
, "\tABI = %5s\n", abi_string
);
24880 if (TARGET_ALTIVEC_ABI
)
24881 fprintf (stderr
, "\tALTIVEC ABI extensions enabled.\n");
24883 if (info
->first_gp_reg_save
!= 32)
24884 fprintf (stderr
, "\tfirst_gp_reg_save = %5d\n", info
->first_gp_reg_save
);
24886 if (info
->first_fp_reg_save
!= 64)
24887 fprintf (stderr
, "\tfirst_fp_reg_save = %5d\n", info
->first_fp_reg_save
);
24889 if (info
->first_altivec_reg_save
<= LAST_ALTIVEC_REGNO
)
24890 fprintf (stderr
, "\tfirst_altivec_reg_save = %5d\n",
24891 info
->first_altivec_reg_save
);
24893 if (info
->lr_save_p
)
24894 fprintf (stderr
, "\tlr_save_p = %5d\n", info
->lr_save_p
);
24896 if (info
->cr_save_p
)
24897 fprintf (stderr
, "\tcr_save_p = %5d\n", info
->cr_save_p
);
24899 if (info
->vrsave_mask
)
24900 fprintf (stderr
, "\tvrsave_mask = 0x%x\n", info
->vrsave_mask
);
24903 fprintf (stderr
, "\tpush_p = %5d\n", info
->push_p
);
24906 fprintf (stderr
, "\tcalls_p = %5d\n", info
->calls_p
);
24909 fprintf (stderr
, "\tgp_save_offset = %5d\n", info
->gp_save_offset
);
24912 fprintf (stderr
, "\tfp_save_offset = %5d\n", info
->fp_save_offset
);
24914 if (info
->altivec_size
)
24915 fprintf (stderr
, "\taltivec_save_offset = %5d\n",
24916 info
->altivec_save_offset
);
24918 if (info
->vrsave_size
)
24919 fprintf (stderr
, "\tvrsave_save_offset = %5d\n",
24920 info
->vrsave_save_offset
);
24922 if (info
->lr_save_p
)
24923 fprintf (stderr
, "\tlr_save_offset = %5d\n", info
->lr_save_offset
);
24925 if (info
->cr_save_p
)
24926 fprintf (stderr
, "\tcr_save_offset = %5d\n", info
->cr_save_offset
);
24928 if (info
->varargs_save_offset
)
24929 fprintf (stderr
, "\tvarargs_save_offset = %5d\n", info
->varargs_save_offset
);
24931 if (info
->total_size
)
24932 fprintf (stderr
, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24935 if (info
->vars_size
)
24936 fprintf (stderr
, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24939 if (info
->parm_size
)
24940 fprintf (stderr
, "\tparm_size = %5d\n", info
->parm_size
);
24942 if (info
->fixed_size
)
24943 fprintf (stderr
, "\tfixed_size = %5d\n", info
->fixed_size
);
24946 fprintf (stderr
, "\tgp_size = %5d\n", info
->gp_size
);
24949 fprintf (stderr
, "\tfp_size = %5d\n", info
->fp_size
);
24951 if (info
->altivec_size
)
24952 fprintf (stderr
, "\taltivec_size = %5d\n", info
->altivec_size
);
24954 if (info
->vrsave_size
)
24955 fprintf (stderr
, "\tvrsave_size = %5d\n", info
->vrsave_size
);
24957 if (info
->altivec_padding_size
)
24958 fprintf (stderr
, "\taltivec_padding_size= %5d\n",
24959 info
->altivec_padding_size
);
24962 fprintf (stderr
, "\tcr_size = %5d\n", info
->cr_size
);
24964 if (info
->save_size
)
24965 fprintf (stderr
, "\tsave_size = %5d\n", info
->save_size
);
24967 if (info
->reg_size
!= 4)
24968 fprintf (stderr
, "\treg_size = %5d\n", info
->reg_size
);
24970 fprintf (stderr
, "\tsave-strategy = %04x\n", info
->savres_strategy
);
24972 fprintf (stderr
, "\n");
24976 rs6000_return_addr (int count
, rtx frame
)
24978 /* Currently we don't optimize very well between prolog and body
24979 code and for PIC code the code can be actually quite bad, so
24980 don't try to be too clever here. */
24982 || ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
) && flag_pic
))
24984 cfun
->machine
->ra_needs_full_frame
= 1;
24991 plus_constant (Pmode
,
24993 (gen_rtx_MEM (Pmode
,
24994 memory_address (Pmode
, frame
))),
24995 RETURN_ADDRESS_OFFSET
)));
24998 cfun
->machine
->ra_need_lr
= 1;
24999 return get_hard_reg_initial_val (Pmode
, LR_REGNO
);
25002 /* Say whether a function is a candidate for sibcall handling or not. */
25005 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
25010 fntype
= TREE_TYPE (decl
);
25012 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
25014 /* We can't do it if the called function has more vector parameters
25015 than the current function; there's nowhere to put the VRsave code. */
25016 if (TARGET_ALTIVEC_ABI
25017 && TARGET_ALTIVEC_VRSAVE
25018 && !(decl
&& decl
== current_function_decl
))
25020 function_args_iterator args_iter
;
25024 /* Functions with vector parameters are required to have a
25025 prototype, so the argument type info must be available
25027 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
25028 if (TREE_CODE (type
) == VECTOR_TYPE
25029 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25032 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
25033 if (TREE_CODE (type
) == VECTOR_TYPE
25034 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25041 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25042 functions, because the callee may have a different TOC pointer to
25043 the caller and there's no way to ensure we restore the TOC when
25044 we return. With the secure-plt SYSV ABI we can't make non-local
25045 calls when -fpic/PIC because the plt call stubs use r30. */
25046 if (DEFAULT_ABI
== ABI_DARWIN
25047 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25049 && !DECL_EXTERNAL (decl
)
25050 && !DECL_WEAK (decl
)
25051 && (*targetm
.binds_local_p
) (decl
))
25052 || (DEFAULT_ABI
== ABI_V4
25053 && (!TARGET_SECURE_PLT
25056 && (*targetm
.binds_local_p
) (decl
)))))
25058 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
25060 if (!lookup_attribute ("longcall", attr_list
)
25061 || lookup_attribute ("shortcall", attr_list
))
25069 rs6000_ra_ever_killed (void)
25075 if (cfun
->is_thunk
)
25078 if (cfun
->machine
->lr_save_state
)
25079 return cfun
->machine
->lr_save_state
- 1;
25081 /* regs_ever_live has LR marked as used if any sibcalls are present,
25082 but this should not force saving and restoring in the
25083 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25084 clobbers LR, so that is inappropriate. */
25086 /* Also, the prologue can generate a store into LR that
25087 doesn't really count, like this:
25090 bcl to set PIC register
25094 When we're called from the epilogue, we need to avoid counting
25095 this as a store. */
25097 push_topmost_sequence ();
25098 top
= get_insns ();
25099 pop_topmost_sequence ();
25100 reg
= gen_rtx_REG (Pmode
, LR_REGNO
);
25102 for (insn
= NEXT_INSN (top
); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
25108 if (!SIBLING_CALL_P (insn
))
25111 else if (find_regno_note (insn
, REG_INC
, LR_REGNO
))
25113 else if (set_of (reg
, insn
) != NULL_RTX
25114 && !prologue_epilogue_contains (insn
))
25121 /* Emit instructions needed to load the TOC register.
25122 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25123 a constant pool; or for SVR4 -fpic. */
25126 rs6000_emit_load_toc_table (int fromprolog
)
25129 dest
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
25131 if (TARGET_ELF
&& TARGET_SECURE_PLT
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
)
25134 rtx lab
, tmp1
, tmp2
, got
;
25136 lab
= gen_label_rtx ();
25137 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (lab
));
25138 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25141 got
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25145 got
= rs6000_got_sym ();
25146 tmp1
= tmp2
= dest
;
25149 tmp1
= gen_reg_rtx (Pmode
);
25150 tmp2
= gen_reg_rtx (Pmode
);
25152 emit_insn (gen_load_toc_v4_PIC_1 (lab
));
25153 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
25154 emit_insn (gen_load_toc_v4_PIC_3b (tmp2
, tmp1
, got
, lab
));
25155 emit_insn (gen_load_toc_v4_PIC_3c (dest
, tmp2
, got
, lab
));
25157 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
25159 emit_insn (gen_load_toc_v4_pic_si ());
25160 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25162 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
25165 rtx temp0
= (fromprolog
25166 ? gen_rtx_REG (Pmode
, 0)
25167 : gen_reg_rtx (Pmode
));
25173 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
25174 symF
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25176 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
25177 symL
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25179 emit_insn (gen_load_toc_v4_PIC_1 (symF
));
25180 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25181 emit_insn (gen_load_toc_v4_PIC_2 (temp0
, dest
, symL
, symF
));
25187 tocsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25189 lab
= gen_label_rtx ();
25190 emit_insn (gen_load_toc_v4_PIC_1b (tocsym
, lab
));
25191 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25192 if (TARGET_LINK_STACK
)
25193 emit_insn (gen_addsi3 (dest
, dest
, GEN_INT (4)));
25194 emit_move_insn (temp0
, gen_rtx_MEM (Pmode
, dest
));
25196 emit_insn (gen_addsi3 (dest
, temp0
, dest
));
25198 else if (TARGET_ELF
&& !TARGET_AIX
&& flag_pic
== 0 && TARGET_MINIMAL_TOC
)
25200 /* This is for AIX code running in non-PIC ELF32. */
25201 rtx realsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25204 emit_insn (gen_elf_high (dest
, realsym
));
25205 emit_insn (gen_elf_low (dest
, dest
, realsym
));
25209 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
25212 emit_insn (gen_load_toc_aix_si (dest
));
25214 emit_insn (gen_load_toc_aix_di (dest
));
25218 /* Emit instructions to restore the link register after determining where
25219 its value has been stored. */
25222 rs6000_emit_eh_reg_restore (rtx source
, rtx scratch
)
25224 rs6000_stack_t
*info
= rs6000_stack_info ();
25227 operands
[0] = source
;
25228 operands
[1] = scratch
;
25230 if (info
->lr_save_p
)
25232 rtx frame_rtx
= stack_pointer_rtx
;
25233 HOST_WIDE_INT sp_offset
= 0;
25236 if (frame_pointer_needed
25237 || cfun
->calls_alloca
25238 || info
->total_size
> 32767)
25240 tmp
= gen_frame_mem (Pmode
, frame_rtx
);
25241 emit_move_insn (operands
[1], tmp
);
25242 frame_rtx
= operands
[1];
25244 else if (info
->push_p
)
25245 sp_offset
= info
->total_size
;
25247 tmp
= plus_constant (Pmode
, frame_rtx
,
25248 info
->lr_save_offset
+ sp_offset
);
25249 tmp
= gen_frame_mem (Pmode
, tmp
);
25250 emit_move_insn (tmp
, operands
[0]);
25253 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNO
), operands
[0]);
25255 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25256 state of lr_save_p so any change from here on would be a bug. In
25257 particular, stop rs6000_ra_ever_killed from considering the SET
25258 of lr we may have added just above. */
25259 cfun
->machine
->lr_save_state
= info
->lr_save_p
+ 1;
25262 static GTY(()) alias_set_type set
= -1;
25265 get_TOC_alias_set (void)
25268 set
= new_alias_set ();
25272 /* This returns nonzero if the current function uses the TOC. This is
25273 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25274 is generated by the ABI_V4 load_toc_* patterns. */
25281 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
25284 rtx pat
= PATTERN (insn
);
25287 if (GET_CODE (pat
) == PARALLEL
)
25288 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
25290 rtx sub
= XVECEXP (pat
, 0, i
);
25291 if (GET_CODE (sub
) == USE
)
25293 sub
= XEXP (sub
, 0);
25294 if (GET_CODE (sub
) == UNSPEC
25295 && XINT (sub
, 1) == UNSPEC_TOC
)
25305 create_TOC_reference (rtx symbol
, rtx largetoc_reg
)
25307 rtx tocrel
, tocreg
, hi
;
25309 if (TARGET_DEBUG_ADDR
)
25311 if (GET_CODE (symbol
) == SYMBOL_REF
)
25312 fprintf (stderr
, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25316 fprintf (stderr
, "\ncreate_TOC_reference, code %s:\n",
25317 GET_RTX_NAME (GET_CODE (symbol
)));
25318 debug_rtx (symbol
);
25322 if (!can_create_pseudo_p ())
25323 df_set_regs_ever_live (TOC_REGISTER
, true);
25325 tocreg
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
25326 tocrel
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, symbol
, tocreg
), UNSPEC_TOCREL
);
25327 if (TARGET_CMODEL
== CMODEL_SMALL
|| can_create_pseudo_p ())
25330 hi
= gen_rtx_HIGH (Pmode
, copy_rtx (tocrel
));
25331 if (largetoc_reg
!= NULL
)
25333 emit_move_insn (largetoc_reg
, hi
);
25336 return gen_rtx_LO_SUM (Pmode
, hi
, tocrel
);
25339 /* Issue assembly directives that create a reference to the given DWARF
25340 FRAME_TABLE_LABEL from the current function section. */
25342 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label
)
25344 fprintf (asm_out_file
, "\t.ref %s\n",
25345 (* targetm
.strip_name_encoding
) (frame_table_label
));
25348 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25349 and the change to the stack pointer. */
25352 rs6000_emit_stack_tie (rtx fp
, bool hard_frame_needed
)
25359 regs
[i
++] = gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25360 if (hard_frame_needed
)
25361 regs
[i
++] = gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
);
25362 if (!(REGNO (fp
) == STACK_POINTER_REGNUM
25363 || (hard_frame_needed
25364 && REGNO (fp
) == HARD_FRAME_POINTER_REGNUM
)))
25367 p
= rtvec_alloc (i
);
25370 rtx mem
= gen_frame_mem (BLKmode
, regs
[i
]);
25371 RTVEC_ELT (p
, i
) = gen_rtx_SET (mem
, const0_rtx
);
25374 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode
, p
)));
25377 /* Emit the correct code for allocating stack space, as insns.
25378 If COPY_REG, make sure a copy of the old frame is left there.
25379 The generated code may use hard register 0 as a temporary. */
25382 rs6000_emit_allocate_stack (HOST_WIDE_INT size
, rtx copy_reg
, int copy_off
)
25385 rtx stack_reg
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25386 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
25387 rtx todec
= gen_int_mode (-size
, Pmode
);
25390 if (INTVAL (todec
) != -size
)
25392 warning (0, "stack frame too large");
25393 emit_insn (gen_trap ());
25397 if (crtl
->limit_stack
)
25399 if (REG_P (stack_limit_rtx
)
25400 && REGNO (stack_limit_rtx
) > 1
25401 && REGNO (stack_limit_rtx
) <= 31)
25404 = gen_add3_insn (tmp_reg
, stack_limit_rtx
, GEN_INT (size
));
25407 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
, const0_rtx
));
25409 else if (GET_CODE (stack_limit_rtx
) == SYMBOL_REF
25411 && DEFAULT_ABI
== ABI_V4
25414 rtx toload
= gen_rtx_CONST (VOIDmode
,
25415 gen_rtx_PLUS (Pmode
,
25419 emit_insn (gen_elf_high (tmp_reg
, toload
));
25420 emit_insn (gen_elf_low (tmp_reg
, tmp_reg
, toload
));
25421 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
,
25425 warning (0, "stack limit expression is not supported");
25431 emit_insn (gen_add3_insn (copy_reg
, stack_reg
, GEN_INT (copy_off
)));
25433 emit_move_insn (copy_reg
, stack_reg
);
25438 /* Need a note here so that try_split doesn't get confused. */
25439 if (get_last_insn () == NULL_RTX
)
25440 emit_note (NOTE_INSN_DELETED
);
25441 insn
= emit_move_insn (tmp_reg
, todec
);
25442 try_split (PATTERN (insn
), insn
, 0);
25446 insn
= emit_insn (TARGET_32BIT
25447 ? gen_movsi_update_stack (stack_reg
, stack_reg
,
25449 : gen_movdi_di_update_stack (stack_reg
, stack_reg
,
25450 todec
, stack_reg
));
25451 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25452 it now and set the alias set/attributes. The above gen_*_update
25453 calls will generate a PARALLEL with the MEM set being the first
25455 par
= PATTERN (insn
);
25456 gcc_assert (GET_CODE (par
) == PARALLEL
);
25457 set
= XVECEXP (par
, 0, 0);
25458 gcc_assert (GET_CODE (set
) == SET
);
25459 mem
= SET_DEST (set
);
25460 gcc_assert (MEM_P (mem
));
25461 MEM_NOTRAP_P (mem
) = 1;
25462 set_mem_alias_set (mem
, get_frame_alias_set ());
25464 RTX_FRAME_RELATED_P (insn
) = 1;
25465 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25466 gen_rtx_SET (stack_reg
, gen_rtx_PLUS (Pmode
, stack_reg
,
25467 GEN_INT (-size
))));
25471 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25473 #if PROBE_INTERVAL > 32768
25474 #error Cannot use indexed addressing mode for stack probing
25477 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25478 inclusive. These are offsets from the current stack pointer. */
25481 rs6000_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
25483 /* See if we have a constant small number of probes to generate. If so,
25484 that's the easy case. */
25485 if (first
+ size
<= 32768)
25489 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25490 it exceeds SIZE. If only one probe is needed, this will not
25491 generate any code. Then probe at FIRST + SIZE. */
25492 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
25493 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25496 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25500 /* Otherwise, do the same as above, but in a loop. Note that we must be
25501 extra careful with variables wrapping around because we might be at
25502 the very top (or the very bottom) of the address space and we have
25503 to be able to handle this case properly; in particular, we use an
25504 equality test for the loop condition. */
25507 HOST_WIDE_INT rounded_size
;
25508 rtx r12
= gen_rtx_REG (Pmode
, 12);
25509 rtx r0
= gen_rtx_REG (Pmode
, 0);
25511 /* Sanity check for the addressing mode we're going to use. */
25512 gcc_assert (first
<= 32768);
25514 /* Step 1: round SIZE to the previous multiple of the interval. */
25516 rounded_size
= ROUND_DOWN (size
, PROBE_INTERVAL
);
25519 /* Step 2: compute initial and final value of the loop counter. */
25521 /* TEST_ADDR = SP + FIRST. */
25522 emit_insn (gen_rtx_SET (r12
, plus_constant (Pmode
, stack_pointer_rtx
,
25525 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25526 if (rounded_size
> 32768)
25528 emit_move_insn (r0
, GEN_INT (-rounded_size
));
25529 emit_insn (gen_rtx_SET (r0
, gen_rtx_PLUS (Pmode
, r12
, r0
)));
25532 emit_insn (gen_rtx_SET (r0
, plus_constant (Pmode
, r12
,
25536 /* Step 3: the loop
25540 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25543 while (TEST_ADDR != LAST_ADDR)
25545 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25546 until it is equal to ROUNDED_SIZE. */
25549 emit_insn (gen_probe_stack_rangedi (r12
, r12
, r0
));
25551 emit_insn (gen_probe_stack_rangesi (r12
, r12
, r0
));
25554 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25555 that SIZE is equal to ROUNDED_SIZE. */
25557 if (size
!= rounded_size
)
25558 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
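
/* Illustrative sketch only: the probe offsets generated above, expressed as
   a plain loop.  probe_at is an invented stand-in for emit_stack_probe, and
   the 4K interval is an assumption; the rounding mirrors the ROUND_DOWN
   step in the function.  */
#if 0
static void
probe_stack_range_offsets (long first, long size,
			   void (*probe_at) (long sp_offset))
{
  const long interval = 1L << 12;		/* assumed PROBE_INTERVAL */
  long rounded_size = size - (size % interval);

  /* Probe at FIRST + N * interval for N = 1, 2, ... up to ROUNDED_SIZE.  */
  for (long i = interval; i <= rounded_size; i += interval)
    probe_at (first + i);

  /* Final probe at FIRST + SIZE if SIZE was not a multiple of the interval.  */
  if (size != rounded_size)
    probe_at (first + size);
}
#endif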
25562 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25563 absolute addresses. */
25566 output_probe_stack_range (rtx reg1
, rtx reg2
)
25568 static int labelno
= 0;
25572 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
25575 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
25577 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25579 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
25580 output_asm_insn ("addi %0,%0,%1", xops
);
25582 /* Probe at TEST_ADDR. */
25583 xops
[1] = gen_rtx_REG (Pmode
, 0);
25584 output_asm_insn ("stw %1,0(%0)", xops
);
25586 /* Test if TEST_ADDR == LAST_ADDR. */
25589 output_asm_insn ("cmpd 0,%0,%1", xops
);
25591 output_asm_insn ("cmpw 0,%0,%1", xops
);
25594 fputs ("\tbne 0,", asm_out_file
);
25595 assemble_name_raw (asm_out_file
, loop_lab
);
25596 fputc ('\n', asm_out_file
);
25601 /* This function is called when rs6000_frame_related is processing
25602 SETs within a PARALLEL, and returns whether the REGNO save ought to
25603 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25604 for out-of-line register save functions, store multiple, and the
25605 Darwin world_save. They may contain registers that don't really
25609 interesting_frame_related_regno (unsigned int regno
)
25611 /* Saves apparently of r0 are actually saving LR. It doesn't make
25612 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25613 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25614 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25615 as frame related. */
25618 /* If we see CR2 then we are here on a Darwin world save. Saves of
25619 CR2 signify the whole CR is being saved. This is a long-standing
25620 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25621 that CR needs to be saved. */
25622 if (regno
== CR2_REGNO
)
25624 /* Omit frame info for any user-defined global regs. If frame info
25625 is supplied for them, frame unwinding will restore a user reg.
25626 Also omit frame info for any reg we don't need to save, as that
25627 bloats frame info and can cause problems with shrink wrapping.
25628 Since global regs won't be seen as needing to be saved, both of
25629 these conditions are covered by save_reg_p. */
25630 return save_reg_p (regno
);
25633 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25634 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25635 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25636 deduce these equivalences by itself so it wasn't necessary to hold
25637 its hand so much. Don't be tempted to always supply d2_f_d_e with
25638 the actual cfa register, ie. r31 when we are using a hard frame
25639 pointer. That fails when saving regs off r1, and sched moves the
25640 r31 setup past the reg saves. */
25643 rs6000_frame_related (rtx_insn
*insn
, rtx reg
, HOST_WIDE_INT val
,
25644 rtx reg2
, rtx repl2
)
25648 if (REGNO (reg
) == STACK_POINTER_REGNUM
)
25650 gcc_checking_assert (val
== 0);
25654 repl
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
25657 rtx pat
= PATTERN (insn
);
25658 if (!repl
&& !reg2
)
25660 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25661 if (GET_CODE (pat
) == PARALLEL
)
25662 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25663 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25665 rtx set
= XVECEXP (pat
, 0, i
);
25667 if (!REG_P (SET_SRC (set
))
25668 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25669 RTX_FRAME_RELATED_P (set
) = 1;
25671 RTX_FRAME_RELATED_P (insn
) = 1;
25675 /* We expect that 'pat' is either a SET or a PARALLEL containing
25676 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25677 are important so they all have to be marked RTX_FRAME_RELATED_P.
25678 Call simplify_replace_rtx on the SETs rather than the whole insn
25679 so as to leave the other stuff alone (for example USE of r12). */
25681 set_used_flags (pat
);
25682 if (GET_CODE (pat
) == SET
)
25685 pat
= simplify_replace_rtx (pat
, reg
, repl
);
25687 pat
= simplify_replace_rtx (pat
, reg2
, repl2
);
25689 else if (GET_CODE (pat
) == PARALLEL
)
25691 pat
= shallow_copy_rtx (pat
);
25692 XVEC (pat
, 0) = shallow_copy_rtvec (XVEC (pat
, 0));
25694 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25695 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25697 rtx set
= XVECEXP (pat
, 0, i
);
25700 set
= simplify_replace_rtx (set
, reg
, repl
);
25702 set
= simplify_replace_rtx (set
, reg2
, repl2
);
25703 XVECEXP (pat
, 0, i
) = set
;
25705 if (!REG_P (SET_SRC (set
))
25706 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25707 RTX_FRAME_RELATED_P (set
) = 1;
25711 gcc_unreachable ();
25713 RTX_FRAME_RELATED_P (insn
) = 1;
25714 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, copy_rtx_if_shared (pat
));
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (vrsave,
                   gen_rtx_UNSPEC_VOLATILE (SImode,
                                            gen_rtvec (2, reg, vrsave),
                                            UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

        (set (reg 999) (mem))
        (parallel [ (set (reg vrsave) (unspec blah))
                    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
        if (!epiloguep || call_used_regs[i])
          clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
                                             gen_rtx_REG (V4SImode, i));
        else
          {
            rtx reg = gen_rtx_REG (V4SImode, i);

            clobs[nclobs++]
              = gen_rtx_SET (reg,
                             gen_rtx_UNSPEC (V4SImode,
                                             gen_rtvec (1, reg), 27));
          }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
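/* For example, a typical caller stores a single register relative to the
   frame base with something like
     emit_insn (gen_frame_store (gen_rtx_REG (reg_mode, regno),
                                 frame_reg_rtx, offset));
   as the prologue code below does (illustrative use, not an additional
   helper).  */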
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
                       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
                               NULL_RTX, NULL_RTX);
}

/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
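/* ??? The second index of savres_routine_syms presumably spans the
   possible values of the SEL selector passed to rs6000_savres_routine_sym
   (save vs. restore, LR handling, and register class); compare the
   "select <= 12" assertion in that function.  */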
25850 /* Return the name for an out-of-line register save/restore routine.
25851 We are saving/restoring GPRs if GPR is true. */
25854 rs6000_savres_routine_name (int regno
, int sel
)
25856 const char *prefix
= "";
25857 const char *suffix
= "";
25859 /* Different targets are supposed to define
25860 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25861 routine name could be defined with:
25863 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25865 This is a nice idea in practice, but in reality, things are
25866 complicated in several ways:
25868 - ELF targets have save/restore routines for GPRs.
25870 - PPC64 ELF targets have routines for save/restore of GPRs that
25871 differ in what they do with the link register, so having a set
25872 prefix doesn't work. (We only use one of the save routines at
25873 the moment, though.)
25875 - PPC32 elf targets have "exit" versions of the restore routines
25876 that restore the link register and can save some extra space.
25877 These require an extra suffix. (There are also "tail" versions
25878 of the restore routines and "GOT" versions of the save routines,
25879 but we don't generate those at present. Same problems apply,
25882 We deal with all this by synthesizing our own prefix/suffix and
25883 using that for the simple sprintf call shown above. */
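  /* For instance, with the prefixes synthesized below, an ELFv2 save of
     GPRs from r29 upward that also saves LR ends up as "_savegpr0_29",
     while a V.4 restore of FPRs from f14 ends up as "_restfpr_14".
     (Illustrative names; the exact string depends on the SEL bits
     handled below.)  */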
25884 if (DEFAULT_ABI
== ABI_V4
)
25889 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
25890 prefix
= (sel
& SAVRES_SAVE
) ? "_savegpr_" : "_restgpr_";
25891 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
25892 prefix
= (sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_";
25893 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
25894 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
25898 if ((sel
& SAVRES_LR
))
25901 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25903 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25904 /* No out-of-line save/restore routines for GPRs on AIX. */
25905 gcc_assert (!TARGET_AIX
|| (sel
& SAVRES_REG
) != SAVRES_GPR
);
25909 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
25910 prefix
= ((sel
& SAVRES_SAVE
)
25911 ? ((sel
& SAVRES_LR
) ? "_savegpr0_" : "_savegpr1_")
25912 : ((sel
& SAVRES_LR
) ? "_restgpr0_" : "_restgpr1_"));
25913 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
25915 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25916 if ((sel
& SAVRES_LR
))
25917 prefix
= ((sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_");
25921 prefix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_PREFIX
: RESTORE_FP_PREFIX
;
25922 suffix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_SUFFIX
: RESTORE_FP_SUFFIX
;
25925 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
25926 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
25931 if (DEFAULT_ABI
== ABI_DARWIN
)
25933 /* The Darwin approach is (slightly) different, in order to be
25934 compatible with code generated by the system toolchain. There is a
25935 single symbol for the start of save sequence, and the code here
25936 embeds an offset into that code on the basis of the first register
25938 prefix
= (sel
& SAVRES_SAVE
) ? "save" : "rest" ;
25939 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
25940 sprintf (savres_routine_name
, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix
,
25941 ((sel
& SAVRES_LR
) ? "x" : ""), (regno
== 13 ? "" : "+"),
25942 (regno
- 13) * 4, prefix
, regno
);
25943 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
25944 sprintf (savres_routine_name
, "*%sFP%s%.0d ; %s f%d-f31", prefix
,
25945 (regno
== 14 ? "" : "+"), (regno
- 14) * 4, prefix
, regno
);
25946 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
25947 sprintf (savres_routine_name
, "*%sVEC%s%.0d ; %s v%d-v31", prefix
,
25948 (regno
== 20 ? "" : "+"), (regno
- 20) * 8, prefix
, regno
);
25953 sprintf (savres_routine_name
, "%s%d%s", prefix
, regno
, suffix
);
25955 return savres_routine_name
;
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save - 32
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
               : -1);
  rtx sym;
  int select = sel;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
              && regno <= LAST_SAVRES_REGISTER
              && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select];
  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (regno, sel);

      sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx_insn *
rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  /* If there is nothing to do, don't do anything.  */
  if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
    return NULL;

  rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4)
    return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
                                             GEN_INT (frame_off)));

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));

  return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
26040 /* Construct a parallel rtx describing the effect of a call to an
26041 out-of-line register save/restore routine, and emit the insn
26042 or jump_insn as appropriate. */
26045 rs6000_emit_savres_rtx (rs6000_stack_t
*info
,
26046 rtx frame_reg_rtx
, int save_area_offset
, int lr_offset
,
26047 machine_mode reg_mode
, int sel
)
26050 int offset
, start_reg
, end_reg
, n_regs
, use_reg
;
26051 int reg_size
= GET_MODE_SIZE (reg_mode
);
26058 start_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26059 ? info
->first_gp_reg_save
26060 : (sel
& SAVRES_REG
) == SAVRES_FPR
26061 ? info
->first_fp_reg_save
26062 : (sel
& SAVRES_REG
) == SAVRES_VR
26063 ? info
->first_altivec_reg_save
26065 end_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26067 : (sel
& SAVRES_REG
) == SAVRES_FPR
26069 : (sel
& SAVRES_REG
) == SAVRES_VR
26070 ? LAST_ALTIVEC_REGNO
+ 1
26072 n_regs
= end_reg
- start_reg
;
26073 p
= rtvec_alloc (3 + ((sel
& SAVRES_LR
) ? 1 : 0)
26074 + ((sel
& SAVRES_REG
) == SAVRES_VR
? 1 : 0)
26077 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26078 RTVEC_ELT (p
, offset
++) = ret_rtx
;
26080 RTVEC_ELT (p
, offset
++)
26081 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
26083 sym
= rs6000_savres_routine_sym (info
, sel
);
26084 RTVEC_ELT (p
, offset
++) = gen_rtx_USE (VOIDmode
, sym
);
26086 use_reg
= ptr_regno_for_savres (sel
);
26087 if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26089 /* Vector regs are saved/restored using [reg+reg] addressing. */
26090 RTVEC_ELT (p
, offset
++)
26091 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26092 RTVEC_ELT (p
, offset
++)
26093 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 0));
26096 RTVEC_ELT (p
, offset
++)
26097 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26099 for (i
= 0; i
< end_reg
- start_reg
; i
++)
26100 RTVEC_ELT (p
, i
+ offset
)
26101 = gen_frame_set (gen_rtx_REG (reg_mode
, start_reg
+ i
),
26102 frame_reg_rtx
, save_area_offset
+ reg_size
* i
,
26103 (sel
& SAVRES_SAVE
) != 0);
26105 if ((sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26106 RTVEC_ELT (p
, i
+ offset
)
26107 = gen_frame_store (gen_rtx_REG (Pmode
, 0), frame_reg_rtx
, lr_offset
);
26109 par
= gen_rtx_PARALLEL (VOIDmode
, p
);
26111 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26113 insn
= emit_jump_insn (par
);
26114 JUMP_LABEL (insn
) = ret_rtx
;
26117 insn
= emit_insn (par
);
/* Emit prologue code to store CR fields that need to be saved into REG.  This
   function should only be called when moving the non-volatile CRs to REG, it
   is not a general purpose routine to move the entire set of CRs to REG.
   Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
   volatile CRs.  */

static void
rs6000_emit_prologue_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
        {
          rtvec p = rtvec_alloc (1);
          rtvec r = rtvec_alloc (2);
          RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
          RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
          RTVEC_ELT (p, 0)
            = gen_rtx_SET (reg,
                           gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

          emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
          return;
        }

      /* ??? It might be better to handle count == 2 / 3 cases here
         as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_prologue_movesi_from_cr (reg));
}
/* Return whether the split-stack arg pointer (r12) is used.  */

static bool
split_stack_arg_pointer_used_p (void)
{
  /* If the pseudo holding the arg pointer is no longer a pseudo,
     then the arg pointer is used.  */
  if (cfun->machine->split_stack_arg_pointer != NULL_RTX
      && (!REG_P (cfun->machine->split_stack_arg_pointer)
          || (REGNO (cfun->machine->split_stack_arg_pointer)
              < FIRST_PSEUDO_REGISTER)))
    return true;

  /* Unfortunately we also need to do some code scanning, since
     r12 may have been substituted for the pseudo.  */
  rtx_insn *insn;
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
        /* A call destroys r12.  */
        if (CALL_P (insn))
          return false;

        df_ref use;
        FOR_EACH_INSN_USE (use, insn)
          {
            rtx x = DF_REF_REG (use);
            if (REG_P (x) && REGNO (x) == 12)
              return true;
          }

        df_ref def;
        FOR_EACH_INSN_DEF (def, insn)
          {
            rtx x = DF_REF_REG (def);
            if (REG_P (x) && REGNO (x) == 12)
              return false;
          }
      }
  return bitmap_bit_p (DF_LR_OUT (bb), 12);
}
/* Return whether we need to emit an ELFv2 global entry point prologue.  */

static bool
rs6000_global_entry_point_needed_p (void)
{
  /* Only needed for the ELFv2 ABI.  */
  if (DEFAULT_ABI != ABI_ELFv2)
    return false;

  /* With -msingle-pic-base, we assume the whole program shares the same
     TOC, so no global entry point prologues are needed anywhere.  */
  if (TARGET_SINGLE_PIC_BASE)
    return false;

  /* Ensure we have a global entry point for thunks.  ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (cfun->is_thunk)
    return true;

  /* For regular functions, rs6000_emit_prologue sets this flag if the
     routine ever uses the TOC pointer.  */
  return cfun->machine->r2_setup_needed;
}
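/* The r2 setup code itself (the addis/addi pair, or the load of the TOC
   offset word in the large code model) is emitted textually by
   rs6000_output_function_prologue further below.  */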
26229 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26231 rs6000_get_separate_components (void)
26233 rs6000_stack_t
*info
= rs6000_stack_info ();
26235 if (WORLD_SAVE_P (info
))
26238 gcc_assert (!(info
->savres_strategy
& SAVE_MULTIPLE
)
26239 && !(info
->savres_strategy
& REST_MULTIPLE
));
26241 /* Component 0 is the save/restore of LR (done via GPR0).
26242 Components 13..31 are the save/restore of GPR13..GPR31.
26243 Components 46..63 are the save/restore of FPR14..FPR31. */
26245 cfun
->machine
->n_components
= 64;
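  /* That is, GPR components are numbered by their hard register number
     and FPR components by theirs (32 + n for FPRn), so the bitmaps
     below can be indexed directly by register number, with component 0
     reserved for LR.  */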
26247 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26248 bitmap_clear (components
);
26250 int reg_size
= TARGET_32BIT
? 4 : 8;
26251 int fp_reg_size
= 8;
26253 /* The GPRs we need saved to the frame. */
26254 if ((info
->savres_strategy
& SAVE_INLINE_GPRS
)
26255 && (info
->savres_strategy
& REST_INLINE_GPRS
))
26257 int offset
= info
->gp_save_offset
;
26259 offset
+= info
->total_size
;
26261 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26263 if (IN_RANGE (offset
, -0x8000, 0x7fff)
26264 && save_reg_p (regno
))
26265 bitmap_set_bit (components
, regno
);
26267 offset
+= reg_size
;
26271 /* Don't mess with the hard frame pointer. */
26272 if (frame_pointer_needed
)
26273 bitmap_clear_bit (components
, HARD_FRAME_POINTER_REGNUM
);
26275 /* Don't mess with the fixed TOC register. */
26276 if ((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
26277 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
26278 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
26279 bitmap_clear_bit (components
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
26281 /* The FPRs we need saved to the frame. */
26282 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
)
26283 && (info
->savres_strategy
& REST_INLINE_FPRS
))
26285 int offset
= info
->fp_save_offset
;
26287 offset
+= info
->total_size
;
26289 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26291 if (IN_RANGE (offset
, -0x8000, 0x7fff) && save_reg_p (regno
))
26292 bitmap_set_bit (components
, regno
);
26294 offset
+= fp_reg_size
;
26298 /* Optimize LR save and restore if we can. This is component 0. Any
26299 out-of-line register save/restore routines need LR. */
26300 if (info
->lr_save_p
26301 && !(flag_pic
&& (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
26302 && (info
->savres_strategy
& SAVE_INLINE_GPRS
)
26303 && (info
->savres_strategy
& REST_INLINE_GPRS
)
26304 && (info
->savres_strategy
& SAVE_INLINE_FPRS
)
26305 && (info
->savres_strategy
& REST_INLINE_FPRS
)
26306 && (info
->savres_strategy
& SAVE_INLINE_VRS
)
26307 && (info
->savres_strategy
& REST_INLINE_VRS
))
26309 int offset
= info
->lr_save_offset
;
26311 offset
+= info
->total_size
;
26312 if (IN_RANGE (offset
, -0x8000, 0x7fff))
26313 bitmap_set_bit (components
, 0);
26319 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26321 rs6000_components_for_bb (basic_block bb
)
26323 rs6000_stack_t
*info
= rs6000_stack_info ();
26325 bitmap in
= DF_LIVE_IN (bb
);
26326 bitmap gen
= &DF_LIVE_BB_INFO (bb
)->gen
;
26327 bitmap kill
= &DF_LIVE_BB_INFO (bb
)->kill
;
26329 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26330 bitmap_clear (components
);
26332 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26335 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26336 if (bitmap_bit_p (in
, regno
)
26337 || bitmap_bit_p (gen
, regno
)
26338 || bitmap_bit_p (kill
, regno
))
26339 bitmap_set_bit (components
, regno
);
26342 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26343 if (bitmap_bit_p (in
, regno
)
26344 || bitmap_bit_p (gen
, regno
)
26345 || bitmap_bit_p (kill
, regno
))
26346 bitmap_set_bit (components
, regno
);
26348 /* The link register. */
26349 if (bitmap_bit_p (in
, LR_REGNO
)
26350 || bitmap_bit_p (gen
, LR_REGNO
)
26351 || bitmap_bit_p (kill
, LR_REGNO
))
26352 bitmap_set_bit (components
, 0);
/* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS.  */
static void
rs6000_disqualify_components (sbitmap components, edge e,
                              sbitmap edge_components, bool /*is_prologue*/)
{
  /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
     live where we want to place that code.  */
  if (bitmap_bit_p (edge_components, 0)
      && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
    {
      if (dump_file)
        fprintf (dump_file, "Disqualifying LR because GPR0 is live "
                 "on entry to bb %d\n", e->dest->index);
      bitmap_clear_bit (components, 0);
    }
}
26374 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26376 rs6000_emit_prologue_components (sbitmap components
)
26378 rs6000_stack_t
*info
= rs6000_stack_info ();
26379 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26380 ? HARD_FRAME_POINTER_REGNUM
26381 : STACK_POINTER_REGNUM
);
26383 machine_mode reg_mode
= Pmode
;
26384 int reg_size
= TARGET_32BIT
? 4 : 8;
26385 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26387 int fp_reg_size
= 8;
26389 /* Prologue for LR. */
26390 if (bitmap_bit_p (components
, 0))
26392 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26393 rtx_insn
*insn
= emit_move_insn (reg
, gen_rtx_REG (reg_mode
, LR_REGNO
));
26394 RTX_FRAME_RELATED_P (insn
) = 1;
26395 add_reg_note (insn
, REG_CFA_REGISTER
, NULL
);
26397 int offset
= info
->lr_save_offset
;
26399 offset
+= info
->total_size
;
26401 insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26402 RTX_FRAME_RELATED_P (insn
) = 1;
26403 rtx lr
= gen_rtx_REG (reg_mode
, LR_REGNO
);
26404 rtx mem
= copy_rtx (SET_DEST (single_set (insn
)));
26405 add_reg_note (insn
, REG_CFA_OFFSET
, gen_rtx_SET (mem
, lr
));
26408 /* Prologue for the GPRs. */
26409 int offset
= info
->gp_save_offset
;
26411 offset
+= info
->total_size
;
26413 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26415 if (bitmap_bit_p (components
, i
))
26417 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26418 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26419 RTX_FRAME_RELATED_P (insn
) = 1;
26420 rtx set
= copy_rtx (single_set (insn
));
26421 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26424 offset
+= reg_size
;
26427 /* Prologue for the FPRs. */
26428 offset
= info
->fp_save_offset
;
26430 offset
+= info
->total_size
;
26432 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26434 if (bitmap_bit_p (components
, i
))
26436 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26437 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26438 RTX_FRAME_RELATED_P (insn
) = 1;
26439 rtx set
= copy_rtx (single_set (insn
));
26440 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26443 offset
+= fp_reg_size
;
26447 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26449 rs6000_emit_epilogue_components (sbitmap components
)
26451 rs6000_stack_t
*info
= rs6000_stack_info ();
26452 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26453 ? HARD_FRAME_POINTER_REGNUM
26454 : STACK_POINTER_REGNUM
);
26456 machine_mode reg_mode
= Pmode
;
26457 int reg_size
= TARGET_32BIT
? 4 : 8;
26459 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26461 int fp_reg_size
= 8;
26463 /* Epilogue for the FPRs. */
26464 int offset
= info
->fp_save_offset
;
26466 offset
+= info
->total_size
;
26468 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26470 if (bitmap_bit_p (components
, i
))
26472 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26473 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26474 RTX_FRAME_RELATED_P (insn
) = 1;
26475 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26478 offset
+= fp_reg_size
;
26481 /* Epilogue for the GPRs. */
26482 offset
= info
->gp_save_offset
;
26484 offset
+= info
->total_size
;
26486 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26488 if (bitmap_bit_p (components
, i
))
26490 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26491 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26492 RTX_FRAME_RELATED_P (insn
) = 1;
26493 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26496 offset
+= reg_size
;
26499 /* Epilogue for LR. */
26500 if (bitmap_bit_p (components
, 0))
26502 int offset
= info
->lr_save_offset
;
26504 offset
+= info
->total_size
;
26506 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26507 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26509 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
26510 insn
= emit_move_insn (lr
, reg
);
26511 RTX_FRAME_RELATED_P (insn
) = 1;
26512 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */
static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */

static void
emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
                      HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  /* Get VRSAVE into a GPR.  */
  rtx reg = gen_rtx_REG (SImode, save_regno);
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
  if (TARGET_MACHO)
    emit_insn (gen_get_vrsave_internal (reg));
  else
    emit_insn (gen_rtx_SET (reg, vrsave));

  /* Save VRSAVE.  */
  int offset = info->vrsave_save_offset + frame_off;
  emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

  /* Include the registers in the mask.  */
  emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));

  emit_insn (generate_set_vrsave (reg, info, 0));
}
26562 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26563 called, it left the arg pointer to the old stack in r29. Otherwise, the
26564 arg pointer is the top of the current frame. */
26566 emit_split_stack_prologue (rs6000_stack_t
*info
, rtx_insn
*sp_adjust
,
26567 HOST_WIDE_INT frame_off
, rtx frame_reg_rtx
)
26569 cfun
->machine
->split_stack_argp_used
= true;
26573 rtx r12
= gen_rtx_REG (Pmode
, 12);
26574 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26575 rtx set_r12
= gen_rtx_SET (r12
, sp_reg_rtx
);
26576 emit_insn_before (set_r12
, sp_adjust
);
26578 else if (frame_off
!= 0 || REGNO (frame_reg_rtx
) != 12)
26580 rtx r12
= gen_rtx_REG (Pmode
, 12);
26581 if (frame_off
== 0)
26582 emit_move_insn (r12
, frame_reg_rtx
);
26584 emit_insn (gen_add3_insn (r12
, frame_reg_rtx
, GEN_INT (frame_off
)));
26589 rtx r12
= gen_rtx_REG (Pmode
, 12);
26590 rtx r29
= gen_rtx_REG (Pmode
, 29);
26591 rtx cr7
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
26592 rtx not_more
= gen_label_rtx ();
26595 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
26596 gen_rtx_GEU (VOIDmode
, cr7
, const0_rtx
),
26597 gen_rtx_LABEL_REF (VOIDmode
, not_more
),
26599 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
26600 JUMP_LABEL (jump
) = not_more
;
26601 LABEL_NUSES (not_more
) += 1;
26602 emit_move_insn (r12
, r29
);
26603 emit_label (not_more
);
26607 /* Emit function prologue as insns. */
26610 rs6000_emit_prologue (void)
26612 rs6000_stack_t
*info
= rs6000_stack_info ();
26613 machine_mode reg_mode
= Pmode
;
26614 int reg_size
= TARGET_32BIT
? 4 : 8;
26615 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26617 int fp_reg_size
= 8;
26618 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26619 rtx frame_reg_rtx
= sp_reg_rtx
;
26620 unsigned int cr_save_regno
;
26621 rtx cr_save_rtx
= NULL_RTX
;
26624 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
26625 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
26626 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
26627 int using_split_stack
= (flag_split_stack
26628 && (lookup_attribute ("no_split_stack",
26629 DECL_ATTRIBUTES (cfun
->decl
))
26632 /* Offset to top of frame for frame_reg and sp respectively. */
26633 HOST_WIDE_INT frame_off
= 0;
26634 HOST_WIDE_INT sp_off
= 0;
26635 /* sp_adjust is the stack adjusting instruction, tracked so that the
26636 insn setting up the split-stack arg pointer can be emitted just
26637 prior to it, when r12 is not used here for other purposes. */
26638 rtx_insn
*sp_adjust
= 0;
26641 /* Track and check usage of r0, r11, r12. */
26642 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
26643 #define START_USE(R) do \
26645 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26646 reg_inuse |= 1 << (R); \
26648 #define END_USE(R) do \
26650 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26651 reg_inuse &= ~(1 << (R)); \
26653 #define NOT_INUSE(R) do \
26655 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26658 #define START_USE(R) do {} while (0)
26659 #define END_USE(R) do {} while (0)
26660 #define NOT_INUSE(R) do {} while (0)
26663 if (DEFAULT_ABI
== ABI_ELFv2
26664 && !TARGET_SINGLE_PIC_BASE
)
26666 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
26668 /* With -mminimal-toc we may generate an extra use of r2 below. */
26669 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
26670 && !constant_pool_empty_p ())
26671 cfun
->machine
->r2_setup_needed
= true;
26675 if (flag_stack_usage_info
)
26676 current_function_static_stack_size
= info
->total_size
;
26678 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
26680 HOST_WIDE_INT size
= info
->total_size
;
26682 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
26684 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
26685 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
,
26686 size
- STACK_CHECK_PROTECT
);
26689 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
26692 if (TARGET_FIX_AND_CONTINUE
)
26694 /* gdb on darwin arranges to forward a function from the old
26695 address by modifying the first 5 instructions of the function
26696 to branch to the overriding function. This is necessary to
26697 permit function pointers that point to the old function to
26698 actually forward to the new function. */
26699 emit_insn (gen_nop ());
26700 emit_insn (gen_nop ());
26701 emit_insn (gen_nop ());
26702 emit_insn (gen_nop ());
26703 emit_insn (gen_nop ());
26706 /* Handle world saves specially here. */
26707 if (WORLD_SAVE_P (info
))
26714 /* save_world expects lr in r0. */
26715 reg0
= gen_rtx_REG (Pmode
, 0);
26716 if (info
->lr_save_p
)
26718 insn
= emit_move_insn (reg0
,
26719 gen_rtx_REG (Pmode
, LR_REGNO
));
26720 RTX_FRAME_RELATED_P (insn
) = 1;
26723 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26724 assumptions about the offsets of various bits of the stack
26726 gcc_assert (info
->gp_save_offset
== -220
26727 && info
->fp_save_offset
== -144
26728 && info
->lr_save_offset
== 8
26729 && info
->cr_save_offset
== 4
26732 && (!crtl
->calls_eh_return
26733 || info
->ehrd_offset
== -432)
26734 && info
->vrsave_save_offset
== -224
26735 && info
->altivec_save_offset
== -416);
26737 treg
= gen_rtx_REG (SImode
, 11);
26738 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
26740 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26741 in R11. It also clobbers R12, so beware! */
26743 /* Preserve CR2 for save_world prologues */
26745 sz
+= 32 - info
->first_gp_reg_save
;
26746 sz
+= 64 - info
->first_fp_reg_save
;
26747 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
26748 p
= rtvec_alloc (sz
);
26750 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
26751 gen_rtx_REG (SImode
,
26753 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
26754 gen_rtx_SYMBOL_REF (Pmode
,
26756 /* We do floats first so that the instruction pattern matches
26758 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
26760 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
26762 info
->first_fp_reg_save
+ i
),
26764 info
->fp_save_offset
+ frame_off
+ 8 * i
);
26765 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
26767 = gen_frame_store (gen_rtx_REG (V4SImode
,
26768 info
->first_altivec_reg_save
+ i
),
26770 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
26771 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
26773 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
26775 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
26777 /* CR register traditionally saved as CR2. */
26779 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
26780 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
26781 /* Explain about use of R0. */
26782 if (info
->lr_save_p
)
26784 = gen_frame_store (reg0
,
26785 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
26786 /* Explain what happens to the stack pointer. */
26788 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
26789 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
26792 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26793 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26794 treg
, GEN_INT (-info
->total_size
));
26795 sp_off
= frame_off
= info
->total_size
;
26798 strategy
= info
->savres_strategy
;
26800 /* For V.4, update stack before we do any saving and set back pointer. */
26801 if (! WORLD_SAVE_P (info
)
26803 && (DEFAULT_ABI
== ABI_V4
26804 || crtl
->calls_eh_return
))
26806 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
26807 || !(strategy
& SAVE_INLINE_GPRS
)
26808 || !(strategy
& SAVE_INLINE_VRS
));
26809 int ptr_regno
= -1;
26810 rtx ptr_reg
= NULL_RTX
;
26813 if (info
->total_size
< 32767)
26814 frame_off
= info
->total_size
;
26817 else if (info
->cr_save_p
26819 || info
->first_fp_reg_save
< 64
26820 || info
->first_gp_reg_save
< 32
26821 || info
->altivec_size
!= 0
26822 || info
->vrsave_size
!= 0
26823 || crtl
->calls_eh_return
)
26827 /* The prologue won't be saving any regs so there is no need
26828 to set up a frame register to access any frame save area.
26829 We also won't be using frame_off anywhere below, but set
26830 the correct value anyway to protect against future
26831 changes to this function. */
26832 frame_off
= info
->total_size
;
26834 if (ptr_regno
!= -1)
26836 /* Set up the frame offset to that needed by the first
26837 out-of-line save function. */
26838 START_USE (ptr_regno
);
26839 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26840 frame_reg_rtx
= ptr_reg
;
26841 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
26842 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
26843 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
26844 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
26845 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
26846 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
26847 frame_off
= -ptr_off
;
26849 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
26851 if (REGNO (frame_reg_rtx
) == 12)
26853 sp_off
= info
->total_size
;
26854 if (frame_reg_rtx
!= sp_reg_rtx
)
26855 rs6000_emit_stack_tie (frame_reg_rtx
, false);
26858 /* If we use the link register, get it into r0. */
26859 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
26860 && !cfun
->machine
->lr_is_wrapped_separately
)
26862 rtx addr
, reg
, mem
;
26864 reg
= gen_rtx_REG (Pmode
, 0);
26866 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
26867 RTX_FRAME_RELATED_P (insn
) = 1;
26869 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
26870 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
26872 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
26873 GEN_INT (info
->lr_save_offset
+ frame_off
));
26874 mem
= gen_rtx_MEM (Pmode
, addr
);
26875 /* This should not be of rs6000_sr_alias_set, because of
26876 __builtin_return_address. */
26878 insn
= emit_move_insn (mem
, reg
);
26879 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26880 NULL_RTX
, NULL_RTX
);
26885 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26886 r12 will be needed by out-of-line gpr restore. */
26887 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
26888 && !(strategy
& (SAVE_INLINE_GPRS
26889 | SAVE_NOINLINE_GPRS_SAVES_LR
))
26891 if (!WORLD_SAVE_P (info
)
26893 && REGNO (frame_reg_rtx
) != cr_save_regno
26894 && !(using_static_chain_p
&& cr_save_regno
== 11)
26895 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
26897 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
26898 START_USE (cr_save_regno
);
26899 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
26902 /* Do any required saving of fpr's. If only one or two to save, do
26903 it ourselves. Otherwise, call function. */
26904 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
26906 int offset
= info
->fp_save_offset
+ frame_off
;
26907 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26910 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
26911 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
26912 sp_off
- frame_off
);
26914 offset
+= fp_reg_size
;
26917 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
26919 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
26920 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
26921 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
26922 rtx ptr_reg
= frame_reg_rtx
;
26924 if (REGNO (frame_reg_rtx
) == ptr_regno
)
26925 gcc_checking_assert (frame_off
== 0);
26928 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26929 NOT_INUSE (ptr_regno
);
26930 emit_insn (gen_add3_insn (ptr_reg
,
26931 frame_reg_rtx
, GEN_INT (frame_off
)));
26933 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
26934 info
->fp_save_offset
,
26935 info
->lr_save_offset
,
26937 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
26938 NULL_RTX
, NULL_RTX
);
26943 /* Save GPRs. This is done as a PARALLEL if we are using
26944 the store-multiple instructions. */
26945 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
26947 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
26948 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
26949 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
26950 rtx ptr_reg
= frame_reg_rtx
;
26951 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
26952 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
26955 if (ptr_regno
== 12)
26958 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26960 /* Need to adjust r11 (r12) if we saved any FPRs. */
26961 if (end_save
+ frame_off
!= 0)
26963 rtx offset
= GEN_INT (end_save
+ frame_off
);
26966 frame_off
= -end_save
;
26968 NOT_INUSE (ptr_regno
);
26969 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
26971 else if (!ptr_set_up
)
26973 NOT_INUSE (ptr_regno
);
26974 emit_move_insn (ptr_reg
, frame_reg_rtx
);
26976 ptr_off
= -end_save
;
26977 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
26978 info
->gp_save_offset
+ ptr_off
,
26979 info
->lr_save_offset
+ ptr_off
,
26981 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
26982 NULL_RTX
, NULL_RTX
);
26986 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
26990 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
26991 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
26993 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
26995 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
26996 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26997 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26998 NULL_RTX
, NULL_RTX
);
27000 else if (!WORLD_SAVE_P (info
))
27002 int offset
= info
->gp_save_offset
+ frame_off
;
27003 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
27006 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
27007 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
27008 sp_off
- frame_off
);
27010 offset
+= reg_size
;
27014 if (crtl
->calls_eh_return
)
27021 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27022 if (regno
== INVALID_REGNUM
)
27026 p
= rtvec_alloc (i
);
27030 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27031 if (regno
== INVALID_REGNUM
)
27035 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
27037 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
27038 RTVEC_ELT (p
, i
) = set
;
27039 RTX_FRAME_RELATED_P (set
) = 1;
27042 insn
= emit_insn (gen_blockage ());
27043 RTX_FRAME_RELATED_P (insn
) = 1;
27044 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
27047 /* In AIX ABI we need to make sure r2 is really saved. */
27048 if (TARGET_AIX
&& crtl
->calls_eh_return
)
27050 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
27051 rtx join_insn
, note
;
27052 rtx_insn
*save_insn
;
27053 long toc_restore_insn
;
27055 tmp_reg
= gen_rtx_REG (Pmode
, 11);
27056 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
27057 if (using_static_chain_p
)
27060 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
27064 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27065 /* Peek at instruction to which this function returns. If it's
27066 restoring r2, then we know we've already saved r2. We can't
27067 unconditionally save r2 because the value we have will already
27068 be updated if we arrived at this function via a plt call or
27069 toc adjusting stub. */
27070 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
27071 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
27072 + RS6000_TOC_SAVE_SLOT
);
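      /* The constant being built is the image of the expected "restore
         r2" insn at the return address: 0x80410000 is lwz r2,N(r1) and
         0xE8410000 is ld r2,N(r1), with RS6000_TOC_SAVE_SLOT as the
         displacement.  */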
27073 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
27074 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
27075 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
27076 validate_condition_mode (EQ
, CCUNSmode
);
27077 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
27078 emit_insn (gen_rtx_SET (compare_result
,
27079 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
27080 toc_save_done
= gen_label_rtx ();
27081 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
27082 gen_rtx_EQ (VOIDmode
, compare_result
,
27084 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
27086 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
27087 JUMP_LABEL (jump
) = toc_save_done
;
27088 LABEL_NUSES (toc_save_done
) += 1;
27090 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
27091 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
27092 sp_off
- frame_off
);
27094 emit_label (toc_save_done
);
27096 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27097 have a CFG that has different saves along different paths.
27098 Move the note to a dummy blockage insn, which describes that
27099 R2 is unconditionally saved after the label. */
27100 /* ??? An alternate representation might be a special insn pattern
27101 containing both the branch and the store. That might let the
27102 code that minimizes the number of DW_CFA_advance opcodes better
27103 freedom in placing the annotations. */
27104 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
27106 remove_note (save_insn
, note
);
27108 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
27109 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
27110 RTX_FRAME_RELATED_P (save_insn
) = 0;
27112 join_insn
= emit_insn (gen_blockage ());
27113 REG_NOTES (join_insn
) = note
;
27114 RTX_FRAME_RELATED_P (join_insn
) = 1;
27116 if (using_static_chain_p
)
27118 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
27125 /* Save CR if we use any that must be preserved. */
27126 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
27128 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27129 GEN_INT (info
->cr_save_offset
+ frame_off
));
27130 rtx mem
= gen_frame_mem (SImode
, addr
);
27132 /* If we didn't copy cr before, do so now using r0. */
27133 if (cr_save_rtx
== NULL_RTX
)
27136 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
27137 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27140 /* Saving CR requires a two-instruction sequence: one instruction
27141 to move the CR to a general-purpose register, and a second
27142 instruction that stores the GPR to memory.
27144 We do not emit any DWARF CFI records for the first of these,
27145 because we cannot properly represent the fact that CR is saved in
27146 a register. One reason is that we cannot express that multiple
27147 CR fields are saved; another reason is that on 64-bit, the size
27148 of the CR register in DWARF (4 bytes) differs from the size of
27149 a general-purpose register.
27151 This means if any intervening instruction were to clobber one of
27152 the call-saved CR fields, we'd have incorrect CFI. To prevent
27153 this from happening, we mark the store to memory as a use of
27154 those CR fields, which prevents any such instruction from being
27155 scheduled in between the two instructions. */
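      /* Concretely, the sequence looks something like
             mfcr  rN        (or a single-field mfocrf/mfcrf under ELFv2,
                              see rs6000_emit_prologue_move_from_cr)
             stw   rN,OFF(r1)
         and the USEs built below keep the two insns together.  */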
27160 crsave_v
[n_crsave
++] = gen_rtx_SET (mem
, cr_save_rtx
);
27161 for (i
= 0; i
< 8; i
++)
27162 if (save_reg_p (CR0_REGNO
+ i
))
27163 crsave_v
[n_crsave
++]
27164 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27166 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
27167 gen_rtvec_v (n_crsave
, crsave_v
)));
27168 END_USE (REGNO (cr_save_rtx
));
27170 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27171 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27172 so we need to construct a frame expression manually. */
27173 RTX_FRAME_RELATED_P (insn
) = 1;
27175 /* Update address to be stack-pointer relative, like
27176 rs6000_frame_related would do. */
27177 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
27178 GEN_INT (info
->cr_save_offset
+ sp_off
));
27179 mem
= gen_frame_mem (SImode
, addr
);
27181 if (DEFAULT_ABI
== ABI_ELFv2
)
27183 /* In the ELFv2 ABI we generate separate CFI records for each
27184 CR field that was actually saved. They all point to the
27185 same 32-bit stack slot. */
27189 for (i
= 0; i
< 8; i
++)
27190 if (save_reg_p (CR0_REGNO
+ i
))
27193 = gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27195 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
27199 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27200 gen_rtx_PARALLEL (VOIDmode
,
27201 gen_rtvec_v (n_crframe
, crframe
)));
27205 /* In other ABIs, by convention, we use a single CR regnum to
27206 represent the fact that all call-saved CR fields are saved.
27207 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27208 rtx set
= gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR2_REGNO
));
27209 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
27213 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27214 *separate* slots if the routine calls __builtin_eh_return, so
27215 that they can be independently restored by the unwinder. */
27216 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
27218 int i
, cr_off
= info
->ehcr_offset
;
27221 /* ??? We might get better performance by using multiple mfocrf
27223 crsave
= gen_rtx_REG (SImode
, 0);
27224 emit_insn (gen_prologue_movesi_from_cr (crsave
));
27226 for (i
= 0; i
< 8; i
++)
27227 if (!call_used_regs
[CR0_REGNO
+ i
])
27229 rtvec p
= rtvec_alloc (2);
27231 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
27233 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27235 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27237 RTX_FRAME_RELATED_P (insn
) = 1;
27238 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27239 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
27240 sp_reg_rtx
, cr_off
+ sp_off
));
27242 cr_off
+= reg_size
;
27246 /* Update stack and set back pointer unless this is V.4,
27247 for which it was done previously. */
27248 if (!WORLD_SAVE_P (info
) && info
->push_p
27249 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
27251 rtx ptr_reg
= NULL
;
27254 /* If saving altivec regs we need to be able to address all save
27255 locations using a 16-bit offset. */
27256 if ((strategy
& SAVE_INLINE_VRS
) == 0
27257 || (info
->altivec_size
!= 0
27258 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
27259 + info
->total_size
- frame_off
) > 32767)
27260 || (info
->vrsave_size
!= 0
27261 && (info
->vrsave_save_offset
27262 + info
->total_size
- frame_off
) > 32767))
27264 int sel
= SAVRES_SAVE
| SAVRES_VR
;
27265 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27267 if (using_static_chain_p
27268 && ptr_regno
== STATIC_CHAIN_REGNUM
)
27270 if (REGNO (frame_reg_rtx
) != ptr_regno
)
27271 START_USE (ptr_regno
);
27272 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27273 frame_reg_rtx
= ptr_reg
;
27274 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27275 frame_off
= -ptr_off
;
27277 else if (REGNO (frame_reg_rtx
) == 1)
27278 frame_off
= info
->total_size
;
27279 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27281 if (REGNO (frame_reg_rtx
) == 12)
27283 sp_off
= info
->total_size
;
27284 if (frame_reg_rtx
!= sp_reg_rtx
)
27285 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27288 /* Set frame pointer, if needed. */
27289 if (frame_pointer_needed
)
27291 insn
= emit_move_insn (gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
),
27293 RTX_FRAME_RELATED_P (insn
) = 1;
27296 /* Save AltiVec registers if needed. Save here because the red zone does
27297 not always include AltiVec registers. */
27298 if (!WORLD_SAVE_P (info
)
27299 && info
->altivec_size
!= 0 && (strategy
& SAVE_INLINE_VRS
) == 0)
27301 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
27303 /* Oddly, the vector save/restore functions point r0 at the end
27304 of the save area, then use r11 or r12 to load offsets for
27305 [reg+reg] addressing. */
27306 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
27307 int scratch_regno
= ptr_regno_for_savres (SAVRES_SAVE
| SAVRES_VR
);
27308 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
27310 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27312 if (scratch_regno
== 12)
27314 if (end_save
+ frame_off
!= 0)
27316 rtx offset
= GEN_INT (end_save
+ frame_off
);
27318 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27321 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27323 ptr_off
= -end_save
;
27324 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
27325 info
->altivec_save_offset
+ ptr_off
,
27326 0, V4SImode
, SAVRES_SAVE
| SAVRES_VR
);
27327 rs6000_frame_related (insn
, scratch_reg
, sp_off
- ptr_off
,
27328 NULL_RTX
, NULL_RTX
);
27329 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
27331 /* The oddity mentioned above clobbered our frame reg. */
27332 emit_move_insn (frame_reg_rtx
, ptr_reg
);
27333 frame_off
= ptr_off
;
27336 else if (!WORLD_SAVE_P (info
)
27337 && info
->altivec_size
!= 0)
27341 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
27342 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
27344 rtx areg
, savereg
, mem
;
27345 HOST_WIDE_INT offset
;
27347 offset
= (info
->altivec_save_offset
+ frame_off
27348 + 16 * (i
- info
->first_altivec_reg_save
));
27350 savereg
= gen_rtx_REG (V4SImode
, i
);
27352 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
27354 mem
= gen_frame_mem (V4SImode
,
27355 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27356 GEN_INT (offset
)));
27357 insn
= emit_insn (gen_rtx_SET (mem
, savereg
));
27363 areg
= gen_rtx_REG (Pmode
, 0);
27364 emit_move_insn (areg
, GEN_INT (offset
));
27366 /* AltiVec addressing mode is [reg+reg]. */
27367 mem
= gen_frame_mem (V4SImode
,
27368 gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
));
27370 /* Rather than emitting a generic move, force use of the stvx
27371 instruction, which we always want on ISA 2.07 (power8) systems.
27372 In particular we don't want xxpermdi/stxvd2x for little
27374 insn
= emit_insn (gen_altivec_stvx_v4si_internal (mem
, savereg
));
27377 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27378 areg
, GEN_INT (offset
));
27382 /* VRSAVE is a bit vector representing which AltiVec registers
27383 are used. The OS uses this to determine which vector
27384 registers to save on a context switch. We need to save
27385 VRSAVE on the stack frame, add whatever AltiVec registers we
27386 used in this function, and do the corresponding magic in the
27389 if (!WORLD_SAVE_P (info
) && info
->vrsave_size
!= 0)
27391 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27392 be using r12 as frame_reg_rtx and r11 as the static chain
27393 pointer for nested functions. */
27394 int save_regno
= 12;
27395 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27396 && !using_static_chain_p
)
27398 else if (using_split_stack
|| REGNO (frame_reg_rtx
) == 12)
27401 if (using_static_chain_p
)
27404 NOT_INUSE (save_regno
);
27406 emit_vrsave_prologue (info
, save_regno
, frame_off
, frame_reg_rtx
);
27409 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27410 if (!TARGET_SINGLE_PIC_BASE
27411 && ((TARGET_TOC
&& TARGET_MINIMAL_TOC
27412 && !constant_pool_empty_p ())
27413 || (DEFAULT_ABI
== ABI_V4
27414 && (flag_pic
== 1 || (flag_pic
&& TARGET_SECURE_PLT
))
27415 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))))
27417 /* If emit_load_toc_table will use the link register, we need to save
27418 it. We use R12 for this purpose because emit_load_toc_table
27419 can use register 0. This allows us to use a plain 'blr' to return
27420 from the procedure more often. */
27421 int save_LR_around_toc_setup
= (TARGET_ELF
27422 && DEFAULT_ABI
== ABI_V4
27424 && ! info
->lr_save_p
27425 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
) > 0);
27426 if (save_LR_around_toc_setup
)
27428 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27429 rtx tmp
= gen_rtx_REG (Pmode
, 12);
27432 insn
= emit_move_insn (tmp
, lr
);
27433 RTX_FRAME_RELATED_P (insn
) = 1;
27435 rs6000_emit_load_toc_table (TRUE
);
27437 insn
= emit_move_insn (lr
, tmp
);
27438 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
27439 RTX_FRAME_RELATED_P (insn
) = 1;
27442 rs6000_emit_load_toc_table (TRUE
);
27446 if (!TARGET_SINGLE_PIC_BASE
27447 && DEFAULT_ABI
== ABI_DARWIN
27448 && flag_pic
&& crtl
->uses_pic_offset_table
)
27450 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27451 rtx src
= gen_rtx_SYMBOL_REF (Pmode
, MACHOPIC_FUNCTION_BASE_NAME
);
27453 /* Save and restore LR locally around this call (in R0). */
27454 if (!info
->lr_save_p
)
27455 emit_move_insn (gen_rtx_REG (Pmode
, 0), lr
);
27457 emit_insn (gen_load_macho_picbase (src
));
27459 emit_move_insn (gen_rtx_REG (Pmode
,
27460 RS6000_PIC_OFFSET_TABLE_REGNUM
),
27463 if (!info
->lr_save_p
)
27464 emit_move_insn (lr
, gen_rtx_REG (Pmode
, 0));
27468 /* If we need to, save the TOC register after doing the stack setup.
27469 Do not emit eh frame info for this save. The unwinder wants info,
27470 conceptually attached to instructions in this function, about
27471 register values in the caller of this function. This R2 may have
27472 already been changed from the value in the caller.
27473 We don't attempt to write accurate DWARF EH frame info for R2
27474 because code emitted by gcc for a (non-pointer) function call
27475 doesn't save and restore R2. Instead, R2 is managed out-of-line
27476 by a linker generated plt call stub when the function resides in
27477 a shared library. This behavior is costly to describe in DWARF,
27478 both in terms of the size of DWARF info and the time taken in the
27479 unwinder to interpret it. R2 changes, apart from the
27480 calls_eh_return case earlier in this function, are handled by
27481 linux-unwind.h frob_update_context. */
27482 if (rs6000_save_toc_in_prologue_p ())
27484 rtx reg
= gen_rtx_REG (reg_mode
, TOC_REGNUM
);
27485 emit_insn (gen_frame_store (reg
, sp_reg_rtx
, RS6000_TOC_SAVE_SLOT
));
27488 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27489 if (using_split_stack
&& split_stack_arg_pointer_used_p ())
27490 emit_split_stack_prologue (info
, sp_adjust
, frame_off
, frame_reg_rtx
);
27493 /* Output .extern statements for the save/restore routines we use. */
27496 rs6000_output_savres_externs (FILE *file
)
27498 rs6000_stack_t
*info
= rs6000_stack_info ();
27500 if (TARGET_DEBUG_STACK
)
27501 debug_stack_info (info
);
27503 /* Write .extern for any function we will call to save and restore
27505 if (info
->first_fp_reg_save
< 64
27510 int regno
= info
->first_fp_reg_save
- 32;
27512 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
) == 0)
27514 bool lr
= (info
->savres_strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27515 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27516 name
= rs6000_savres_routine_name (regno
, sel
);
27517 fprintf (file
, "\t.extern %s\n", name
);
27519 if ((info
->savres_strategy
& REST_INLINE_FPRS
) == 0)
27521 bool lr
= (info
->savres_strategy
27522 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
27523 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27524 name
= rs6000_savres_routine_name (regno
, sel
);
27525 fprintf (file
, "\t.extern %s\n", name
);
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file)
{
  if (!cfun->is_thunk)
    rs6000_output_savres_externs (file);

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (rs6000_global_entry_point_needed_p ())
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);

      if (TARGET_CMODEL != CMODEL_LARGE)
        {
          /* In the small and medium code models, we assume the TOC is less
             than 2 GB away from the text section, so it can be computed via
             the following two-instruction sequence.  */
          char buf[256];

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          fprintf (file, "0:\taddis 2,12,.TOC.-");
          assemble_name (file, buf);
          fprintf (file, "@ha\n");
          fprintf (file, "\taddi 2,2,.TOC.-");
          assemble_name (file, buf);
          fprintf (file, "@l\n");
        }
      else
        {
          /* In the large code model, we allow arbitrary offsets between the
             TOC and the text section, so we have to load the offset from
             memory.  The data field is emitted directly before the global
             entry point in rs6000_elf_declare_function_name.  */
          char buf[256];

#ifdef HAVE_AS_ENTRY_MARKERS
          /* If supported by the linker, emit a marker relocation.  If the
             total code size of the final executable or shared library
             happens to fit into 2 GB after all, the linker will replace
             this code sequence with the sequence for the small or medium
             code model.  */
          fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
#endif

          fprintf (file, "\tld 2,");
          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          assemble_name (file, buf);
          fprintf (file, "-");
          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          assemble_name (file, buf);
          fprintf (file, "(12)\n");
          fprintf (file, "\tadd 2,2,12\n");
        }

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }

  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
         the responsibility of _mcount to preserve the static chain
         register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
          && cfun->static_chain_decl != NULL)
        {
          asm_fprintf (file, "\tstd %s,24(%s)\n",
                       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
          fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
          asm_fprintf (file, "\tld %s,24(%s)\n",
                       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
        }
      else
        fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}
/* -mprofile-kernel code calls mcount before the function prolog,
   so a profiled leaf function should stay a leaf function.  */
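/* This is used as the TARGET_KEEP_LEAF_WHEN_PROFILED target hook.  */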
static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx = 0;

      p = rtvec_alloc (count);

      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          {
            rtvec r = rtvec_alloc (2);
            RTVEC_ELT (r, 0) = reg;
            RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
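            /* Field i of the condition register is selected by mask bit
               1 << (7-i): the mtcrf field mask numbers its bits from the
               most significant end, so CR0 corresponds to the 0x80 bit.  */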
            RTVEC_ELT (p, ndx) =
              gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
                           gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
            ndx++;
          }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
         CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
        {
          for (i = 0; i < 8; i++)
            if (save_reg_p (CR0_REGNO + i))
              add_reg_note (insn, REG_CFA_RESTORE,
                            gen_rtx_REG (SImode, CR0_REGNO + i));

          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
        {
          rtx insn = emit_insn (gen_movsi_to_cr_one
                                (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

          /* For the ELFv2 ABI we generate a CFA_RESTORE for each
             CR field separately, attached to the insn that in fact
             restores this particular CR field.  */
          if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
            {
              add_reg_note (insn, REG_CFA_RESTORE,
                            gen_rtx_REG (SImode, CR0_REGNO + i));

              RTX_FRAME_RELATED_P (insn) = 1;
            }
        }

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   insn.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          {
            rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
                                           cfa_restores);
          }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (SImode, CR2_REGNO),
                                   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (Pmode, LR_REGNO),
                                   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */
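/* For example, with the 64-bit AIX and ELFv2 ABIs the red zone is 288
   bytes, so an offset of -280 is still protected (this returns false),
   while -300 lies below the red zone and can be clobbered by a signal
   handler (this returns true).  */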
static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
                   ? 0
                   : TARGET_32BIT ? -220 : -288);
}
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);

  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
27824 /* Emit function epilogue as insns. */
27827 rs6000_emit_epilogue (int sibcall
)
27829 rs6000_stack_t
*info
;
27830 int restoring_GPRs_inline
;
27831 int restoring_FPRs_inline
;
27832 int using_load_multiple
;
27833 int using_mtcr_multiple
;
27834 int use_backchain_to_restore_sp
;
27837 HOST_WIDE_INT frame_off
= 0;
27838 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, 1);
27839 rtx frame_reg_rtx
= sp_reg_rtx
;
27840 rtx cfa_restores
= NULL_RTX
;
27842 rtx cr_save_reg
= NULL_RTX
;
27843 machine_mode reg_mode
= Pmode
;
27844 int reg_size
= TARGET_32BIT
? 4 : 8;
27845 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
27847 int fp_reg_size
= 8;
27850 unsigned ptr_regno
;
27852 info
= rs6000_stack_info ();
27854 strategy
= info
->savres_strategy
;
27855 using_load_multiple
= strategy
& REST_MULTIPLE
;
27856 restoring_FPRs_inline
= sibcall
|| (strategy
& REST_INLINE_FPRS
);
27857 restoring_GPRs_inline
= sibcall
|| (strategy
& REST_INLINE_GPRS
);
27858 using_mtcr_multiple
= (rs6000_cpu
== PROCESSOR_PPC601
27859 || rs6000_cpu
== PROCESSOR_PPC603
27860 || rs6000_cpu
== PROCESSOR_PPC750
27862 /* Restore via the backchain when we have a large frame, since this
27863 is more efficient than an addis, addi pair. The second condition
27864 here will not trigger at the moment; We don't actually need a
27865 frame pointer for alloca, but the generic parts of the compiler
27866 give us one anyway. */
27867 use_backchain_to_restore_sp
= (info
->total_size
+ (info
->lr_save_p
27868 ? info
->lr_save_offset
27870 || (cfun
->calls_alloca
27871 && !frame_pointer_needed
));
27872 restore_lr
= (info
->lr_save_p
27873 && (restoring_FPRs_inline
27874 || (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
))
27875 && (restoring_GPRs_inline
27876 || info
->first_fp_reg_save
< 64)
27877 && !cfun
->machine
->lr_is_wrapped_separately
);
27880 if (WORLD_SAVE_P (info
))
27884 const char *alloc_rname
;
27887 /* eh_rest_world_r10 will return to the location saved in the LR
27888 stack slot (which is not likely to be our caller.)
27889 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27890 rest_world is similar, except any R10 parameter is ignored.
27891 The exception-handling stuff that was here in 2.95 is no
27892 longer necessary. */
27895 + 32 - info
->first_gp_reg_save
27896 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
27897 + 63 + 1 - info
->first_fp_reg_save
);
27899 strcpy (rname
, ((crtl
->calls_eh_return
) ?
27900 "*eh_rest_world_r10" : "*rest_world"));
27901 alloc_rname
= ggc_strdup (rname
);
27904 RTVEC_ELT (p
, j
++) = ret_rtx
;
27906 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
27907 /* The instruction pattern requires a clobber here;
27908 it is shared with the restVEC helper. */
27910 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
27913 /* CR register traditionally saved as CR2. */
27914 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
27916 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
27917 if (flag_shrink_wrap
)
27919 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
27920 gen_rtx_REG (Pmode
, LR_REGNO
),
27922 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27926 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27928 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
27930 = gen_frame_load (reg
,
27931 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
27932 if (flag_shrink_wrap
27933 && save_reg_p (info
->first_gp_reg_save
+ i
))
27934 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27936 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
27938 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
27940 = gen_frame_load (reg
,
27941 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
27942 if (flag_shrink_wrap
27943 && save_reg_p (info
->first_altivec_reg_save
+ i
))
27944 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27946 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
27948 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
27949 ? DFmode
: SFmode
),
27950 info
->first_fp_reg_save
+ i
);
27952 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
27953 if (flag_shrink_wrap
27954 && save_reg_p (info
->first_fp_reg_save
+ i
))
27955 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27958 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
27960 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
27962 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
27964 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
27966 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
27967 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27969 if (flag_shrink_wrap
)
27971 REG_NOTES (insn
) = cfa_restores
;
27972 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
27973 RTX_FRAME_RELATED_P (insn
) = 1;
27978 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27980 frame_off
= info
->total_size
;
27982 /* Restore AltiVec registers if we must do so before adjusting the
27984 if (info
->altivec_size
!= 0
27985 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27986 || (DEFAULT_ABI
!= ABI_V4
27987 && offset_below_red_zone_p (info
->altivec_save_offset
))))
27990 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
27992 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27993 if (use_backchain_to_restore_sp
)
27995 int frame_regno
= 11;
27997 if ((strategy
& REST_INLINE_VRS
) == 0)
27999 /* Of r11 and r12, select the one not clobbered by an
28000 out-of-line restore function for the frame register. */
28001 frame_regno
= 11 + 12 - scratch_regno
;
28003 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
28004 emit_move_insn (frame_reg_rtx
,
28005 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28008 else if (frame_pointer_needed
)
28009 frame_reg_rtx
= hard_frame_pointer_rtx
;
28011 if ((strategy
& REST_INLINE_VRS
) == 0)
28013 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28015 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28016 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28018 if (end_save
+ frame_off
!= 0)
28020 rtx offset
= GEN_INT (end_save
+ frame_off
);
28022 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28025 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28027 ptr_off
= -end_save
;
28028 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28029 info
->altivec_save_offset
+ ptr_off
,
28030 0, V4SImode
, SAVRES_VR
);
28034 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28035 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28037 rtx addr
, areg
, mem
, insn
;
28038 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28039 HOST_WIDE_INT offset
28040 = (info
->altivec_save_offset
+ frame_off
28041 + 16 * (i
- info
->first_altivec_reg_save
));
28043 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28045 mem
= gen_frame_mem (V4SImode
,
28046 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28047 GEN_INT (offset
)));
28048 insn
= gen_rtx_SET (reg
, mem
);
28052 areg
= gen_rtx_REG (Pmode
, 0);
28053 emit_move_insn (areg
, GEN_INT (offset
));
28055 /* AltiVec addressing mode is [reg+reg]. */
28056 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28057 mem
= gen_frame_mem (V4SImode
, addr
);
28059 /* Rather than emitting a generic move, force use of the
28060 lvx instruction, which we always want. In particular we
28061 don't want lxvd2x/xxpermdi for little endian. */
28062 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28065 (void) emit_insn (insn
);
28069 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28070 if (((strategy
& REST_INLINE_VRS
) == 0
28071 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28072 && (flag_shrink_wrap
28073 || (offset_below_red_zone_p
28074 (info
->altivec_save_offset
28075 + 16 * (i
- info
->first_altivec_reg_save
))))
28078 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28079 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28083 /* Restore VRSAVE if we must do so before adjusting the stack. */
28084 if (info
->vrsave_size
!= 0
28085 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28086 || (DEFAULT_ABI
!= ABI_V4
28087 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
28091 if (frame_reg_rtx
== sp_reg_rtx
)
28093 if (use_backchain_to_restore_sp
)
28095 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28096 emit_move_insn (frame_reg_rtx
,
28097 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28100 else if (frame_pointer_needed
)
28101 frame_reg_rtx
= hard_frame_pointer_rtx
;
28104 reg
= gen_rtx_REG (SImode
, 12);
28105 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28106 info
->vrsave_save_offset
+ frame_off
));
28108 emit_insn (generate_set_vrsave (reg
, info
, 1));
28112 /* If we have a large stack frame, restore the old stack pointer
28113 using the backchain. */
28114 if (use_backchain_to_restore_sp
)
28116 if (frame_reg_rtx
== sp_reg_rtx
)
28118 /* Under V.4, don't reset the stack pointer until after we're done
28119 loading the saved registers. */
28120 if (DEFAULT_ABI
== ABI_V4
)
28121 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28123 insn
= emit_move_insn (frame_reg_rtx
,
28124 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28127 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28128 && DEFAULT_ABI
== ABI_V4
)
28129 /* frame_reg_rtx has been set up by the altivec restore. */
28133 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
28134 frame_reg_rtx
= sp_reg_rtx
;
28137 /* If we have a frame pointer, we can restore the old stack pointer
28139 else if (frame_pointer_needed
)
28141 frame_reg_rtx
= sp_reg_rtx
;
28142 if (DEFAULT_ABI
== ABI_V4
)
28143 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28144 /* Prevent reordering memory accesses against stack pointer restore. */
28145 else if (cfun
->calls_alloca
28146 || offset_below_red_zone_p (-info
->total_size
))
28147 rs6000_emit_stack_tie (frame_reg_rtx
, true);
28149 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
28150 GEN_INT (info
->total_size
)));
28153 else if (info
->push_p
28154 && DEFAULT_ABI
!= ABI_V4
28155 && !crtl
->calls_eh_return
)
28157 /* Prevent reordering memory accesses against stack pointer restore. */
28158 if (cfun
->calls_alloca
28159 || offset_below_red_zone_p (-info
->total_size
))
28160 rs6000_emit_stack_tie (frame_reg_rtx
, false);
28161 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
28162 GEN_INT (info
->total_size
)));
28165 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
28169 REG_NOTES (insn
) = cfa_restores
;
28170 cfa_restores
= NULL_RTX
;
28172 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28173 RTX_FRAME_RELATED_P (insn
) = 1;
28176 /* Restore AltiVec registers if we have not done so already. */
28177 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28178 && info
->altivec_size
!= 0
28179 && (DEFAULT_ABI
== ABI_V4
28180 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
28184 if ((strategy
& REST_INLINE_VRS
) == 0)
28186 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28188 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28189 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28190 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28192 if (end_save
+ frame_off
!= 0)
28194 rtx offset
= GEN_INT (end_save
+ frame_off
);
28196 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28199 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28201 ptr_off
= -end_save
;
28202 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28203 info
->altivec_save_offset
+ ptr_off
,
28204 0, V4SImode
, SAVRES_VR
);
28205 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
28207 /* Frame reg was clobbered by out-of-line save. Restore it
28208 from ptr_reg, and if we are calling out-of-line gpr or
28209 fpr restore set up the correct pointer and offset. */
28210 unsigned newptr_regno
= 1;
28211 if (!restoring_GPRs_inline
)
28213 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28214 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28215 newptr_regno
= ptr_regno_for_savres (sel
);
28216 end_save
= info
->gp_save_offset
+ info
->gp_size
;
28218 else if (!restoring_FPRs_inline
)
28220 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
28221 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28222 newptr_regno
= ptr_regno_for_savres (sel
);
28223 end_save
= info
->fp_save_offset
+ info
->fp_size
;
28226 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
28227 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
28229 if (end_save
+ ptr_off
!= 0)
28231 rtx offset
= GEN_INT (end_save
+ ptr_off
);
28233 frame_off
= -end_save
;
28235 emit_insn (gen_addsi3_carry (frame_reg_rtx
,
28238 emit_insn (gen_adddi3_carry (frame_reg_rtx
,
28243 frame_off
= ptr_off
;
28244 emit_move_insn (frame_reg_rtx
, ptr_reg
);
28250 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28251 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28253 rtx addr
, areg
, mem
, insn
;
28254 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28255 HOST_WIDE_INT offset
28256 = (info
->altivec_save_offset
+ frame_off
28257 + 16 * (i
- info
->first_altivec_reg_save
));
28259 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28261 mem
= gen_frame_mem (V4SImode
,
28262 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28263 GEN_INT (offset
)));
28264 insn
= gen_rtx_SET (reg
, mem
);
28268 areg
= gen_rtx_REG (Pmode
, 0);
28269 emit_move_insn (areg
, GEN_INT (offset
));
28271 /* AltiVec addressing mode is [reg+reg]. */
28272 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28273 mem
= gen_frame_mem (V4SImode
, addr
);
28275 /* Rather than emitting a generic move, force use of the
28276 lvx instruction, which we always want. In particular we
28277 don't want lxvd2x/xxpermdi for little endian. */
28278 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28281 (void) emit_insn (insn
);
28285 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28286 if (((strategy
& REST_INLINE_VRS
) == 0
28287 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28288 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28291 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28292 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28296 /* Restore VRSAVE if we have not done so already. */
28297 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28298 && info
->vrsave_size
!= 0
28299 && (DEFAULT_ABI
== ABI_V4
28300 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
28304 reg
= gen_rtx_REG (SImode
, 12);
28305 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28306 info
->vrsave_save_offset
+ frame_off
));
28308 emit_insn (generate_set_vrsave (reg
, info
, 1));
28311 /* If we exit by an out-of-line restore function on ABI_V4 then that
28312 function will deallocate the stack, so we don't need to worry
28313 about the unwinder restoring cr from an invalid stack frame
28315 exit_func
= (!restoring_FPRs_inline
28316 || (!restoring_GPRs_inline
28317 && info
->first_fp_reg_save
== 64));
28319 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28320 *separate* slots if the routine calls __builtin_eh_return, so
28321 that they can be independently restored by the unwinder. */
28322 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
28324 int i
, cr_off
= info
->ehcr_offset
;
28326 for (i
= 0; i
< 8; i
++)
28327 if (!call_used_regs
[CR0_REGNO
+ i
])
28329 rtx reg
= gen_rtx_REG (SImode
, 0);
28330 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28331 cr_off
+ frame_off
));
28333 insn
= emit_insn (gen_movsi_to_cr_one
28334 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
28336 if (!exit_func
&& flag_shrink_wrap
)
28338 add_reg_note (insn
, REG_CFA_RESTORE
,
28339 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
28341 RTX_FRAME_RELATED_P (insn
) = 1;
28344 cr_off
+= reg_size
;
28348 /* Get the old lr if we saved it. If we are restoring registers
28349 out-of-line, then the out-of-line routines can do this for us. */
28350 if (restore_lr
&& restoring_GPRs_inline
)
28351 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28353 /* Get the old cr if we saved it. */
28354 if (info
->cr_save_p
)
28356 unsigned cr_save_regno
= 12;
28358 if (!restoring_GPRs_inline
)
28360 /* Ensure we don't use the register used by the out-of-line
28361 gpr register restore below. */
28362 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28363 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28364 int gpr_ptr_regno
= ptr_regno_for_savres (sel
);
28366 if (gpr_ptr_regno
== 12)
28367 cr_save_regno
= 11;
28368 gcc_checking_assert (REGNO (frame_reg_rtx
) != cr_save_regno
);
28370 else if (REGNO (frame_reg_rtx
) == 12)
28371 cr_save_regno
= 11;
28373 cr_save_reg
= load_cr_save (cr_save_regno
, frame_reg_rtx
,
28374 info
->cr_save_offset
+ frame_off
,
28378 /* Set LR here to try to overlap restores below. */
28379 if (restore_lr
&& restoring_GPRs_inline
)
28380 restore_saved_lr (0, exit_func
);
28382 /* Load exception handler data registers, if needed. */
28383 if (crtl
->calls_eh_return
)
28385 unsigned int i
, regno
;
28389 rtx reg
= gen_rtx_REG (reg_mode
, 2);
28390 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28391 frame_off
+ RS6000_TOC_SAVE_SLOT
));
28398 regno
= EH_RETURN_DATA_REGNO (i
);
28399 if (regno
== INVALID_REGNUM
)
28402 mem
= gen_frame_mem_offset (reg_mode
, frame_reg_rtx
,
28403 info
->ehrd_offset
+ frame_off
28404 + reg_size
* (int) i
);
28406 emit_move_insn (gen_rtx_REG (reg_mode
, regno
), mem
);
28410 /* Restore GPRs. This is done as a PARALLEL if we are using
28411 the load-multiple instructions. */
28412 if (!restoring_GPRs_inline
)
28414 /* We are jumping to an out-of-line function. */
28416 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
28417 bool can_use_exit
= end_save
== 0;
28418 int sel
= SAVRES_GPR
| (can_use_exit
? SAVRES_LR
: 0);
28421 /* Emit stack reset code if we need it. */
28422 ptr_regno
= ptr_regno_for_savres (sel
);
28423 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
28425 rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28426 else if (end_save
+ frame_off
!= 0)
28427 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
,
28428 GEN_INT (end_save
+ frame_off
)));
28429 else if (REGNO (frame_reg_rtx
) != ptr_regno
)
28430 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28431 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28432 frame_off
= -end_save
;
28434 if (can_use_exit
&& info
->cr_save_p
)
28435 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, true);
28437 ptr_off
= -end_save
;
28438 rs6000_emit_savres_rtx (info
, ptr_reg
,
28439 info
->gp_save_offset
+ ptr_off
,
28440 info
->lr_save_offset
+ ptr_off
,
28443 else if (using_load_multiple
)
28446 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
28447 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28449 = gen_frame_load (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
28451 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
28452 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28456 int offset
= info
->gp_save_offset
+ frame_off
;
28457 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28460 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28462 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28463 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28466 offset
+= reg_size
;
28470 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28472 /* If the frame pointer was used then we can't delay emitting
28473 a REG_CFA_DEF_CFA note. This must happen on the insn that
28474 restores the frame pointer, r31. We may have already emitted
28475 a REG_CFA_DEF_CFA note, but that's OK; A duplicate is
28476 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28477 be harmless if emitted. */
28478 if (frame_pointer_needed
)
28480 insn
= get_last_insn ();
28481 add_reg_note (insn
, REG_CFA_DEF_CFA
,
28482 plus_constant (Pmode
, frame_reg_rtx
, frame_off
));
28483 RTX_FRAME_RELATED_P (insn
) = 1;
28486 /* Set up cfa_restores. We always need these when
28487 shrink-wrapping. If not shrink-wrapping then we only need
28488 the cfa_restore when the stack location is no longer valid.
28489 The cfa_restores must be emitted on or before the insn that
28490 invalidates the stack, and of course must not be emitted
28491 before the insn that actually does the restore. The latter
28492 is why it is a bad idea to emit the cfa_restores as a group
28493 on the last instruction here that actually does a restore:
28494 That insn may be reordered with respect to others doing
28496 if (flag_shrink_wrap
28497 && !restoring_GPRs_inline
28498 && info
->first_fp_reg_save
== 64)
28499 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28501 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28503 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28505 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28506 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28510 if (!restoring_GPRs_inline
28511 && info
->first_fp_reg_save
== 64)
28513 /* We are jumping to an out-of-line function. */
28515 emit_cfa_restores (cfa_restores
);
28519 if (restore_lr
&& !restoring_GPRs_inline
)
28521 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28522 restore_saved_lr (0, exit_func
);
28525 /* Restore fpr's if we need to do it without calling a function. */
28526 if (restoring_FPRs_inline
)
28528 int offset
= info
->fp_save_offset
+ frame_off
;
28529 for (i
= info
->first_fp_reg_save
; i
< 64; i
++)
28532 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
28534 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
28535 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28536 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28537 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
28541 offset
+= fp_reg_size
;
28545 /* If we saved cr, restore it here. Just those that were used. */
28546 if (info
->cr_save_p
)
28547 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, exit_func
);
28549 /* If this is V.4, unwind the stack pointer after all of the loads
28550 have been done, or set up r11 if we are restoring fp out of line. */
28552 if (!restoring_FPRs_inline
)
28554 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28555 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28556 ptr_regno
= ptr_regno_for_savres (sel
);
28559 insn
= rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28560 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28563 if (insn
&& restoring_FPRs_inline
)
28567 REG_NOTES (insn
) = cfa_restores
;
28568 cfa_restores
= NULL_RTX
;
28570 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28571 RTX_FRAME_RELATED_P (insn
) = 1;
28574 if (crtl
->calls_eh_return
)
28576 rtx sa
= EH_RETURN_STACKADJ_RTX
;
28577 emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
, sa
));
28580 if (!sibcall
&& restoring_FPRs_inline
)
28584 /* We can't hang the cfa_restores off a simple return,
28585 since the shrink-wrap code sometimes uses an existing
28586 return. This means there might be a path from
28587 pre-prologue code to this return, and dwarf2cfi code
28588 wants the eh_frame unwinder state to be the same on
28589 all paths to any point. So we need to emit the
28590 cfa_restores before the return. For -m64 we really
28591 don't need epilogue cfa_restores at all, except for
28592 this irritating dwarf2cfi with shrink-wrap
28593 requirement; The stack red-zone means eh_frame info
28594 from the prologue telling the unwinder to restore
28595 from the stack is perfectly good right to the end of
28597 emit_insn (gen_blockage ());
28598 emit_cfa_restores (cfa_restores
);
28599 cfa_restores
= NULL_RTX
;
28602 emit_jump_insn (targetm
.gen_simple_return ());
28605 if (!sibcall
&& !restoring_FPRs_inline
)
28607 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28608 rtvec p
= rtvec_alloc (3 + !!lr
+ 64 - info
->first_fp_reg_save
);
28610 RTVEC_ELT (p
, elt
++) = ret_rtx
;
28612 RTVEC_ELT (p
, elt
++)
28613 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
28615 /* We have to restore more than two FP registers, so branch to the
28616 restore function. It will return to our caller. */
28621 if (flag_shrink_wrap
)
28622 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28624 sym
= rs6000_savres_routine_sym (info
, SAVRES_FPR
| (lr
? SAVRES_LR
: 0));
28625 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, sym
);
28626 reg
= (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)? 1 : 11;
28627 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, reg
));
28629 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
28631 rtx reg
= gen_rtx_REG (DFmode
, info
->first_fp_reg_save
+ i
);
28633 RTVEC_ELT (p
, elt
++)
28634 = gen_frame_load (reg
, sp_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28635 if (flag_shrink_wrap
28636 && save_reg_p (info
->first_fp_reg_save
+ i
))
28637 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28640 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28646 /* Ensure the cfa_restores are hung off an insn that won't
28647 be reordered above other restores. */
28648 emit_insn (gen_blockage ());
28650 emit_cfa_restores (cfa_restores
);
28654 /* Write function epilogue. */
28657 rs6000_output_function_epilogue (FILE *file
)
28660 macho_branch_islands ();
28663 rtx_insn
*insn
= get_last_insn ();
28664 rtx_insn
*deleted_debug_label
= NULL
;
28666 /* Mach-O doesn't support labels at the end of objects, so if
28667 it looks like we might want one, take special action.
28669 First, collect any sequence of deleted debug labels. */
28672 && NOTE_KIND (insn
) != NOTE_INSN_DELETED_LABEL
)
28674 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28675 notes only, instead set their CODE_LABEL_NUMBER to -1,
28676 otherwise there would be code generation differences
28677 in between -g and -g0. */
28678 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
28679 deleted_debug_label
= insn
;
28680 insn
= PREV_INSN (insn
);
28683 /* Second, if we have:
28686 then this needs to be detected, so skip past the barrier. */
28688 if (insn
&& BARRIER_P (insn
))
28689 insn
= PREV_INSN (insn
);
28691 /* Up to now we've only seen notes or barriers. */
28696 && NOTE_KIND (insn
) == NOTE_INSN_DELETED_LABEL
))
28697 /* Trailing label: <barrier>. */
28698 fputs ("\tnop\n", file
);
28701 /* Lastly, see if we have a completely empty function body. */
28702 while (insn
&& ! INSN_P (insn
))
28703 insn
= PREV_INSN (insn
);
28704 /* If we don't find any insns, we've got an empty function body;
28705 I.e. completely empty - without a return or branch. This is
28706 taken as the case where a function body has been removed
28707 because it contains an inline __builtin_unreachable(). GCC
28708 states that reaching __builtin_unreachable() means UB so we're
28709 not obliged to do anything special; however, we want
28710 non-zero-sized function bodies. To meet this, and help the
28711 user out, let's trap the case. */
28713 fputs ("\ttrap\n", file
);
28716 else if (deleted_debug_label
)
28717 for (insn
= deleted_debug_label
; insn
; insn
= NEXT_INSN (insn
))
28718 if (NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
28719 CODE_LABEL_NUMBER (insn
) = -1;
28723 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28726 We don't output a traceback table if -finhibit-size-directive was
28727 used. The documentation for -finhibit-size-directive reads
28728 ``don't output a @code{.size} assembler directive, or anything
28729 else that would cause trouble if the function is split in the
28730 middle, and the two halves are placed at locations far apart in
28731 memory.'' The traceback table has this property, since it
28732 includes the offset from the start of the function to the
28733 traceback table itself.
28735 System V.4 Powerpc's (and the embedded ABI derived from it) use a
28736 different traceback table. */
28737 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
28738 && ! flag_inhibit_size_directive
28739 && rs6000_traceback
!= traceback_none
&& !cfun
->is_thunk
)
28741 const char *fname
= NULL
;
28742 const char *language_string
= lang_hooks
.name
;
28743 int fixed_parms
= 0, float_parms
= 0, parm_info
= 0;
28745 int optional_tbtab
;
28746 rs6000_stack_t
*info
= rs6000_stack_info ();
28748 if (rs6000_traceback
== traceback_full
)
28749 optional_tbtab
= 1;
28750 else if (rs6000_traceback
== traceback_part
)
28751 optional_tbtab
= 0;
28753 optional_tbtab
= !optimize_size
&& !TARGET_ELF
;
28755 if (optional_tbtab
)
28757 fname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
28758 while (*fname
== '.') /* V.4 encodes . in the name */
28761 /* Need label immediately before tbtab, so we can compute
28762 its offset from the function start. */
28763 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28764 ASM_OUTPUT_LABEL (file
, fname
);
28767 /* The .tbtab pseudo-op can only be used for the first eight
28768 expressions, since it can't handle the possibly variable
28769 length fields that follow. However, if you omit the optional
28770 fields, the assembler outputs zeros for all optional fields
28771 anyways, giving each variable length field is minimum length
28772 (as defined in sys/debug.h). Thus we can not use the .tbtab
28773 pseudo-op at all. */
28775 /* An all-zero word flags the start of the tbtab, for debuggers
28776 that have to find it by searching forward from the entry
28777 point or from the current pc. */
28778 fputs ("\t.long 0\n", file
);
28780 /* Tbtab format type. Use format type 0. */
28781 fputs ("\t.byte 0,", file
);
28783 /* Language type. Unfortunately, there does not seem to be any
28784 official way to discover the language being compiled, so we
28785 use language_string.
28786 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28787 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28788 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28789 either, so for now use 0. */
28791 || ! strcmp (language_string
, "GNU GIMPLE")
28792 || ! strcmp (language_string
, "GNU Go")
28793 || ! strcmp (language_string
, "libgccjit"))
28795 else if (! strcmp (language_string
, "GNU F77")
28796 || lang_GNU_Fortran ())
28798 else if (! strcmp (language_string
, "GNU Pascal"))
28800 else if (! strcmp (language_string
, "GNU Ada"))
28802 else if (lang_GNU_CXX ()
28803 || ! strcmp (language_string
, "GNU Objective-C++"))
28805 else if (! strcmp (language_string
, "GNU Java"))
28807 else if (! strcmp (language_string
, "GNU Objective-C"))
28810 gcc_unreachable ();
28811 fprintf (file
, "%d,", i
);
28813 /* 8 single bit fields: global linkage (not set for C extern linkage,
28814 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28815 from start of procedure stored in tbtab, internal function, function
28816 has controlled storage, function has no toc, function uses fp,
28817 function logs/aborts fp operations. */
28818 /* Assume that fp operations are used if any fp reg must be saved. */
28819 fprintf (file
, "%d,",
28820 (optional_tbtab
<< 5) | ((info
->first_fp_reg_save
!= 64) << 1));
28822 /* 6 bitfields: function is interrupt handler, name present in
28823 proc table, function calls alloca, on condition directives
28824 (controls stack walks, 3 bits), saves condition reg, saves
28826 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28827 set up as a frame pointer, even when there is no alloca call. */
28828 fprintf (file
, "%d,",
28829 ((optional_tbtab
<< 6)
28830 | ((optional_tbtab
& frame_pointer_needed
) << 5)
28831 | (info
->cr_save_p
<< 1)
28832 | (info
->lr_save_p
)));
28834 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28836 fprintf (file
, "%d,",
28837 (info
->push_p
<< 7) | (64 - info
->first_fp_reg_save
));
28839 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28840 fprintf (file
, "%d,", (32 - first_reg_to_save ()));
28842 if (optional_tbtab
)
28844 /* Compute the parameter info from the function decl argument
28847 int next_parm_info_bit
= 31;
28849 for (decl
= DECL_ARGUMENTS (current_function_decl
);
28850 decl
; decl
= DECL_CHAIN (decl
))
28852 rtx parameter
= DECL_INCOMING_RTL (decl
);
28853 machine_mode mode
= GET_MODE (parameter
);
28855 if (GET_CODE (parameter
) == REG
)
28857 if (SCALAR_FLOAT_MODE_P (mode
))
28880 gcc_unreachable ();
28883 /* If only one bit will fit, don't or in this entry. */
28884 if (next_parm_info_bit
> 0)
28885 parm_info
|= (bits
<< (next_parm_info_bit
- 1));
28886 next_parm_info_bit
-= 2;
28890 fixed_parms
+= ((GET_MODE_SIZE (mode
)
28891 + (UNITS_PER_WORD
- 1))
28893 next_parm_info_bit
-= 1;
28899 /* Number of fixed point parameters. */
28900 /* This is actually the number of words of fixed point parameters; thus
28901 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28902 fprintf (file
, "%d,", fixed_parms
);
28904 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28906 /* This is actually the number of fp registers that hold parameters;
28907 and thus the maximum value is 13. */
28908 /* Set parameters on stack bit if parameters are not in their original
28909 registers, regardless of whether they are on the stack? Xlc
28910 seems to set the bit when not optimizing. */
28911 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
28913 if (optional_tbtab
)
28915 /* Optional fields follow. Some are variable length. */
28917 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28918 float, 11 double float. */
28919 /* There is an entry for each parameter in a register, in the order
28920 that they occur in the parameter list. Any intervening arguments
28921 on the stack are ignored. If the list overflows a long (max
28922 possible length 34 bits) then completely leave off all elements
28924 /* Only emit this long if there was at least one parameter. */
28925 if (fixed_parms
|| float_parms
)
28926 fprintf (file
, "\t.long %d\n", parm_info
);
28928 /* Offset from start of code to tb table. */
28929 fputs ("\t.long ", file
);
28930 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28931 RS6000_OUTPUT_BASENAME (file
, fname
);
28933 rs6000_output_function_entry (file
, fname
);
28936 /* Interrupt handler mask. */
28937 /* Omit this long, since we never set the interrupt handler bit
28940 /* Number of CTL (controlled storage) anchors. */
28941 /* Omit this long, since the has_ctl bit is never set above. */
28943 /* Displacement into stack of each CTL anchor. */
28944 /* Omit this list of longs, because there are no CTL anchors. */
28946 /* Length of function name. */
28949 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
28951 /* Function name. */
28952 assemble_string (fname
, strlen (fname
));
28954 /* Register for alloca automatic storage; this is always reg 31.
28955 Only emit this if the alloca bit was set above. */
28956 if (frame_pointer_needed
)
28957 fputs ("\t.byte 31\n", file
);
28959 fputs ("\t.align 2\n", file
);
28963 /* Arrange to define .LCTOC1 label, if not already done. */
28967 if (!toc_initialized
)
28969 switch_to_section (toc_section
);
28970 switch_to_section (current_function_section ());
/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
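/* gen_add3_const is used when expanding the split-stack prologue below,
   where the stack-size computation must be emitted as explicit add
   instructions that the linker can later rewrite for calls into
   non-split-stack code.  */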
28989 /* Emit -fsplit-stack prologue, which goes before the regular function
28990 prologue (at local entry point in the case of ELFv2). */
28993 rs6000_expand_split_stack_prologue (void)
28995 rs6000_stack_t
*info
= rs6000_stack_info ();
28996 unsigned HOST_WIDE_INT allocate
;
28997 long alloc_hi
, alloc_lo
;
28998 rtx r0
, r1
, r12
, lr
, ok_label
, compare
, jump
, call_fusage
;
29001 gcc_assert (flag_split_stack
&& reload_completed
);
29006 if (global_regs
[29])
29008 error ("%qs uses register r29", "-fsplit-stack");
29009 inform (DECL_SOURCE_LOCATION (global_regs_decl
[29]),
29010 "conflicts with %qD", global_regs_decl
[29]);
29013 allocate
= info
->total_size
;
29014 if (allocate
> (unsigned HOST_WIDE_INT
) 1 << 31)
29016 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29019 if (morestack_ref
== NULL_RTX
)
29021 morestack_ref
= gen_rtx_SYMBOL_REF (Pmode
, "__morestack");
29022 SYMBOL_REF_FLAGS (morestack_ref
) |= (SYMBOL_FLAG_LOCAL
29023 | SYMBOL_FLAG_FUNCTION
);
29026 r0
= gen_rtx_REG (Pmode
, 0);
29027 r1
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
29028 r12
= gen_rtx_REG (Pmode
, 12);
29029 emit_insn (gen_load_split_stack_limit (r0
));
29030 /* Always emit two insns here to calculate the requested stack,
29031 so that the linker can edit them when adjusting size for calling
29032 non-split-stack code. */
29033 alloc_hi
= (-allocate
+ 0x8000) & ~0xffffL
;
29034 alloc_lo
= -allocate
- alloc_hi
;
29037 emit_insn (gen_add3_const (r12
, r1
, alloc_hi
));
29039 emit_insn (gen_add3_const (r12
, r12
, alloc_lo
));
29041 emit_insn (gen_nop ());
29045 emit_insn (gen_add3_const (r12
, r1
, alloc_lo
));
29046 emit_insn (gen_nop ());
29049 compare
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
29050 emit_insn (gen_rtx_SET (compare
, gen_rtx_COMPARE (CCUNSmode
, r12
, r0
)));
29051 ok_label
= gen_label_rtx ();
29052 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
29053 gen_rtx_GEU (VOIDmode
, compare
, const0_rtx
),
29054 gen_rtx_LABEL_REF (VOIDmode
, ok_label
),
29056 insn
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
29057 JUMP_LABEL (insn
) = ok_label
;
29058 /* Mark the jump as very likely to be taken. */
29059 add_reg_br_prob_note (insn
, profile_probability::very_likely ());
29061 lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
29062 insn
= emit_move_insn (r0
, lr
);
29063 RTX_FRAME_RELATED_P (insn
) = 1;
29064 insn
= emit_insn (gen_frame_store (r0
, r1
, info
->lr_save_offset
));
29065 RTX_FRAME_RELATED_P (insn
) = 1;
29067 insn
= emit_call_insn (gen_call (gen_rtx_MEM (SImode
, morestack_ref
),
29068 const0_rtx
, const0_rtx
));
29069 call_fusage
= NULL_RTX
;
29070 use_reg (&call_fusage
, r12
);
29071 /* Say the call uses r0, even though it doesn't, to stop regrename
29072 from twiddling with the insns saving lr, trashing args for cfun.
29073 The insns restoring lr are similarly protected by making
29074 split_stack_return use r0. */
29075 use_reg (&call_fusage
, r0
);
29076 add_function_usage_to (insn
, call_fusage
);
29077 /* Indicate that this function can't jump to non-local gotos. */
29078 make_reg_eh_region_note_nothrow_nononlocal (insn
);
29079 emit_insn (gen_frame_load (r0
, r1
, info
->lr_save_offset
));
29080 insn
= emit_move_insn (lr
, r0
);
29081 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
29082 RTX_FRAME_RELATED_P (insn
) = 1;
29083 emit_insn (gen_split_stack_return ());
29085 emit_label (ok_label
);
29086 LABEL_NUSES (ok_label
) = 1;
29089 /* Return the internal arg pointer used for function incoming
29090 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29091 to copy it to a pseudo in order for it to be preserved over calls
29092 and suchlike. We'd really like to use a pseudo here for the
29093 internal arg pointer but data-flow analysis is not prepared to
29094 accept pseudos as live at the beginning of a function. */
29097 rs6000_internal_arg_pointer (void)
29099 if (flag_split_stack
29100 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun
->decl
))
29104 if (cfun
->machine
->split_stack_arg_pointer
== NULL_RTX
)
29108 cfun
->machine
->split_stack_arg_pointer
= gen_reg_rtx (Pmode
);
29109 REG_POINTER (cfun
->machine
->split_stack_arg_pointer
) = 1;
29111 /* Put the pseudo initialization right after the note at the
29112 beginning of the function. */
29113 pat
= gen_rtx_SET (cfun
->machine
->split_stack_arg_pointer
,
29114 gen_rtx_REG (Pmode
, 12));
29115 push_topmost_sequence ();
29116 emit_insn_after (pat
, get_insns ());
29117 pop_topmost_sequence ();
29119 return plus_constant (Pmode
, cfun
->machine
->split_stack_arg_pointer
,
29120 FIRST_PARM_OFFSET (current_function_decl
));
29122 return virtual_incoming_args_rtx
;
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */
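/* Under -fsplit-stack the incoming argument pointer arrives in r12 (see
   rs6000_internal_arg_pointer above), so r12 must be treated as live on
   entry to the function.  */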
static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */
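/* The generated code branches to LABEL when the requested stack pointer
   (sp - size) is still at or above the split-stack limit, i.e. when the
   dynamic allocation fits without needing __morestack; otherwise it
   falls through.  */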
void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                               gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
                               gen_rtx_LABEL_REF (VOIDmode, label),
                               pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
29196 rs6000_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
29197 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
29200 rtx this_rtx
, funexp
;
29203 reload_completed
= 1;
29204 epilogue_completed
= 1;
29206 /* Mark the end of the (empty) prologue. */
29207 emit_note (NOTE_INSN_PROLOGUE_END
);
29209 /* Find the "this" pointer. If the function returns a structure,
29210 the structure return pointer is in r3. */
29211 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
29212 this_rtx
= gen_rtx_REG (Pmode
, 4);
29214 this_rtx
= gen_rtx_REG (Pmode
, 3);
29216 /* Apply the constant offset, if required. */
29218 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, GEN_INT (delta
)));
29220 /* Apply the offset from the vtable, if required. */
29223 rtx vcall_offset_rtx
= GEN_INT (vcall_offset
);
29224 rtx tmp
= gen_rtx_REG (Pmode
, 12);
29226 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, this_rtx
));
29227 if (((unsigned HOST_WIDE_INT
) vcall_offset
) + 0x8000 >= 0x10000)
29229 emit_insn (gen_add3_insn (tmp
, tmp
, vcall_offset_rtx
));
29230 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
29234 rtx loc
= gen_rtx_PLUS (Pmode
, tmp
, vcall_offset_rtx
);
29236 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, loc
));
29238 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, tmp
));
29241 /* Generate a tail call to the target function. */
29242 if (!TREE_USED (function
))
29244 assemble_external (function
);
29245 TREE_USED (function
) = 1;
29247 funexp
= XEXP (DECL_RTL (function
), 0);
29248 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
29251 if (MACHOPIC_INDIRECT
)
29252 funexp
= machopic_indirect_call_target (funexp
);
29255 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29256 generate sibcall RTL explicitly. */
29257 insn
= emit_call_insn (
29258 gen_rtx_PARALLEL (VOIDmode
,
29260 gen_rtx_CALL (VOIDmode
,
29261 funexp
, const0_rtx
),
29262 gen_rtx_USE (VOIDmode
, const0_rtx
),
29263 simple_return_rtx
)));
29264 SIBLING_CALL_P (insn
) = 1;
29267 /* Run just enough of rest_of_compilation to get the insns emitted.
29268 There's not really enough bulk here to make other passes such as
29269 instruction scheduling worth while. Note that use_thunk calls
29270 assemble_start_function and assemble_end_function. */
29271 insn
= get_insns ();
29272 shorten_branches (insn
);
29273 final_start_function (insn
, file
, 1);
29274 final (insn
, file
, 1);
29275 final_end_function ();
29277 reload_completed
= 0;
29278 epilogue_completed
= 0;
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target       Flags           Name            One table per
   AIX          (none)          AIX TOC         object file
   AIX          -mfull-toc      AIX TOC         object file
   AIX          -mminimal-toc   AIX minimal TOC translation unit
   SVR4/EABI    (none)          SVR4 SDATA      object file
   SVR4/EABI    -fpic           SVR4 pic        object file
   SVR4/EABI    -fPIC           SVR4 PIC        translation unit
   SVR4/EABI    -mrelocatable   EABI TOC        function
   SVR4/EABI    -maix           AIX TOC         object file
   SVR4/EABI    -maix -mminimal-toc
                                AIX minimal TOC translation unit

   Name                 Reg.    Set by  entries contains:
                                        made by addrs?  fp?     sum?

   AIX TOC              2       crt0    as      Y       option  option
   AIX minimal TOC      30      prolog  gcc     Y       Y       option
   SVR4 SDATA           13      crt0    gcc     N       Y       N
   SVR4 pic             30      prolog  ld      Y       not yet N
   SVR4 PIC             30      prolog  gcc     Y       option  option
   EABI TOC             30      prolog  gcc     Y       option  option  */
29311 rs6000_hash_constant (rtx k
)
29313 enum rtx_code code
= GET_CODE (k
);
29314 machine_mode mode
= GET_MODE (k
);
29315 unsigned result
= (code
<< 3) ^ mode
;
29316 const char *format
;
29319 format
= GET_RTX_FORMAT (code
);
29320 flen
= strlen (format
);
29326 return result
* 1231 + (unsigned) INSN_UID (XEXP (k
, 0));
29328 case CONST_WIDE_INT
:
29331 flen
= CONST_WIDE_INT_NUNITS (k
);
29332 for (i
= 0; i
< flen
; i
++)
29333 result
= result
* 613 + CONST_WIDE_INT_ELT (k
, i
);
29338 if (mode
!= VOIDmode
)
29339 return real_hash (CONST_DOUBLE_REAL_VALUE (k
)) * result
;
29351 for (; fidx
< flen
; fidx
++)
29352 switch (format
[fidx
])
29357 const char *str
= XSTR (k
, fidx
);
29358 len
= strlen (str
);
29359 result
= result
* 613 + len
;
29360 for (i
= 0; i
< len
; i
++)
29361 result
= result
* 613 + (unsigned) str
[i
];
29366 result
= result
* 1231 + rs6000_hash_constant (XEXP (k
, fidx
));
29370 result
= result
* 613 + (unsigned) XINT (k
, fidx
);
29373 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT
))
29374 result
= result
* 613 + (unsigned) XWINT (k
, fidx
);
29378 for (i
= 0; i
< sizeof (HOST_WIDE_INT
) / sizeof (unsigned); i
++)
29379 result
= result
* 613 + (unsigned) (XWINT (k
, fidx
)
29386 gcc_unreachable ();
29393 toc_hasher::hash (toc_hash_struct
*thc
)
29395 return rs6000_hash_constant (thc
->key
) ^ thc
->key_mode
;
29398 /* Compare H1 and H2 for equivalence. */
29401 toc_hasher::equal (toc_hash_struct
*h1
, toc_hash_struct
*h2
)
29406 if (h1
->key_mode
!= h2
->key_mode
)
29409 return rtx_equal_p (r1
, r2
);
29412 /* These are the names given by the C++ front-end to vtables, and
29413 vtable-like objects. Ideally, this logic should not be here;
29414 instead, there should be some programmatic way of inquiring as
29415 to whether or not an object is a vtable. */
29417 #define VTABLE_NAME_P(NAME) \
29418 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29419 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29420 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29421 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29422 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29424 #ifdef NO_DOLLAR_IN_LABEL
29425 /* Return a GGC-allocated character string translating dollar signs in
29426 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29429 rs6000_xcoff_strip_dollar (const char *name
)
29435 q
= (const char *) strchr (name
, '$');
29437 if (q
== 0 || q
== name
)
29440 len
= strlen (name
);
29441 strip
= XALLOCAVEC (char, len
+ 1);
29442 strcpy (strip
, name
);
29443 p
= strip
+ (q
- name
);
29447 p
= strchr (p
+ 1, '$');
29450 return ggc_alloc_string (strip
, len
);
29455 rs6000_output_symbol_ref (FILE *file, rtx x)
29457   const char *name = XSTR (x, 0);
29459   /* Currently C++ toc references to vtables can be emitted before it
29460      is decided whether the vtable is public or private.  If this is
29461      the case, then the linker will eventually complain that there is
29462      a reference to an unknown section.  Thus, for vtables only,
29463      we emit the TOC reference to reference the identifier and not the
29465   if (VTABLE_NAME_P (name))
29467     RS6000_OUTPUT_BASENAME (file, name);
29470     assemble_name (file, name);
29473 /* Output a TOC entry.  We derive the entry name from what is being
29477 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29480   const char *name = buf;
29482   HOST_WIDE_INT offset = 0;
29484   gcc_assert (!TARGET_NO_TOC);
29486   /* When the linker won't eliminate them, don't output duplicate
29487      TOC entries (this happens on AIX if there is any kind of TOC,
29488      and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
29490   if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29492       struct toc_hash_struct *h;
29494       /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
29495          time because GGC is not initialized at that point.  */
29496       if (toc_hash_table == NULL)
29497         toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29499       h = ggc_alloc<toc_hash_struct> ();
29501       h->key_mode = mode;
29502       h->labelno = labelno;
29504       toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29505       if (*found == NULL)
29507       else  /* This is indeed a duplicate.
29508                Set this label equal to that label.  */
29510           fputs ("\t.set ", file);
29511           ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29512           fprintf (file, "%d,", labelno);
29513           ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29514           fprintf (file, "%d\n", ((*found)->labelno));
29517           if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29518               && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29519                   || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29521               fputs ("\t.set ", file);
29522               ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29523               fprintf (file, "%d,", labelno);
29524               ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29525               fprintf (file, "%d\n", ((*found)->labelno));
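/* Editor's sketch (not GCC code): the duplicate elimination above keys a
   GGC hash table on the constant's RTX and mode; a repeated constant gets
   no new TOC slot -- its label is simply aliased to the first occurrence
   with the ".set" directive emitted just above.  Minimal illustration of
   the directive; the ".LC" prefix and helper name are hypothetical, since
   the real prefix comes from ASM_OUTPUT_INTERNAL_LABEL_PREFIX.  */
static void
example_alias_toc_label (FILE *file, int new_labelno, int old_labelno)
{
  /* E.g. a constant first seen at label 7 and met again at label 42
     would emit ".set .LC42,.LC7" instead of a second TOC entry.  */
  fprintf (file, "\t.set .LC%d,.LC%d\n", new_labelno, old_labelno);
}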
29532   /* If we're going to put a double constant in the TOC, make sure it's
29533      aligned properly when strict alignment is on.  */
29534   if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29535       && STRICT_ALIGNMENT
29536       && GET_MODE_BITSIZE (mode) >= 64
29537       && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29538     ASM_OUTPUT_ALIGN (file, 3);
29541   (*targetm.asm_out.internal_label) (file, "LC", labelno);
29543 /* Handle FP constants specially. Note that if we have a minimal
29544 TOC, things we put here aren't actually in the TOC, so we can allow
29546 if (GET_CODE (x
) == CONST_DOUBLE
&&
29547 (GET_MODE (x
) == TFmode
|| GET_MODE (x
) == TDmode
29548 || GET_MODE (x
) == IFmode
|| GET_MODE (x
) == KFmode
))
29552 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29553 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29555 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29559 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29560 fputs (DOUBLE_INT_ASM_OP
, file
);
29562 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29563 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29564 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29565 fprintf (file
, "0x%lx%08lx,0x%lx%08lx\n",
29566 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29567 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff,
29568 k
[WORDS_BIG_ENDIAN
? 2 : 3] & 0xffffffff,
29569 k
[WORDS_BIG_ENDIAN
? 3 : 2] & 0xffffffff);
29574 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29575 fputs ("\t.long ", file
);
29577 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29578 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29579 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29580 fprintf (file
, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29581 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29582 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29586 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29587 (GET_MODE (x
) == DFmode
|| GET_MODE (x
) == DDmode
))
29591 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29592 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29594 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29598 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29599 fputs (DOUBLE_INT_ASM_OP
, file
);
29601 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29602 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29603 fprintf (file
, "0x%lx%08lx\n",
29604 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29605 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff);
29610 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29611 fputs ("\t.long ", file
);
29613 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29614 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29615 fprintf (file
, "0x%lx,0x%lx\n",
29616 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29620 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29621 (GET_MODE (x
) == SFmode
|| GET_MODE (x
) == SDmode
))
29625 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29626 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29628 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29632 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29633 fputs (DOUBLE_INT_ASM_OP
, file
);
29635 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29636 if (WORDS_BIG_ENDIAN
)
29637 fprintf (file
, "0x%lx00000000\n", l
& 0xffffffff);
29639 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29644 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29645 fputs ("\t.long ", file
);
29647 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29648 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29652 else if (GET_MODE (x
) == VOIDmode
&& GET_CODE (x
) == CONST_INT
)
29654 unsigned HOST_WIDE_INT low
;
29655 HOST_WIDE_INT high
;
29657 low
= INTVAL (x
) & 0xffffffff;
29658 high
= (HOST_WIDE_INT
) INTVAL (x
) >> 32;
29660 /* TOC entries are always Pmode-sized, so when big-endian
29661 smaller integer constants in the TOC need to be padded.
29662 (This is still a win over putting the constants in
29663 a separate constant pool, because then we'd have
29664 to have both a TOC entry _and_ the actual constant.)
29666 For a 32-bit target, CONST_INT values are loaded and shifted
29667 entirely within `low' and can be stored in one TOC entry. */
29669 /* It would be easy to make this work, but it doesn't now. */
29670 gcc_assert (!TARGET_64BIT
|| POINTER_SIZE
>= GET_MODE_BITSIZE (mode
));
29672 if (WORDS_BIG_ENDIAN
&& POINTER_SIZE
> GET_MODE_BITSIZE (mode
))
29675 low
<<= POINTER_SIZE
- GET_MODE_BITSIZE (mode
);
29676 high
= (HOST_WIDE_INT
) low
>> 32;
29682 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29683 fputs (DOUBLE_INT_ASM_OP
, file
);
29685 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29686 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29687 fprintf (file
, "0x%lx%08lx\n",
29688 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29693 if (POINTER_SIZE
< GET_MODE_BITSIZE (mode
))
29695 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29696 fputs ("\t.long ", file
);
29698 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29699 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29700 fprintf (file
, "0x%lx,0x%lx\n",
29701 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29705 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29706 fputs ("\t.long ", file
);
29708 fprintf (file
, "\t.tc IS_%lx[TC],", (long) low
& 0xffffffff);
29709 fprintf (file
, "0x%lx\n", (long) low
& 0xffffffff);
29715 if (GET_CODE (x
) == CONST
)
29717 gcc_assert (GET_CODE (XEXP (x
, 0)) == PLUS
29718 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
);
29720 base
= XEXP (XEXP (x
, 0), 0);
29721 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
29724 switch (GET_CODE (base
))
29727 name
= XSTR (base
, 0);
29731 ASM_GENERATE_INTERNAL_LABEL (buf
, "L",
29732 CODE_LABEL_NUMBER (XEXP (base
, 0)));
29736 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (base
));
29740 gcc_unreachable ();
29743 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29744 fputs (TARGET_32BIT
? "\t.long " : DOUBLE_INT_ASM_OP
, file
);
29747 fputs ("\t.tc ", file
);
29748 RS6000_OUTPUT_BASENAME (file
, name
);
29751 fprintf (file
, ".N" HOST_WIDE_INT_PRINT_UNSIGNED
, - offset
);
29753 fprintf (file
, ".P" HOST_WIDE_INT_PRINT_UNSIGNED
, offset
);
29755 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29756 after other TOC symbols, reducing overflow of small TOC access
29757 to [TC] symbols. */
29758 fputs (TARGET_XCOFF
&& TARGET_CMODEL
!= CMODEL_SMALL
29759 ? "[TE]," : "[TC],", file
);
29762 /* Currently C++ toc references to vtables can be emitted before it
29763 is decided whether the vtable is public or private. If this is
29764 the case, then the linker will eventually complain that there is
29765 a TOC reference to an unknown section. Thus, for vtables only,
29766 we emit the TOC reference to reference the symbol and not the
29768 if (VTABLE_NAME_P (name
))
29770 RS6000_OUTPUT_BASENAME (file
, name
);
29772 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
29773 else if (offset
> 0)
29774 fprintf (file
, "+" HOST_WIDE_INT_PRINT_DEC
, offset
);
29777 output_addr_const (file
, x
);
29780 if (TARGET_XCOFF
&& GET_CODE (base
) == SYMBOL_REF
)
29782 switch (SYMBOL_REF_TLS_MODEL (base
))
29786 case TLS_MODEL_LOCAL_EXEC
:
29787 fputs ("@le", file
);
29789 case TLS_MODEL_INITIAL_EXEC
:
29790 fputs ("@ie", file
);
29792 /* Use global-dynamic for local-dynamic. */
29793 case TLS_MODEL_GLOBAL_DYNAMIC
:
29794 case TLS_MODEL_LOCAL_DYNAMIC
:
29796 (*targetm
.asm_out
.internal_label
) (file
, "LCM", labelno
);
29797 fputs ("\t.tc .", file
);
29798 RS6000_OUTPUT_BASENAME (file
, name
);
29799 fputs ("[TC],", file
);
29800 output_addr_const (file
, x
);
29801 fputs ("@m", file
);
29804 gcc_unreachable ();
29812 /* Output an assembler pseudo-op to write an ASCII string of N characters
29813    starting at P to FILE.
29815    On the RS/6000, we have to do this using the .byte operation and
29816    write out special characters outside the quoted string.
29817    Also, the assembler is broken; very long strings are truncated,
29818    so we must artificially break them up early.  */
29821 output_ascii (FILE *file, const char *p, int n)
29824   int i, count_string;
29825   const char *for_string = "\t.byte \"";
29826   const char *for_decimal = "\t.byte ";
29827   const char *to_close = NULL;
29830   for (i = 0; i < n; i++)
29833       if (c >= ' ' && c < 0177)
29836             fputs (for_string, file);
29839           /* Write two quotes to get one.  */
29847           for_decimal = "\"\n\t.byte ";
29851           if (count_string >= 512)
29853               fputs (to_close, file);
29855               for_string = "\t.byte \"";
29856               for_decimal = "\t.byte ";
29864             fputs (for_decimal, file);
29865           fprintf (file, "%d", c);
29867           for_string = "\n\t.byte \"";
29868           for_decimal = ", ";
29874   /* Now close the string if we have written one.  Then end the line.  */
29876     fputs (to_close, file);
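/* Editor's sketch (not GCC code): a simplified standalone version of the
   .byte emission described above -- printable characters are kept inside a
   quoted .byte "..." run, everything else is written numerically, and long
   quoted runs are broken early (the 512 cut-off mirrors count_string).
   Unlike the real routine it sidesteps quote escaping by emitting '"' and
   '\' numerically.  Assumes <stdio.h>; the name is hypothetical.  */
static void
example_output_ascii (FILE *file, const char *p, int n)
{
  int in_string = 0, count = 0;

  for (int i = 0; i < n; i++)
    {
      unsigned char c = (unsigned char) p[i];

      if (c >= ' ' && c < 0177 && c != '"' && c != '\\')
        {
          if (!in_string)
            {
              fputs ("\t.byte \"", file);       /* open a quoted run */
              in_string = 1;
              count = 0;
            }
          putc (c, file);
          if (++count >= 512)                   /* break long runs early */
            {
              fputs ("\"\n", file);
              in_string = 0;
            }
        }
      else
        {
          if (in_string)
            {
              fputs ("\"\n", file);             /* close the quoted run */
              in_string = 0;
            }
          fprintf (file, "\t.byte %d\n", (int) c);
        }
    }

  if (in_string)
    fputs ("\"\n", file);
}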
29879 /* Generate a unique section name for FILENAME for a section type
29880    represented by SECTION_DESC.  Output goes into BUF.
29882    SECTION_DESC can be any string, as long as it is different for each
29883    possible section type.
29885    We name the section in the same manner as xlc.  The name begins with an
29886    underscore followed by the filename (after stripping any leading directory
29887    names) with the last period replaced by the string SECTION_DESC.  If
29888    FILENAME does not contain a period, SECTION_DESC is appended to the end of
29892 rs6000_gen_section_name (char **buf, const char *filename,
29893                          const char *section_desc)
29895   const char *q, *after_last_slash, *last_period = 0;
29899   after_last_slash = filename;
29900   for (q = filename; *q; q++)
29903         after_last_slash = q + 1;
29904       else if (*q == '.')
29908   len = strlen (after_last_slash) + strlen (section_desc) + 2;
29909   *buf = (char *) xmalloc (len);
29914   for (q = after_last_slash; *q; q++)
29916       if (q == last_period)
29918           strcpy (p, section_desc);
29919           p += strlen (section_desc);
29923       else if (ISALNUM (*q))
29927   if (last_period == 0)
29928     strcpy (p, section_desc);
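/* Editor's sketch (not GCC code): the xlc-style naming described above --
   '_' + basename of FILENAME with its last period replaced by SECTION_DESC
   (or SECTION_DESC appended when there is no period), keeping only
   alphanumeric characters.  Uses malloc instead of xmalloc; assumes
   <stdlib.h>, <string.h> and <ctype.h>; the name is hypothetical.  */
static char *
example_gen_section_name (const char *filename, const char *section_desc)
{
  const char *base = strrchr (filename, '/');
  base = base ? base + 1 : filename;
  const char *last_period = strrchr (base, '.');

  char *buf = (char *) malloc (strlen (base) + strlen (section_desc) + 2);
  if (buf == NULL)
    return NULL;

  char *p = buf;
  *p++ = '_';                           /* names start with an underscore */

  for (const char *q = base; *q; q++)
    {
      if (q == last_period)
        {
          strcpy (p, section_desc);     /* replace '.', drop the extension */
          return buf;
        }
      else if (isalnum ((unsigned char) *q))
        *p++ = *q;                      /* keep only alphanumerics */
    }

  strcpy (p, section_desc);             /* no period: append at the end */
  return buf;
}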
29933 /* Emit profile function. */
29936 output_profile_hook (int labelno ATTRIBUTE_UNUSED
)
29938 /* Non-standard profiling for kernels, which just saves LR then calls
29939 _mcount without worrying about arg saves. The idea is to change
29940 the function prologue as little as possible as it isn't easy to
29941 account for arg save/restore code added just for _mcount. */
29942 if (TARGET_PROFILE_KERNEL
)
29945 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
29947 #ifndef NO_PROFILE_COUNTERS
29948 # define NO_PROFILE_COUNTERS 0
29950 if (NO_PROFILE_COUNTERS
)
29951 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
29952 LCT_NORMAL
, VOIDmode
);
29956 const char *label_name
;
29959 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
29960 label_name
= ggc_strdup ((*targetm
.strip_name_encoding
) (buf
));
29961 fun
= gen_rtx_SYMBOL_REF (Pmode
, label_name
);
29963 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
29964 LCT_NORMAL
, VOIDmode
, fun
, Pmode
);
29967 else if (DEFAULT_ABI
== ABI_DARWIN
)
29969 const char *mcount_name
= RS6000_MCOUNT
;
29970 int caller_addr_regno
= LR_REGNO
;
29972 /* Be conservative and always set this, at least for now. */
29973 crtl
->uses_pic_offset_table
= 1;
29976 /* For PIC code, set up a stub and collect the caller's address
29977 from r0, which is where the prologue puts it. */
29978 if (MACHOPIC_INDIRECT
29979 && crtl
->uses_pic_offset_table
)
29980 caller_addr_regno
= 0;
29982 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, mcount_name
),
29983 LCT_NORMAL
, VOIDmode
,
29984 gen_rtx_REG (Pmode
, caller_addr_regno
), Pmode
);
29988 /* Write function profiler code. */
29991 output_function_profiler (FILE *file
, int labelno
)
29995 switch (DEFAULT_ABI
)
29998 gcc_unreachable ();
30003 warning (0, "no profiling of 64-bit code for this ABI");
30006 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
30007 fprintf (file
, "\tmflr %s\n", reg_names
[0]);
30008 if (NO_PROFILE_COUNTERS
)
30010 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30011 reg_names
[0], reg_names
[1]);
30013 else if (TARGET_SECURE_PLT
&& flag_pic
)
30015 if (TARGET_LINK_STACK
)
30018 get_ppc476_thunk_name (name
);
30019 asm_fprintf (file
, "\tbl %s\n", name
);
30022 asm_fprintf (file
, "\tbcl 20,31,1f\n1:\n");
30023 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30024 reg_names
[0], reg_names
[1]);
30025 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30026 asm_fprintf (file
, "\taddis %s,%s,",
30027 reg_names
[12], reg_names
[12]);
30028 assemble_name (file
, buf
);
30029 asm_fprintf (file
, "-1b@ha\n\tla %s,", reg_names
[0]);
30030 assemble_name (file
, buf
);
30031 asm_fprintf (file
, "-1b@l(%s)\n", reg_names
[12]);
30033 else if (flag_pic
== 1)
30035 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file
);
30036 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30037 reg_names
[0], reg_names
[1]);
30038 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30039 asm_fprintf (file
, "\tlwz %s,", reg_names
[0]);
30040 assemble_name (file
, buf
);
30041 asm_fprintf (file
, "@got(%s)\n", reg_names
[12]);
30043 else if (flag_pic
> 1)
30045 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30046 reg_names
[0], reg_names
[1]);
30047 /* Now, we need to get the address of the label. */
30048 if (TARGET_LINK_STACK
)
30051 get_ppc476_thunk_name (name
);
30052 asm_fprintf (file
, "\tbl %s\n\tb 1f\n\t.long ", name
);
30053 assemble_name (file
, buf
);
30054 fputs ("-.\n1:", file
);
30055 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30056 asm_fprintf (file
, "\taddi %s,%s,4\n",
30057 reg_names
[11], reg_names
[11]);
30061 fputs ("\tbcl 20,31,1f\n\t.long ", file
);
30062 assemble_name (file
, buf
);
30063 fputs ("-.\n1:", file
);
30064 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30066 asm_fprintf (file
, "\tlwz %s,0(%s)\n",
30067 reg_names
[0], reg_names
[11]);
30068 asm_fprintf (file
, "\tadd %s,%s,%s\n",
30069 reg_names
[0], reg_names
[0], reg_names
[11]);
30073 asm_fprintf (file
, "\tlis %s,", reg_names
[12]);
30074 assemble_name (file
, buf
);
30075 fputs ("@ha\n", file
);
30076 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30077 reg_names
[0], reg_names
[1]);
30078 asm_fprintf (file
, "\tla %s,", reg_names
[0]);
30079 assemble_name (file
, buf
);
30080 asm_fprintf (file
, "@l(%s)\n", reg_names
[12]);
30083 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30084 fprintf (file
, "\tbl %s%s\n",
30085 RS6000_MCOUNT
, flag_pic
? "@plt" : "");
30091 /* Don't do anything, done in output_profile_hook (). */
30098 /* The following variable value is the last issued insn. */
30100 static rtx_insn
*last_scheduled_insn
;
30102 /* The following variable helps to balance issuing of load and
30103 store instructions */
30105 static int load_store_pendulum
;
30107 /* The following variable helps pair divide insns during scheduling. */
30108 static int divide_cnt
;
30109 /* The following variable helps pair and alternate vector and vector load
30110 insns during scheduling. */
30111 static int vec_pairing
;
30114 /* Power4 load update and store update instructions are cracked into a
30115 load or store and an integer insn which are executed in the same cycle.
30116 Branches have their own dispatch slot which does not count against the
30117 GCC issue rate, but it changes the program flow so there are no other
30118 instructions to issue in this cycle. */
30121 rs6000_variable_issue_1 (rtx_insn
*insn
, int more
)
30123 last_scheduled_insn
= insn
;
30124 if (GET_CODE (PATTERN (insn
)) == USE
30125 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30127 cached_can_issue_more
= more
;
30128 return cached_can_issue_more
;
30131 if (insn_terminates_group_p (insn
, current_group
))
30133 cached_can_issue_more
= 0;
30134 return cached_can_issue_more
;
30137 /* If no reservation, but reach here */
30138 if (recog_memoized (insn
) < 0)
30141 if (rs6000_sched_groups
)
30143 if (is_microcoded_insn (insn
))
30144 cached_can_issue_more
= 0;
30145 else if (is_cracked_insn (insn
))
30146 cached_can_issue_more
= more
> 2 ? more
- 2 : 0;
30148 cached_can_issue_more
= more
- 1;
30150 return cached_can_issue_more
;
30153 if (rs6000_cpu_attr
== CPU_CELL
&& is_nonpipeline_insn (insn
))
30156 cached_can_issue_more
= more
- 1;
30157 return cached_can_issue_more
;
30161 rs6000_variable_issue (FILE *stream
, int verbose
, rtx_insn
*insn
, int more
)
30163 int r
= rs6000_variable_issue_1 (insn
, more
);
30165 fprintf (stream
, "// rs6000_variable_issue (more = %d) = %d\n", more
, r
);
30169 /* Adjust the cost of a scheduling dependency. Return the new cost of
30170 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30173 rs6000_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
, int cost
,
30176 enum attr_type attr_type
;
30178 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
30185 /* Data dependency; DEP_INSN writes a register that INSN reads
30186 some cycles later. */
30188 /* Separate a load from a narrower, dependent store. */
30189 if ((rs6000_sched_groups
|| rs6000_cpu_attr
== CPU_POWER9
)
30190 && GET_CODE (PATTERN (insn
)) == SET
30191 && GET_CODE (PATTERN (dep_insn
)) == SET
30192 && GET_CODE (XEXP (PATTERN (insn
), 1)) == MEM
30193 && GET_CODE (XEXP (PATTERN (dep_insn
), 0)) == MEM
30194 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn
), 1)))
30195 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn
), 0)))))
30198 attr_type
= get_attr_type (insn
);
30203 /* Tell the first scheduling pass about the latency between
30204 a mtctr and bctr (and mtlr and br/blr). The first
30205 scheduling pass will not know about this latency since
30206 the mtctr instruction, which has the latency associated
30207 to it, will be generated by reload. */
30210 /* Leave some extra cycles between a compare and its
30211 dependent branch, to inhibit expensive mispredicts. */
30212 if ((rs6000_cpu_attr
== CPU_PPC603
30213 || rs6000_cpu_attr
== CPU_PPC604
30214 || rs6000_cpu_attr
== CPU_PPC604E
30215 || rs6000_cpu_attr
== CPU_PPC620
30216 || rs6000_cpu_attr
== CPU_PPC630
30217 || rs6000_cpu_attr
== CPU_PPC750
30218 || rs6000_cpu_attr
== CPU_PPC7400
30219 || rs6000_cpu_attr
== CPU_PPC7450
30220 || rs6000_cpu_attr
== CPU_PPCE5500
30221 || rs6000_cpu_attr
== CPU_PPCE6500
30222 || rs6000_cpu_attr
== CPU_POWER4
30223 || rs6000_cpu_attr
== CPU_POWER5
30224 || rs6000_cpu_attr
== CPU_POWER7
30225 || rs6000_cpu_attr
== CPU_POWER8
30226 || rs6000_cpu_attr
== CPU_POWER9
30227 || rs6000_cpu_attr
== CPU_CELL
)
30228 && recog_memoized (dep_insn
)
30229 && (INSN_CODE (dep_insn
) >= 0))
30231 switch (get_attr_type (dep_insn
))
30234 case TYPE_FPCOMPARE
:
30235 case TYPE_CR_LOGICAL
:
30236 case TYPE_DELAYED_CR
:
30240 if (get_attr_dot (dep_insn
) == DOT_YES
)
30245 if (get_attr_dot (dep_insn
) == DOT_YES
30246 && get_attr_var_shift (dep_insn
) == VAR_SHIFT_NO
)
30257 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30258 && recog_memoized (dep_insn
)
30259 && (INSN_CODE (dep_insn
) >= 0))
30262 if (GET_CODE (PATTERN (insn
)) != SET
)
30263 /* If this happens, we have to extend this to schedule
30264 optimally. Return default for now. */
30267 /* Adjust the cost for the case where the value written
30268 by a fixed point operation is used as the address
30269 gen value on a store. */
30270 switch (get_attr_type (dep_insn
))
30275 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30276 return get_attr_sign_extend (dep_insn
)
30277 == SIGN_EXTEND_YES
? 6 : 4;
30282 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30283 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30293 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30301 if (get_attr_update (dep_insn
) == UPDATE_YES
30302 && ! rs6000_store_data_bypass_p (dep_insn
, insn
))
30308 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30314 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30315 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30325 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30326 && recog_memoized (dep_insn
)
30327 && (INSN_CODE (dep_insn
) >= 0))
30330 /* Adjust the cost for the case where the value written
30331 by a fixed point instruction is used within the address
30332 gen portion of a subsequent load(u)(x) */
30333 switch (get_attr_type (dep_insn
))
30338 if (set_to_load_agen (dep_insn
, insn
))
30339 return get_attr_sign_extend (dep_insn
)
30340 == SIGN_EXTEND_YES
? 6 : 4;
30345 if (set_to_load_agen (dep_insn
, insn
))
30346 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30356 if (set_to_load_agen (dep_insn
, insn
))
30364 if (get_attr_update (dep_insn
) == UPDATE_YES
30365 && set_to_load_agen (dep_insn
, insn
))
30371 if (set_to_load_agen (dep_insn
, insn
))
30377 if (set_to_load_agen (dep_insn
, insn
))
30378 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30388 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30389 && get_attr_update (insn
) == UPDATE_NO
30390 && recog_memoized (dep_insn
)
30391 && (INSN_CODE (dep_insn
) >= 0)
30392 && (get_attr_type (dep_insn
) == TYPE_MFFGPR
))
30399 /* Fall out to return default cost. */
30403 case REG_DEP_OUTPUT
:
30404 /* Output dependency; DEP_INSN writes a register that INSN writes some
30406 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30407 && recog_memoized (dep_insn
)
30408 && (INSN_CODE (dep_insn
) >= 0))
30410 attr_type
= get_attr_type (insn
);
30415 case TYPE_FPSIMPLE
:
30416 if (get_attr_type (dep_insn
) == TYPE_FP
30417 || get_attr_type (dep_insn
) == TYPE_FPSIMPLE
)
30421 if (get_attr_update (insn
) == UPDATE_NO
30422 && get_attr_type (dep_insn
) == TYPE_MFFGPR
)
30429 /* Fall through, no cost for output dependency. */
30433 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30438 gcc_unreachable ();
30444 /* Debug version of rs6000_adjust_cost. */
30447 rs6000_debug_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
,
30448 int cost
, unsigned int dw
)
30450 int ret
= rs6000_adjust_cost (insn
, dep_type
, dep_insn
, cost
, dw
);
30458       default:             dep = "unknown dependency"; break;
30459       case REG_DEP_TRUE:   dep = "data dependency";    break;
30460       case REG_DEP_OUTPUT: dep = "output dependency";  break;
30461       case REG_DEP_ANTI:   dep = "anti dependency";    break;
30465 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30466 "%s, insn:\n", ret
, cost
, dep
);
30474 /* The function returns true if INSN is microcoded.
30475    Return false otherwise.  */
30478 is_microcoded_insn (rtx_insn
*insn
)
30480 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30481 || GET_CODE (PATTERN (insn
)) == USE
30482 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30485 if (rs6000_cpu_attr
== CPU_CELL
)
30486 return get_attr_cell_micro (insn
) == CELL_MICRO_ALWAYS
;
30488 if (rs6000_sched_groups
30489 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30491 enum attr_type type
= get_attr_type (insn
);
30492 if ((type
== TYPE_LOAD
30493 && get_attr_update (insn
) == UPDATE_YES
30494 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
)
30495 || ((type
== TYPE_LOAD
|| type
== TYPE_STORE
)
30496 && get_attr_update (insn
) == UPDATE_YES
30497 && get_attr_indexed (insn
) == INDEXED_YES
)
30498 || type
== TYPE_MFCR
)
30505 /* The function returns true if INSN is cracked into 2 instructions
30506 by the processor (and therefore occupies 2 issue slots). */
30509 is_cracked_insn (rtx_insn
*insn
)
30511 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30512 || GET_CODE (PATTERN (insn
)) == USE
30513 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30516 if (rs6000_sched_groups
30517 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30519 enum attr_type type
= get_attr_type (insn
);
30520 if ((type
== TYPE_LOAD
30521 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
30522 && get_attr_update (insn
) == UPDATE_NO
)
30523 || (type
== TYPE_LOAD
30524 && get_attr_sign_extend (insn
) == SIGN_EXTEND_NO
30525 && get_attr_update (insn
) == UPDATE_YES
30526 && get_attr_indexed (insn
) == INDEXED_NO
)
30527 || (type
== TYPE_STORE
30528 && get_attr_update (insn
) == UPDATE_YES
30529 && get_attr_indexed (insn
) == INDEXED_NO
)
30530 || ((type
== TYPE_FPLOAD
|| type
== TYPE_FPSTORE
)
30531 && get_attr_update (insn
) == UPDATE_YES
)
30532 || type
== TYPE_DELAYED_CR
30533 || (type
== TYPE_EXTS
30534 && get_attr_dot (insn
) == DOT_YES
)
30535 || (type
== TYPE_SHIFT
30536 && get_attr_dot (insn
) == DOT_YES
30537 && get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
30538 || (type
== TYPE_MUL
30539 && get_attr_dot (insn
) == DOT_YES
)
30540 || type
== TYPE_DIV
30541 || (type
== TYPE_INSERT
30542 && get_attr_size (insn
) == SIZE_32
))
30549 /* The function returns true if INSN can be issued only from
30550 the branch slot. */
30553 is_branch_slot_insn (rtx_insn
*insn
)
30555 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30556 || GET_CODE (PATTERN (insn
)) == USE
30557 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30560 if (rs6000_sched_groups
)
30562 enum attr_type type
= get_attr_type (insn
);
30563 if (type
== TYPE_BRANCH
|| type
== TYPE_JMPREG
)
30571 /* The function returns true if OUT_INSN sets a value that is
30572    used in the address generation computation of IN_INSN.  */
30574 set_to_load_agen (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
30576 rtx out_set
, in_set
;
30578 /* For performance reasons, only handle the simple case where
30579 both loads are a single_set. */
30580 out_set
= single_set (out_insn
);
30583 in_set
= single_set (in_insn
);
30585 return reg_mentioned_p (SET_DEST (out_set
), SET_SRC (in_set
));
30591 /* Try to determine base/offset/size parts of the given MEM.
30592    Return true if successful, false if all the values couldn't
30595    This function only looks for REG or REG+CONST address forms.
30596    REG+REG address form will return false.  */
30599 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30600                   HOST_WIDE_INT *size)
30603   if MEM_SIZE_KNOWN_P (mem)
30604     *size = MEM_SIZE (mem);
30608   addr_rtx = (XEXP (mem, 0));
30609   if (GET_CODE (addr_rtx) == PRE_MODIFY)
30610     addr_rtx = XEXP (addr_rtx, 1);
30613   while (GET_CODE (addr_rtx) == PLUS
30614          && CONST_INT_P (XEXP (addr_rtx, 1)))
30616       *offset += INTVAL (XEXP (addr_rtx, 1));
30617       addr_rtx = XEXP (addr_rtx, 0);
30619   if (!REG_P (addr_rtx))
30626 /* The function returns true if the target storage location of
30627    mem1 is adjacent to the target storage location of mem2 */
30628 /* Return 1 if memory locations are adjacent.  */
30631 adjacent_mem_locations (rtx mem1, rtx mem2)
30634   HOST_WIDE_INT off1, size1, off2, size2;
30636   if (get_memref_parts (mem1, &reg1, &off1, &size1)
30637       && get_memref_parts (mem2, &reg2, &off2, &size2))
30638     return ((REGNO (reg1) == REGNO (reg2))
30639             && ((off1 + size1 == off2)
30640                 || (off2 + size2 == off1)));
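/* Editor's sketch (not GCC code): the adjacency test above reduces each MEM
   to (base register, constant offset, size) and then checks that one access
   ends exactly where the other begins.  The same predicate over a plain
   struct, with hypothetical names.  */
struct example_memref { unsigned base_regno; long offset; long size; };

static int
example_adjacent (const struct example_memref *a,
                  const struct example_memref *b)
{
  return a->base_regno == b->base_regno
         && (a->offset + a->size == b->offset
             || b->offset + b->size == a->offset);
}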
30645 /* This function returns true if it can be determined that the two MEM
30646    locations overlap by at least 1 byte based on base reg/offset/size.  */
30649 mem_locations_overlap (rtx mem1, rtx mem2)
30652   HOST_WIDE_INT off1, size1, off2, size2;
30654   if (get_memref_parts (mem1, &reg1, &off1, &size1)
30655       && get_memref_parts (mem2, &reg2, &off2, &size2))
30656     return ((REGNO (reg1) == REGNO (reg2))
30657             && (((off1 <= off2) && (off1 + size1 > off2))
30658                 || ((off2 <= off1) && (off2 + size2 > off1))));
30663 /* A C statement (sans semicolon) to update the integer scheduling
30664 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30665 INSN earlier, reduce the priority to execute INSN later. Do not
30666 define this macro if you do not need to adjust the scheduling
30667 priorities of insns. */
30670 rs6000_adjust_priority (rtx_insn
*insn ATTRIBUTE_UNUSED
, int priority
)
30672 rtx load_mem
, str_mem
;
30673 /* On machines (like the 750) which have asymmetric integer units,
30674 where one integer unit can do multiply and divides and the other
30675 can't, reduce the priority of multiply/divide so it is scheduled
30676 before other integer operations. */
30679 if (! INSN_P (insn
))
30682 if (GET_CODE (PATTERN (insn
)) == USE
)
30685 switch (rs6000_cpu_attr
) {
30687 switch (get_attr_type (insn
))
30694 fprintf (stderr
, "priority was %#x (%d) before adjustment\n",
30695 priority
, priority
);
30696 if (priority
>= 0 && priority
< 0x01000000)
30703 if (insn_must_be_first_in_group (insn
)
30704 && reload_completed
30705 && current_sched_info
->sched_max_insns_priority
30706 && rs6000_sched_restricted_insns_priority
)
30709 /* Prioritize insns that can be dispatched only in the first
30711 if (rs6000_sched_restricted_insns_priority
== 1)
30712 /* Attach highest priority to insn. This means that in
30713 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30714 precede 'priority' (critical path) considerations. */
30715 return current_sched_info
->sched_max_insns_priority
;
30716 else if (rs6000_sched_restricted_insns_priority
== 2)
30717 /* Increase priority of insn by a minimal amount. This means that in
30718 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30719 considerations precede dispatch-slot restriction considerations. */
30720 return (priority
+ 1);
30723 if (rs6000_cpu
== PROCESSOR_POWER6
30724 && ((load_store_pendulum
== -2 && is_load_insn (insn
, &load_mem
))
30725 || (load_store_pendulum
== 2 && is_store_insn (insn
, &str_mem
))))
30726 /* Attach highest priority to insn if the scheduler has just issued two
30727 stores and this instruction is a load, or two loads and this instruction
30728 is a store. Power6 wants loads and stores scheduled alternately
30730 return current_sched_info
->sched_max_insns_priority
;
30735 /* Return true if the instruction is nonpipelined on the Cell. */
30737 is_nonpipeline_insn (rtx_insn
*insn
)
30739 enum attr_type type
;
30740 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30741 || GET_CODE (PATTERN (insn
)) == USE
30742 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30745 type
= get_attr_type (insn
);
30746 if (type
== TYPE_MUL
30747 || type
== TYPE_DIV
30748 || type
== TYPE_SDIV
30749 || type
== TYPE_DDIV
30750 || type
== TYPE_SSQRT
30751 || type
== TYPE_DSQRT
30752 || type
== TYPE_MFCR
30753 || type
== TYPE_MFCRF
30754 || type
== TYPE_MFJMPR
)
30762 /* Return how many instructions the machine can issue per cycle. */
30765 rs6000_issue_rate (void)
30767 /* Unless scheduling for register pressure, use issue rate of 1 for
30768 first scheduling pass to decrease degradation. */
30769 if (!reload_completed
&& !flag_sched_pressure
)
30772 switch (rs6000_cpu_attr
) {
30774 case CPU_PPC601
: /* ? */
30784 case CPU_PPCE300C2
:
30785 case CPU_PPCE300C3
:
30786 case CPU_PPCE500MC
:
30787 case CPU_PPCE500MC64
:
30812 /* Return how many instructions to look ahead for better insn
30816 rs6000_use_sched_lookahead (void)
30818 switch (rs6000_cpu_attr
)
30825 return (reload_completed
? 8 : 0);
30832 /* We are choosing insn from the ready queue. Return zero if INSN can be
30835 rs6000_use_sched_lookahead_guard (rtx_insn
*insn
, int ready_index
)
30837 if (ready_index
== 0)
30840 if (rs6000_cpu_attr
!= CPU_CELL
)
30843 gcc_assert (insn
!= NULL_RTX
&& INSN_P (insn
));
30845 if (!reload_completed
30846 || is_nonpipeline_insn (insn
)
30847 || is_microcoded_insn (insn
))
30853 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30854 and return true. */
30857 find_mem_ref (rtx pat
, rtx
*mem_ref
)
30862 /* stack_tie does not produce any real memory traffic. */
30863 if (tie_operand (pat
, VOIDmode
))
30866 if (GET_CODE (pat
) == MEM
)
30872 /* Recursively process the pattern. */
30873 fmt
= GET_RTX_FORMAT (GET_CODE (pat
));
30875 for (i
= GET_RTX_LENGTH (GET_CODE (pat
)) - 1; i
>= 0; i
--)
30879 if (find_mem_ref (XEXP (pat
, i
), mem_ref
))
30882 else if (fmt
[i
] == 'E')
30883 for (j
= XVECLEN (pat
, i
) - 1; j
>= 0; j
--)
30885 if (find_mem_ref (XVECEXP (pat
, i
, j
), mem_ref
))
30893 /* Determine if PAT is a PATTERN of a load insn. */
30896 is_load_insn1 (rtx pat
, rtx
*load_mem
)
30898 if (!pat
|| pat
== NULL_RTX
)
30901 if (GET_CODE (pat
) == SET
)
30902 return find_mem_ref (SET_SRC (pat
), load_mem
);
30904 if (GET_CODE (pat
) == PARALLEL
)
30908 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
30909 if (is_load_insn1 (XVECEXP (pat
, 0, i
), load_mem
))
30916 /* Determine if INSN loads from memory. */
30919 is_load_insn (rtx insn
, rtx
*load_mem
)
30921 if (!insn
|| !INSN_P (insn
))
30927 return is_load_insn1 (PATTERN (insn
), load_mem
);
30930 /* Determine if PAT is a PATTERN of a store insn. */
30933 is_store_insn1 (rtx pat
, rtx
*str_mem
)
30935 if (!pat
|| pat
== NULL_RTX
)
30938 if (GET_CODE (pat
) == SET
)
30939 return find_mem_ref (SET_DEST (pat
), str_mem
);
30941 if (GET_CODE (pat
) == PARALLEL
)
30945 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
30946 if (is_store_insn1 (XVECEXP (pat
, 0, i
), str_mem
))
30953 /* Determine if INSN stores to memory. */
30956 is_store_insn (rtx insn
, rtx
*str_mem
)
30958 if (!insn
|| !INSN_P (insn
))
30961 return is_store_insn1 (PATTERN (insn
), str_mem
);
30964 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30967 is_power9_pairable_vec_type (enum attr_type type
)
30971 case TYPE_VECSIMPLE
:
30972 case TYPE_VECCOMPLEX
:
30976 case TYPE_VECFLOAT
:
30978 case TYPE_VECDOUBLE
:
30986 /* Returns whether the dependence between INSN and NEXT is considered
30987 costly by the given target. */
30990 rs6000_is_costly_dependence (dep_t dep
, int cost
, int distance
)
30994 rtx load_mem
, str_mem
;
30996 /* If the flag is not enabled - no dependence is considered costly;
30997 allow all dependent insns in the same group.
30998 This is the most aggressive option. */
30999 if (rs6000_sched_costly_dep
== no_dep_costly
)
31002 /* If the flag is set to 1 - a dependence is always considered costly;
31003 do not allow dependent instructions in the same group.
31004 This is the most conservative option. */
31005 if (rs6000_sched_costly_dep
== all_deps_costly
)
31008 insn
= DEP_PRO (dep
);
31009 next
= DEP_CON (dep
);
31011 if (rs6000_sched_costly_dep
== store_to_load_dep_costly
31012 && is_load_insn (next
, &load_mem
)
31013 && is_store_insn (insn
, &str_mem
))
31014 /* Prevent load after store in the same group. */
31017 if (rs6000_sched_costly_dep
== true_store_to_load_dep_costly
31018 && is_load_insn (next
, &load_mem
)
31019 && is_store_insn (insn
, &str_mem
)
31020 && DEP_TYPE (dep
) == REG_DEP_TRUE
31021 && mem_locations_overlap(str_mem
, load_mem
))
31022 /* Prevent load after store in the same group if it is a true
31026 /* The flag is set to X; dependences with latency >= X are considered costly,
31027 and will not be scheduled in the same group. */
31028 if (rs6000_sched_costly_dep
<= max_dep_latency
31029 && ((cost
- distance
) >= (int)rs6000_sched_costly_dep
))
31035 /* Return the next insn after INSN that is found before TAIL is reached,
31036 skipping any "non-active" insns - insns that will not actually occupy
31037 an issue slot. Return NULL_RTX if such an insn is not found. */
31040 get_next_active_insn (rtx_insn
*insn
, rtx_insn
*tail
)
31042 if (insn
== NULL_RTX
|| insn
== tail
)
31047 insn
= NEXT_INSN (insn
);
31048 if (insn
== NULL_RTX
|| insn
== tail
)
31052 || JUMP_P (insn
) || JUMP_TABLE_DATA_P (insn
)
31053 || (NONJUMP_INSN_P (insn
)
31054 && GET_CODE (PATTERN (insn
)) != USE
31055 && GET_CODE (PATTERN (insn
)) != CLOBBER
31056 && INSN_CODE (insn
) != CODE_FOR_stack_tie
))
31062 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31065 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31070 enum attr_type type
, type2
;
31072 type
= get_attr_type (last_scheduled_insn
);
31074 /* Try to issue fixed point divides back-to-back in pairs so they will be
31075 routed to separate execution units and execute in parallel. */
31076 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31078 /* First divide has been scheduled. */
31081 /* Scan the ready list looking for another divide, if found move it
31082 to the end of the list so it is chosen next. */
31086 if (recog_memoized (ready
[pos
]) >= 0
31087 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31090 for (i
= pos
; i
< lastpos
; i
++)
31091 ready
[i
] = ready
[i
+ 1];
31092 ready
[lastpos
] = tmp
;
31100 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31103 /* The best dispatch throughput for vector and vector load insns can be
31104 achieved by interleaving a vector and vector load such that they'll
31105 dispatch to the same superslice. If this pairing cannot be achieved
31106 then it is best to pair vector insns together and vector load insns
31109 To aid in this pairing, vec_pairing maintains the current state with
31110 the following values:
31112 0 : Initial state, no vecload/vector pairing has been started.
31114 1 : A vecload or vector insn has been issued and a candidate for
31115 pairing has been found and moved to the end of the ready
31117 if (type
== TYPE_VECLOAD
)
31119 /* Issued a vecload. */
31120 if (vec_pairing
== 0)
31122 int vecload_pos
= -1;
31123 /* We issued a single vecload, look for a vector insn to pair it
31124 with. If one isn't found, try to pair another vecload. */
31128 if (recog_memoized (ready
[pos
]) >= 0)
31130 type2
= get_attr_type (ready
[pos
]);
31131 if (is_power9_pairable_vec_type (type2
))
31133 /* Found a vector insn to pair with, move it to the
31134 end of the ready list so it is scheduled next. */
31136 for (i
= pos
; i
< lastpos
; i
++)
31137 ready
[i
] = ready
[i
+ 1];
31138 ready
[lastpos
] = tmp
;
31140 return cached_can_issue_more
;
31142 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31143 /* Remember position of first vecload seen. */
31148 if (vecload_pos
>= 0)
31150 /* Didn't find a vector to pair with but did find a vecload,
31151 move it to the end of the ready list. */
31152 tmp
= ready
[vecload_pos
];
31153 for (i
= vecload_pos
; i
< lastpos
; i
++)
31154 ready
[i
] = ready
[i
+ 1];
31155 ready
[lastpos
] = tmp
;
31157 return cached_can_issue_more
;
31161 else if (is_power9_pairable_vec_type (type
))
31163 /* Issued a vector operation. */
31164 if (vec_pairing
== 0)
31167 /* We issued a single vector insn, look for a vecload to pair it
31168 with. If one isn't found, try to pair another vector. */
31172 if (recog_memoized (ready
[pos
]) >= 0)
31174 type2
= get_attr_type (ready
[pos
]);
31175 if (type2
== TYPE_VECLOAD
)
31177 /* Found a vecload insn to pair with, move it to the
31178 end of the ready list so it is scheduled next. */
31180 for (i
= pos
; i
< lastpos
; i
++)
31181 ready
[i
] = ready
[i
+ 1];
31182 ready
[lastpos
] = tmp
;
31184 return cached_can_issue_more
;
31186 else if (is_power9_pairable_vec_type (type2
)
31188 /* Remember position of first vector insn seen. */
31195 /* Didn't find a vecload to pair with but did find a vector
31196 insn, move it to the end of the ready list. */
31197 tmp
= ready
[vec_pos
];
31198 for (i
= vec_pos
; i
< lastpos
; i
++)
31199 ready
[i
] = ready
[i
+ 1];
31200 ready
[lastpos
] = tmp
;
31202 return cached_can_issue_more
;
31207   /* We've either finished a vec/vecload pair, couldn't find an insn to
31208      continue the current pair, or the last insn had nothing to do with
31209      pairing.  In any case, reset the state.  */
31213   return cached_can_issue_more;
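/* Editor's sketch (not GCC code): both pairing searches in
   power9_sched_reorder2 use the same ready-list rotation -- the chosen insn
   is pulled out of position POS and the later entries slide down so the
   candidate lands in the last slot, which is the one the scheduler issues
   next.  Standalone version over a plain pointer array; names are
   hypothetical.  */
static void
example_move_to_end (void **ready, int pos, int lastpos)
{
  void *tmp = ready[pos];

  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];            /* slide the tail down by one */
  ready[lastpos] = tmp;                 /* candidate is issued next */
}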
31216 /* We are about to begin issuing insns for this clock cycle. */
31219 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED
, int sched_verbose
,
31220 rtx_insn
**ready ATTRIBUTE_UNUSED
,
31221 int *pn_ready ATTRIBUTE_UNUSED
,
31222 int clock_var ATTRIBUTE_UNUSED
)
31224 int n_ready
= *pn_ready
;
31227 fprintf (dump
, "// rs6000_sched_reorder :\n");
31229 /* Reorder the ready list, if the second to last ready insn
31230 is a nonpipelined insn. */
31231 if (rs6000_cpu_attr
== CPU_CELL
&& n_ready
> 1)
31233 if (is_nonpipeline_insn (ready
[n_ready
- 1])
31234 && (recog_memoized (ready
[n_ready
- 2]) > 0))
31235 /* Simply swap first two insns. */
31236 std::swap (ready
[n_ready
- 1], ready
[n_ready
- 2]);
31239 if (rs6000_cpu
== PROCESSOR_POWER6
)
31240 load_store_pendulum
= 0;
31242 return rs6000_issue_rate ();
31245 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31248 rs6000_sched_reorder2 (FILE *dump
, int sched_verbose
, rtx_insn
**ready
,
31249 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
)
31252 fprintf (dump
, "// rs6000_sched_reorder2 :\n");
31254 /* For Power6, we need to handle some special cases to try and keep the
31255 store queue from overflowing and triggering expensive flushes.
31257 This code monitors how load and store instructions are being issued
31258 and skews the ready list one way or the other to increase the likelihood
31259 that a desired instruction is issued at the proper time.
31261 A couple of things are done. First, we maintain a "load_store_pendulum"
31262 to track the current state of load/store issue.
31264 - If the pendulum is at zero, then no loads or stores have been
31265 issued in the current cycle so we do nothing.
31267 - If the pendulum is 1, then a single load has been issued in this
31268 cycle and we attempt to locate another load in the ready list to
31271 - If the pendulum is -2, then two stores have already been
31272 issued in this cycle, so we increase the priority of the first load
31273 in the ready list to increase its likelihood of being chosen first
31276 - If the pendulum is -1, then a single store has been issued in this
31277 cycle and we attempt to locate another store in the ready list to
31278 issue with it, preferring a store to an adjacent memory location to
31279 facilitate store pairing in the store queue.
31281 - If the pendulum is 2, then two loads have already been
31282 issued in this cycle, so we increase the priority of the first store
31283 in the ready list to increase its likelihood of being chosen first
31286 - If the pendulum < -2 or > 2, then do nothing.
31288 Note: This code covers the most common scenarios. There exist non
31289 load/store instructions which make use of the LSU and which
31290 would need to be accounted for to strictly model the behavior
31291 of the machine. Those instructions are currently unaccounted
31292 for to help minimize compile time overhead of this code.
31294 if (rs6000_cpu
== PROCESSOR_POWER6
&& last_scheduled_insn
)
31299 rtx load_mem
, str_mem
;
31301 if (is_store_insn (last_scheduled_insn
, &str_mem
))
31302 /* Issuing a store, swing the load_store_pendulum to the left */
31303 load_store_pendulum
--;
31304 else if (is_load_insn (last_scheduled_insn
, &load_mem
))
31305 /* Issuing a load, swing the load_store_pendulum to the right */
31306 load_store_pendulum
++;
31308 return cached_can_issue_more
;
31310 /* If the pendulum is balanced, or there is only one instruction on
31311 the ready list, then all is well, so return. */
31312 if ((load_store_pendulum
== 0) || (*pn_ready
<= 1))
31313 return cached_can_issue_more
;
31315 if (load_store_pendulum
== 1)
31317 /* A load has been issued in this cycle. Scan the ready list
31318 for another load to issue with it */
31323 if (is_load_insn (ready
[pos
], &load_mem
))
31325 /* Found a load. Move it to the head of the ready list,
31326 and adjust its priority so that it is more likely to
31329 for (i
=pos
; i
<*pn_ready
-1; i
++)
31330 ready
[i
] = ready
[i
+ 1];
31331 ready
[*pn_ready
-1] = tmp
;
31333 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31334 INSN_PRIORITY (tmp
)++;
31340 else if (load_store_pendulum
== -2)
31342 /* Two stores have been issued in this cycle. Increase the
31343 priority of the first load in the ready list to favor it for
31344 issuing in the next cycle. */
31349 if (is_load_insn (ready
[pos
], &load_mem
)
31351 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31353 INSN_PRIORITY (ready
[pos
])++;
31355 /* Adjust the pendulum to account for the fact that a load
31356 was found and increased in priority. This is to prevent
31357 increasing the priority of multiple loads */
31358 load_store_pendulum
--;
31365 else if (load_store_pendulum
== -1)
31367 /* A store has been issued in this cycle. Scan the ready list for
31368 another store to issue with it, preferring a store to an adjacent
31370 int first_store_pos
= -1;
31376 if (is_store_insn (ready
[pos
], &str_mem
))
31379 /* Maintain the index of the first store found on the
31381 if (first_store_pos
== -1)
31382 first_store_pos
= pos
;
31384 if (is_store_insn (last_scheduled_insn
, &str_mem2
)
31385 && adjacent_mem_locations (str_mem
, str_mem2
))
31387 /* Found an adjacent store. Move it to the head of the
31388 ready list, and adjust its priority so that it is
31389 more likely to stay there */
31391 for (i
=pos
; i
<*pn_ready
-1; i
++)
31392 ready
[i
] = ready
[i
+ 1];
31393 ready
[*pn_ready
-1] = tmp
;
31395 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31396 INSN_PRIORITY (tmp
)++;
31398 first_store_pos
= -1;
31406 if (first_store_pos
>= 0)
31408 /* An adjacent store wasn't found, but a non-adjacent store was,
31409 so move the non-adjacent store to the front of the ready
31410 list, and adjust its priority so that it is more likely to
31412 tmp
= ready
[first_store_pos
];
31413 for (i
=first_store_pos
; i
<*pn_ready
-1; i
++)
31414 ready
[i
] = ready
[i
+ 1];
31415 ready
[*pn_ready
-1] = tmp
;
31416 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31417 INSN_PRIORITY (tmp
)++;
31420 else if (load_store_pendulum
== 2)
31422 /* Two loads have been issued in this cycle. Increase the priority
31423 of the first store in the ready list to favor it for issuing in
31429 if (is_store_insn (ready
[pos
], &str_mem
)
31431 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31433 INSN_PRIORITY (ready
[pos
])++;
31435 /* Adjust the pendulum to account for the fact that a store
31436 was found and increased in priority. This is to prevent
31437 increasing the priority of multiple stores */
31438 load_store_pendulum
++;
31447   /* Do Power9 dependent reordering if necessary.  */
31448   if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31449       && recog_memoized (last_scheduled_insn) >= 0)
31450     return power9_sched_reorder2 (ready, *pn_ready - 1);
31452   return cached_can_issue_more;
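/* Editor's sketch (not GCC code): the Power6 load/store pendulum described
   at the top of rs6000_sched_reorder2, reduced to its counter update.  The
   counter swings negative as stores are issued and positive as loads are;
   reaching -2 or +2 is the trigger for boosting the opposite kind of insn
   in the ready list.  Names are hypothetical.  */
enum example_mem_kind { EX_NONE, EX_LOAD, EX_STORE };

static int
example_update_pendulum (int pendulum, enum example_mem_kind kind)
{
  if (kind == EX_STORE)
    pendulum--;                 /* swing left: a store was issued */
  else if (kind == EX_LOAD)
    pendulum++;                 /* swing right: a load was issued */
  return pendulum;              /* -2 => favor a load next; +2 => favor a store */
}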
31455 /* Return whether the presence of INSN causes a dispatch group termination
31456 of group WHICH_GROUP.
31458 If WHICH_GROUP == current_group, this function will return true if INSN
31459 causes the termination of the current group (i.e, the dispatch group to
31460 which INSN belongs). This means that INSN will be the last insn in the
31461 group it belongs to.
31463 If WHICH_GROUP == previous_group, this function will return true if INSN
31464 causes the termination of the previous group (i.e, the dispatch group that
31465 precedes the group to which INSN belongs). This means that INSN will be
31466 the first insn in the group it belongs to). */
31469 insn_terminates_group_p (rtx_insn
*insn
, enum group_termination which_group
)
31476 first
= insn_must_be_first_in_group (insn
);
31477 last
= insn_must_be_last_in_group (insn
);
31482 if (which_group
== current_group
)
31484 else if (which_group
== previous_group
)
31492 insn_must_be_first_in_group (rtx_insn
*insn
)
31494 enum attr_type type
;
31498 || DEBUG_INSN_P (insn
)
31499 || GET_CODE (PATTERN (insn
)) == USE
31500 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31503 switch (rs6000_cpu
)
31505 case PROCESSOR_POWER5
:
31506 if (is_cracked_insn (insn
))
31509 case PROCESSOR_POWER4
:
31510 if (is_microcoded_insn (insn
))
31513 if (!rs6000_sched_groups
)
31516 type
= get_attr_type (insn
);
31523 case TYPE_DELAYED_CR
:
31524 case TYPE_CR_LOGICAL
:
31537 case PROCESSOR_POWER6
:
31538 type
= get_attr_type (insn
);
31547 case TYPE_FPCOMPARE
:
31558 if (get_attr_dot (insn
) == DOT_NO
31559 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31564 if (get_attr_size (insn
) == SIZE_32
)
31572 if (get_attr_update (insn
) == UPDATE_YES
)
31580 case PROCESSOR_POWER7
:
31581 type
= get_attr_type (insn
);
31585 case TYPE_CR_LOGICAL
:
31599 if (get_attr_dot (insn
) == DOT_YES
)
31604 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31605 || get_attr_update (insn
) == UPDATE_YES
)
31612 if (get_attr_update (insn
) == UPDATE_YES
)
31620 case PROCESSOR_POWER8
:
31621 type
= get_attr_type (insn
);
31625 case TYPE_CR_LOGICAL
:
31626 case TYPE_DELAYED_CR
:
31634 case TYPE_VECSTORE
:
31641 if (get_attr_dot (insn
) == DOT_YES
)
31646 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31647 || get_attr_update (insn
) == UPDATE_YES
)
31652 if (get_attr_update (insn
) == UPDATE_YES
31653 && get_attr_indexed (insn
) == INDEXED_YES
)
31669 insn_must_be_last_in_group (rtx_insn
*insn
)
31671 enum attr_type type
;
31675 || DEBUG_INSN_P (insn
)
31676 || GET_CODE (PATTERN (insn
)) == USE
31677 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31680 switch (rs6000_cpu
) {
31681 case PROCESSOR_POWER4
:
31682 case PROCESSOR_POWER5
:
31683 if (is_microcoded_insn (insn
))
31686 if (is_branch_slot_insn (insn
))
31690 case PROCESSOR_POWER6
:
31691 type
= get_attr_type (insn
);
31699 case TYPE_FPCOMPARE
:
31710 if (get_attr_dot (insn
) == DOT_NO
31711 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31716 if (get_attr_size (insn
) == SIZE_32
)
31724 case PROCESSOR_POWER7
:
31725 type
= get_attr_type (insn
);
31735 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31736 && get_attr_update (insn
) == UPDATE_YES
)
31741 if (get_attr_update (insn
) == UPDATE_YES
31742 && get_attr_indexed (insn
) == INDEXED_YES
)
31750 case PROCESSOR_POWER8
:
31751 type
= get_attr_type (insn
);
31763 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31764 && get_attr_update (insn
) == UPDATE_YES
)
31769 if (get_attr_update (insn
) == UPDATE_YES
31770 && get_attr_indexed (insn
) == INDEXED_YES
)
31785 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31786 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31789 is_costly_group (rtx
*group_insns
, rtx next_insn
)
31792 int issue_rate
= rs6000_issue_rate ();
31794 for (i
= 0; i
< issue_rate
; i
++)
31796 sd_iterator_def sd_it
;
31798 rtx insn
= group_insns
[i
];
31803 FOR_EACH_DEP (insn
, SD_LIST_RES_FORW
, sd_it
, dep
)
31805 rtx next
= DEP_CON (dep
);
31807 if (next
== next_insn
31808 && rs6000_is_costly_dependence (dep
, dep_cost (dep
), 0))
31816 /* Utility of the function redefine_groups.
31817 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31818 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31819 to keep it "far" (in a separate group) from GROUP_INSNS, following
31820 one of the following schemes, depending on the value of the flag
31821 -minsert_sched_nops = X:
31822 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31823 in order to force NEXT_INSN into a separate group.
31824 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31825 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31826 insertion (has a group just ended, how many vacant issue slots remain in the
31827 last group, and how many dispatch groups were encountered so far). */
31830 force_new_group (int sched_verbose
, FILE *dump
, rtx
*group_insns
,
31831 rtx_insn
*next_insn
, bool *group_end
, int can_issue_more
,
31836 int issue_rate
= rs6000_issue_rate ();
31837 bool end
= *group_end
;
31840 if (next_insn
== NULL_RTX
|| DEBUG_INSN_P (next_insn
))
31841 return can_issue_more
;
31843 if (rs6000_sched_insert_nops
> sched_finish_regroup_exact
)
31844 return can_issue_more
;
31846 force
= is_costly_group (group_insns
, next_insn
);
31848 return can_issue_more
;
31850 if (sched_verbose
> 6)
31851 fprintf (dump
,"force: group count = %d, can_issue_more = %d\n",
31852 *group_count
,can_issue_more
);
31854 if (rs6000_sched_insert_nops
== sched_finish_regroup_exact
)
31857 can_issue_more
= 0;
31859 /* Since only a branch can be issued in the last issue_slot, it is
31860 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31861 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31862 in this case the last nop will start a new group and the branch
31863 will be forced to the new group. */
31864 if (can_issue_more
&& !is_branch_slot_insn (next_insn
))
31867 /* Do we have a special group ending nop? */
31868 if (rs6000_cpu_attr
== CPU_POWER6
|| rs6000_cpu_attr
== CPU_POWER7
31869 || rs6000_cpu_attr
== CPU_POWER8
)
31871 nop
= gen_group_ending_nop ();
31872 emit_insn_before (nop
, next_insn
);
31873 can_issue_more
= 0;
31876 while (can_issue_more
> 0)
31879 emit_insn_before (nop
, next_insn
);
31887 if (rs6000_sched_insert_nops
< sched_finish_regroup_exact
)
31889 int n_nops
= rs6000_sched_insert_nops
;
31891 /* Nops can't be issued from the branch slot, so the effective
31892 issue_rate for nops is 'issue_rate - 1'. */
31893 if (can_issue_more
== 0)
31894 can_issue_more
= issue_rate
;
31896 if (can_issue_more
== 0)
31898 can_issue_more
= issue_rate
- 1;
31901 for (i
= 0; i
< issue_rate
; i
++)
31903 group_insns
[i
] = 0;
31910 emit_insn_before (nop
, next_insn
);
31911 if (can_issue_more
== issue_rate
- 1) /* new group begins */
31914 if (can_issue_more
== 0)
31916 can_issue_more
= issue_rate
- 1;
31919 for (i
= 0; i
< issue_rate
; i
++)
31921 group_insns
[i
] = 0;
31927 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31930 /* Is next_insn going to start a new group? */
31933 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
31934 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
31935 || (can_issue_more
< issue_rate
&&
31936 insn_terminates_group_p (next_insn
, previous_group
)));
31937 if (*group_end
&& end
)
31940 if (sched_verbose
> 6)
31941 fprintf (dump
, "done force: group count = %d, can_issue_more = %d\n",
31942 *group_count
, can_issue_more
);
31943       return can_issue_more;
31946   return can_issue_more;
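/* Editor's sketch (not GCC code): the exact-regroup scheme above amounts to
   "pad the rest of the dispatch group with nops".  Only a branch can occupy
   the final (branch) slot, so a non-branch next insn needs one nop fewer
   than the number of vacant slots.  Hypothetical helper.  */
static int
example_nops_needed (int can_issue_more, int next_is_branch)
{
  if (can_issue_more <= 0)
    return 0;                           /* group is already full */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}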
31949 /* This function tries to synch the dispatch groups that the compiler "sees"
31950 with the dispatch groups that the processor dispatcher is expected to
31951 form in practice. It tries to achieve this synchronization by forcing the
31952 estimated processor grouping on the compiler (as opposed to the function
31953 'pad_groups' which tries to force the scheduler's grouping on the processor).
31955 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31956 examines the (estimated) dispatch groups that will be formed by the processor
31957 dispatcher. It marks these group boundaries to reflect the estimated
31958 processor grouping, overriding the grouping that the scheduler had marked.
31959 Depending on the value of the flag '-minsert-sched-nops' this function can
31960 force certain insns into separate groups or force a certain distance between
31961 them by inserting nops, for example, if there exists a "costly dependence"
31964 The function estimates the group boundaries that the processor will form as
31965 follows: It keeps track of how many vacant issue slots are available after
31966 each insn. A subsequent insn will start a new group if one of the following
31968 - no more vacant issue slots remain in the current dispatch group.
31969 - only the last issue slot, which is the branch slot, is vacant, but the next
31970 insn is not a branch.
31971 - only the last 2 or less issue slots, including the branch slot, are vacant,
31972 which means that a cracked insn (which occupies two issue slots) can't be
31973 issued in this group.
31974 - less than 'issue_rate' slots are vacant, and the next insn always needs to
31975 start a new group. */
31978 redefine_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
31981 rtx_insn
*insn
, *next_insn
;
31983 int can_issue_more
;
31986 int group_count
= 0;
31990 issue_rate
= rs6000_issue_rate ();
31991 group_insns
= XALLOCAVEC (rtx
, issue_rate
);
31992 for (i
= 0; i
< issue_rate
; i
++)
31994 group_insns
[i
] = 0;
31996 can_issue_more
= issue_rate
;
31998 insn
= get_next_active_insn (prev_head_insn
, tail
);
32001 while (insn
!= NULL_RTX
)
32003 slot
= (issue_rate
- can_issue_more
);
32004 group_insns
[slot
] = insn
;
32006 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32007 if (insn_terminates_group_p (insn
, current_group
))
32008 can_issue_more
= 0;
32010 next_insn
= get_next_active_insn (insn
, tail
);
32011 if (next_insn
== NULL_RTX
)
32012 return group_count
+ 1;
32014 /* Is next_insn going to start a new group? */
32016 = (can_issue_more
== 0
32017 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
32018 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
32019 || (can_issue_more
< issue_rate
&&
32020 insn_terminates_group_p (next_insn
, previous_group
)));
32022 can_issue_more
= force_new_group (sched_verbose
, dump
, group_insns
,
32023 next_insn
, &group_end
, can_issue_more
,
32029 can_issue_more
= 0;
32030 for (i
= 0; i
< issue_rate
; i
++)
32032 group_insns
[i
] = 0;
32036 if (GET_MODE (next_insn
) == TImode
&& can_issue_more
)
32037 PUT_MODE (next_insn
, VOIDmode
);
32038 else if (!can_issue_more
&& GET_MODE (next_insn
) != TImode
)
32039 PUT_MODE (next_insn
, TImode
);
32042 if (can_issue_more
== 0)
32043 can_issue_more
= issue_rate
;
32046 return group_count
;
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
32056 pad_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
32059 rtx_insn
*insn
, *next_insn
;
32062 int can_issue_more
;
32064 int group_count
= 0;
32066 /* Initialize issue_rate. */
32067 issue_rate
= rs6000_issue_rate ();
32068 can_issue_more
= issue_rate
;
32070 insn
= get_next_active_insn (prev_head_insn
, tail
);
32071 next_insn
= get_next_active_insn (insn
, tail
);
32073 while (insn
!= NULL_RTX
)
32076 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32078 group_end
= (next_insn
== NULL_RTX
|| GET_MODE (next_insn
) == TImode
);
32080 if (next_insn
== NULL_RTX
)
32085 /* If the scheduler had marked group termination at this location
32086 (between insn and next_insn), and neither insn nor next_insn will
32087 force group termination, pad the group with nops to force group
32090 && (rs6000_sched_insert_nops
== sched_finish_pad_groups
)
32091 && !insn_terminates_group_p (insn
, current_group
)
32092 && !insn_terminates_group_p (next_insn
, previous_group
))
32094 if (!is_branch_slot_insn (next_insn
))
32097 while (can_issue_more
)
32100 emit_insn_before (nop
, next_insn
);
32105 can_issue_more
= issue_rate
;
32110 next_insn
= get_next_active_insn (insn
, tail
);
32113 return group_count
;
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
	return;

      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
/* Allocate store for new scheduling context.  */

static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}
/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */

static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}
/* Sets the global scheduling context to the one pointed to by _SC.  */

static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}
/* Free _SC.  */

static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
32236 rs6000_sched_can_speculate_insn (rtx_insn
*insn
)
32238 switch (get_attr_type (insn
))
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
	rtx fnmem, fn_reg, toc_reg;

	if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	  error ("you cannot take the address of a nested function if you use "
		 "the %qs option", "-mno-pointers-to-nested-functions");

	fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
	fn_reg = gen_reg_rtx (Pmode);
	toc_reg = gen_reg_rtx (Pmode);

	/* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

	m_tramp = replace_equiv_address (m_tramp, addr);

	emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
	emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
	emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
	emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
			 LCT_NORMAL, VOIDmode,
			 addr, Pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, Pmode,
			 ctx_reg, Pmode);
      break;
    }
}
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

	__attribute__((altivec(vector__)))
	__attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
	__attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
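/* A usage sketch (illustrative only; the declarations below are hypothetical
   examples, not code from this file):

	vector unsigned int v;
	  is roughly  __attribute__((altivec(vector__))) unsigned int v;

	vector bool int b;
	  applies altivec(bool__) to 'unsigned int' and then altivec(vector__),
	  matching the "may appear more than once" note above.  */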
32363 rs6000_handle_altivec_attribute (tree
*node
,
32364 tree name ATTRIBUTE_UNUSED
,
32366 int flags ATTRIBUTE_UNUSED
,
32367 bool *no_add_attrs
)
32369 tree type
= *node
, result
= NULL_TREE
;
32373 = ((args
&& TREE_CODE (args
) == TREE_LIST
&& TREE_VALUE (args
)
32374 && TREE_CODE (TREE_VALUE (args
)) == IDENTIFIER_NODE
)
32375 ? *IDENTIFIER_POINTER (TREE_VALUE (args
))
32378 while (POINTER_TYPE_P (type
)
32379 || TREE_CODE (type
) == FUNCTION_TYPE
32380 || TREE_CODE (type
) == METHOD_TYPE
32381 || TREE_CODE (type
) == ARRAY_TYPE
)
32382 type
= TREE_TYPE (type
);
32384 mode
= TYPE_MODE (type
);
32386 /* Check for invalid AltiVec type qualifiers. */
32387 if (type
== long_double_type_node
)
32388 error ("use of %<long double%> in AltiVec types is invalid");
32389 else if (type
== boolean_type_node
)
32390 error ("use of boolean types in AltiVec types is invalid");
32391 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
32392 error ("use of %<complex%> in AltiVec types is invalid");
32393 else if (DECIMAL_FLOAT_MODE_P (mode
))
32394 error ("use of decimal floating point types in AltiVec types is invalid");
32395 else if (!TARGET_VSX
)
32397 if (type
== long_unsigned_type_node
|| type
== long_integer_type_node
)
32400 error ("use of %<long%> in AltiVec types is invalid for "
32401 "64-bit code without %qs", "-mvsx");
32402 else if (rs6000_warn_altivec_long
)
32403 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32406 else if (type
== long_long_unsigned_type_node
32407 || type
== long_long_integer_type_node
)
32408 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32410 else if (type
== double_type_node
)
32411 error ("use of %<double%> in AltiVec types is invalid without %qs",
32415 switch (altivec_type
)
32418 unsigned_p
= TYPE_UNSIGNED (type
);
32422 result
= (unsigned_p
? unsigned_V1TI_type_node
: V1TI_type_node
);
32425 result
= (unsigned_p
? unsigned_V2DI_type_node
: V2DI_type_node
);
32428 result
= (unsigned_p
? unsigned_V4SI_type_node
: V4SI_type_node
);
32431 result
= (unsigned_p
? unsigned_V8HI_type_node
: V8HI_type_node
);
32434 result
= (unsigned_p
? unsigned_V16QI_type_node
: V16QI_type_node
);
32436 case E_SFmode
: result
= V4SF_type_node
; break;
32437 case E_DFmode
: result
= V2DF_type_node
; break;
32438 /* If the user says 'vector int bool', we may be handed the 'bool'
32439 attribute _before_ the 'vector' attribute, and so select the
32440 proper type in the 'b' case below. */
32441 case E_V4SImode
: case E_V8HImode
: case E_V16QImode
: case E_V4SFmode
:
32442 case E_V2DImode
: case E_V2DFmode
:
32450 case E_DImode
: case E_V2DImode
: result
= bool_V2DI_type_node
; break;
32451 case E_SImode
: case E_V4SImode
: result
= bool_V4SI_type_node
; break;
32452 case E_HImode
: case E_V8HImode
: result
= bool_V8HI_type_node
; break;
32453 case E_QImode
: case E_V16QImode
: result
= bool_V16QI_type_node
;
32460 case E_V8HImode
: result
= pixel_V8HI_type_node
;
32466 /* Propagate qualifiers attached to the element type
32467 onto the vector type. */
32468 if (result
&& result
!= type
&& TYPE_QUALS (type
))
32469 result
= build_qualified_type (result
, TYPE_QUALS (type
));
32471 *no_add_attrs
= true; /* No need to hang on to the attribute. */
32474 *node
= lang_hooks
.types
.reconstruct_complex_type (*node
, result
);
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Use a unique name for __float128 rather than trying to use "e" or "g".
     Use "g" for IBM extended double, no matter whether it is long double
     (using -mabi=ibmlongdouble) or the distinct __ibm128 type.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (type == ieee128_float_type_node)
	return "U10__float128";

      if (type == ibm128_float_type_node)
	return "g";

      if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
	return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
    }

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
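/* Usage sketch (hypothetical example, not from this file):

	void far_away (void) __attribute__ ((longcall));

   Calls to such a function are expanded through a register (see
   rs6000_longcall_ref below), so they can reach targets beyond the range
   of a direct branch.  */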
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
	  !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
32636 #ifdef USING_ELFOS_H
32638 /* A get_unnamed_section callback, used for switching to toc_section. */
32641 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
32643 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32644 && TARGET_MINIMAL_TOC
)
32646 if (!toc_initialized
)
32648 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32649 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32650 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
32651 fprintf (asm_out_file
, "\t.tc ");
32652 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
32653 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32654 fprintf (asm_out_file
, "\n");
32656 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32657 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32658 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32659 fprintf (asm_out_file
, " = .+32768\n");
32660 toc_initialized
= 1;
32663 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32665 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32667 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32668 if (!toc_initialized
)
32670 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32671 toc_initialized
= 1;
32676 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32677 if (!toc_initialized
)
32679 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32680 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32681 fprintf (asm_out_file
, " = .+32768\n");
32682 toc_initialized
= 1;
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
			   SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
			       unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  unsigned int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}
32792 #endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
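/* For example, given ADDR of the form (plus (reg 9) (const_int 16)),
   the loop above skips the constant operand and returns (reg 9).  */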
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
			    int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}
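/* Illustrative sketch only (the label and symbol names here are made up,
   non-PIC case), mirroring the sequence macho_branch_islands emits below:

	L42$island:
		lis r12,hi16(_foo)
		ori r12,r12,lo16(_foo)
		mtctr r12
		bctr

   A "jbsr _foo,L42$island" in the function body then links as "bl _foo"
   when _foo is in range of a direct branch, and as "bl L42$island"
   otherwise.  */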
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
32874 macho_branch_islands (void)
32878 while (!vec_safe_is_empty (branch_islands
))
32880 branch_island
*bi
= &branch_islands
->last ();
32881 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
32882 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
32883 char name_buf
[512];
32884 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32885 if (name
[0] == '*' || name
[0] == '&')
32886 strcpy (name_buf
, name
+1);
32890 strcpy (name_buf
+1, name
);
32892 strcpy (tmp_buf
, "\n");
32893 strcat (tmp_buf
, label
);
32894 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32895 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
32896 dbxout_stabd (N_SLINE
, bi
->line_number
);
32897 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32900 if (TARGET_LINK_STACK
)
32903 get_ppc476_thunk_name (name
);
32904 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
32905 strcat (tmp_buf
, name
);
32906 strcat (tmp_buf
, "\n");
32907 strcat (tmp_buf
, label
);
32908 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
32912 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
32913 strcat (tmp_buf
, label
);
32914 strcat (tmp_buf
, "_pic\n");
32915 strcat (tmp_buf
, label
);
32916 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
32919 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
32920 strcat (tmp_buf
, name_buf
);
32921 strcat (tmp_buf
, " - ");
32922 strcat (tmp_buf
, label
);
32923 strcat (tmp_buf
, "_pic)\n");
32925 strcat (tmp_buf
, "\tmtlr r0\n");
32927 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
32928 strcat (tmp_buf
, name_buf
);
32929 strcat (tmp_buf
, " - ");
32930 strcat (tmp_buf
, label
);
32931 strcat (tmp_buf
, "_pic)\n");
32933 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
32937 strcat (tmp_buf
, ":\nlis r12,hi16(");
32938 strcat (tmp_buf
, name_buf
);
32939 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
32940 strcat (tmp_buf
, name_buf
);
32941 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
32943 output_asm_insn (tmp_buf
, 0);
32944 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32945 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
32946 dbxout_stabd (N_SLINE
, bi
->line_number
);
32947 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32948 branch_islands
->pop ();
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
32982 /* INSN is either a function call or a millicode call. It may have an
32983 unconditional jump in its delay slot.
32985 CALL_DEST is the routine we are calling. */
32988 output_call (rtx_insn
*insn
, rtx
*operands
, int dest_operand_number
,
32989 int cookie_operand_number
)
32991 static char buf
[256];
32992 if (darwin_emit_branch_islands
32993 && GET_CODE (operands
[dest_operand_number
]) == SYMBOL_REF
32994 && (INTVAL (operands
[cookie_operand_number
]) & CALL_LONG
))
32997 tree funname
= get_identifier (XSTR (operands
[dest_operand_number
], 0));
32999 if (no_previous_def (funname
))
33001 rtx label_rtx
= gen_label_rtx ();
33002 char *label_buf
, temp_buf
[256];
33003 ASM_GENERATE_INTERNAL_LABEL (temp_buf
, "L",
33004 CODE_LABEL_NUMBER (label_rtx
));
33005 label_buf
= temp_buf
[0] == '*' ? temp_buf
+ 1 : temp_buf
;
33006 labelname
= get_identifier (label_buf
);
33007 add_compiler_branch_island (labelname
, funname
, insn_line (insn
));
33010 labelname
= get_prev_label (funname
);
33012 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33013 instruction will reach 'foo', otherwise link as 'bl L42'".
33014 "L42" should be a 'branch island', that will do a far jump to
33015 'foo'. Branch islands are generated in
33016 macho_branch_islands(). */
33017 sprintf (buf
, "jbsr %%z%d,%.246s",
33018 dest_operand_number
, IDENTIFIER_POINTER (labelname
));
33021 sprintf (buf
, "bl %%z%d", dest_operand_number
);
33025 /* Generate PIC and indirect symbol stubs. */
33028 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
33030 unsigned int length
;
33031 char *symbol_name
, *lazy_ptr_name
;
33032 char *local_label_0
;
33033 static int label
= 0;
33035 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33036 symb
= (*targetm
.strip_name_encoding
) (symb
);
33039 length
= strlen (symb
);
33040 symbol_name
= XALLOCAVEC (char, length
+ 32);
33041 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
33043 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
33044 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
33047 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
33049 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
33053 fprintf (file
, "\t.align 5\n");
33055 fprintf (file
, "%s:\n", stub
);
33056 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33059 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33060 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
33062 fprintf (file
, "\tmflr r0\n");
33063 if (TARGET_LINK_STACK
)
33066 get_ppc476_thunk_name (name
);
33067 fprintf (file
, "\tbl %s\n", name
);
33068 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33072 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
33073 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33075 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
33076 lazy_ptr_name
, local_label_0
);
33077 fprintf (file
, "\tmtlr r0\n");
33078 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
33079 (TARGET_64BIT
? "ldu" : "lwzu"),
33080 lazy_ptr_name
, local_label_0
);
33081 fprintf (file
, "\tmtctr r12\n");
33082 fprintf (file
, "\tbctr\n");
33086 fprintf (file
, "\t.align 4\n");
33088 fprintf (file
, "%s:\n", stub
);
33089 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33091 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
33092 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
33093 (TARGET_64BIT
? "ldu" : "lwzu"),
33095 fprintf (file
, "\tmtctr r12\n");
33096 fprintf (file
, "\tbctr\n");
33099 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
33100 fprintf (file
, "%s:\n", lazy_ptr_name
);
33101 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33102 fprintf (file
, "%sdyld_stub_binding_helper\n",
33103 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
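/* SMALL_INT accepts exactly the signed 16-bit range: (const_int 32767)
   and (const_int -32768) satisfy it, while (const_int 32768) does not,
   since adding 0x8000 reaches 0x10000.  */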
33114 rs6000_machopic_legitimize_pic_address (rtx orig
, machine_mode mode
,
33119 if (reg
== NULL
&& !reload_completed
)
33120 reg
= gen_reg_rtx (Pmode
);
33122 if (GET_CODE (orig
) == CONST
)
33126 if (GET_CODE (XEXP (orig
, 0)) == PLUS
33127 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
33130 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
33132 /* Use a different reg for the intermediate value, as
33133 it will be marked UNCHANGING. */
33134 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
33135 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
33138 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
33141 if (GET_CODE (offset
) == CONST_INT
)
33143 if (SMALL_INT (offset
))
33144 return plus_constant (Pmode
, base
, INTVAL (offset
));
33145 else if (!reload_completed
)
33146 offset
= force_reg (Pmode
, offset
);
33149 rtx mem
= force_const_mem (Pmode
, orig
);
33150 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
33153 return gen_rtx_PLUS (Pmode
, base
, offset
);
33156 /* Fall back on generic machopic code. */
33157 return machopic_legitimize_pic_address (orig
, mode
, reg
);
33160 /* Output a .machine directive for the Darwin assembler, and call
33161 the generic start_file routine. */
33164 rs6000_darwin_file_start (void)
33166 static const struct
33170 HOST_WIDE_INT if_set
;
33172 { "ppc64", "ppc64", MASK_64BIT
},
33173 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
33174 { "power4", "ppc970", 0 },
33175 { "G5", "ppc970", 0 },
33176 { "7450", "ppc7450", 0 },
33177 { "7400", "ppc7400", MASK_ALTIVEC
},
33178 { "G4", "ppc7400", 0 },
33179 { "750", "ppc750", 0 },
33180 { "740", "ppc750", 0 },
33181 { "G3", "ppc750", 0 },
33182 { "604e", "ppc604e", 0 },
33183 { "604", "ppc604", 0 },
33184 { "603e", "ppc603", 0 },
33185 { "603", "ppc603", 0 },
33186 { "601", "ppc601", 0 },
33187 { NULL
, "ppc", 0 } };
33188 const char *cpu_id
= "";
33191 rs6000_file_start ();
33192 darwin_file_start ();
33194 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33196 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
33197 cpu_id
= rs6000_default_cpu
;
33199 if (global_options_set
.x_rs6000_cpu_index
)
33200 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
33202 /* Look through the mapping array. Pick the first name that either
33203 matches the argument, has a bit set in IF_SET that is also set
33204 in the target flags, or has a NULL name. */
33207 while (mapping
[i
].arg
!= NULL
33208 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
33209 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
33212 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
33215 #endif /* TARGET_MACHO */
33219 rs6000_elf_reloc_rw_mask (void)
33223 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33229 /* Record an element in the table of global constructors. SYMBOL is
33230 a SYMBOL_REF of the function to be called; PRIORITY is a number
33231 between 0 and MAX_INIT_PRIORITY.
33233 This differs from default_named_section_asm_out_constructor in
33234 that we have special handling for -mrelocatable. */
33236 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
33238 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
33240 const char *section
= ".ctors";
33243 if (priority
!= DEFAULT_INIT_PRIORITY
)
33245 sprintf (buf
, ".ctors.%.5u",
33246 /* Invert the numbering so the linker puts us in the proper
33247 order; constructors are run from right to left, and the
33248 linker sorts in increasing order. */
33249 MAX_INIT_PRIORITY
- priority
);
33253 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33254 assemble_align (POINTER_SIZE
);
33256 if (DEFAULT_ABI
== ABI_V4
33257 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33259 fputs ("\t.long (", asm_out_file
);
33260 output_addr_const (asm_out_file
, symbol
);
33261 fputs (")@fixup\n", asm_out_file
);
33264 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33267 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
33269 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
33271 const char *section
= ".dtors";
33274 if (priority
!= DEFAULT_INIT_PRIORITY
)
33276 sprintf (buf
, ".dtors.%.5u",
33277 /* Invert the numbering so the linker puts us in the proper
33278 order; constructors are run from right to left, and the
33279 linker sorts in increasing order. */
33280 MAX_INIT_PRIORITY
- priority
);
33284 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33285 assemble_align (POINTER_SIZE
);
33287 if (DEFAULT_ABI
== ABI_V4
33288 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33290 fputs ("\t.long (", asm_out_file
);
33291 output_addr_const (asm_out_file
, symbol
);
33292 fputs (")@fixup\n", asm_out_file
);
33295 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33299 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
33301 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
33303 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
33304 ASM_OUTPUT_LABEL (file
, name
);
33305 fputs (DOUBLE_INT_ASM_OP
, file
);
33306 rs6000_output_function_entry (file
, name
);
33307 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
33310 fputs ("\t.size\t", file
);
33311 assemble_name (file
, name
);
33312 fputs (",24\n\t.type\t.", file
);
33313 assemble_name (file
, name
);
33314 fputs (",@function\n", file
);
33315 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
33317 fputs ("\t.globl\t.", file
);
33318 assemble_name (file
, name
);
33323 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33324 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33325 rs6000_output_function_entry (file
, name
);
33326 fputs (":\n", file
);
33330 if (DEFAULT_ABI
== ABI_V4
33331 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
33332 && !TARGET_SECURE_PLT
33333 && (!constant_pool_empty_p () || crtl
->profile
)
33338 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33340 fprintf (file
, "\t.long ");
33341 assemble_name (file
, toc_label_name
);
33344 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33345 assemble_name (file
, buf
);
33349 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33350 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33352 if (TARGET_CMODEL
== CMODEL_LARGE
&& rs6000_global_entry_point_needed_p ())
33356 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33358 fprintf (file
, "\t.quad .TOC.-");
33359 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33360 assemble_name (file
, buf
);
33364 if (DEFAULT_ABI
== ABI_AIX
)
33366 const char *desc_name
, *orig_name
;
33368 orig_name
= (*targetm
.strip_name_encoding
) (name
);
33369 desc_name
= orig_name
;
33370 while (*desc_name
== '.')
33373 if (TREE_PUBLIC (decl
))
33374 fprintf (file
, "\t.globl %s\n", desc_name
);
33376 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33377 fprintf (file
, "%s:\n", desc_name
);
33378 fprintf (file
, "\t.long %s\n", orig_name
);
33379 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
33380 fputs ("\t.long 0\n", file
);
33381 fprintf (file
, "\t.previous\n");
33383 ASM_OUTPUT_LABEL (file
, name
);
33386 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED
;
33388 rs6000_elf_file_end (void)
33390 #ifdef HAVE_AS_GNU_ATTRIBUTE
33391 /* ??? The value emitted depends on options active at file end.
33392 Assume anyone using #pragma or attributes that might change
33393 options knows what they are doing. */
33394 if ((TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
33395 && rs6000_passes_float
)
33401 else if (TARGET_SF_FPR
)
33405 if (rs6000_passes_long_double
)
33407 if (!TARGET_LONG_DOUBLE_128
)
33409 else if (TARGET_IEEEQUAD
)
33414 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", fp
);
33416 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
33418 if (rs6000_passes_vector
)
33419 fprintf (asm_out_file
, "\t.gnu_attribute 8, %d\n",
33420 (TARGET_ALTIVEC_ABI
? 2 : 1));
33421 if (rs6000_returns_struct
)
33422 fprintf (asm_out_file
, "\t.gnu_attribute 12, %d\n",
33423 aix_struct_return
? 2 : 1);
33426 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33427 if (TARGET_32BIT
|| DEFAULT_ABI
== ABI_ELFv2
)
33428 file_end_indicate_exec_stack ();
33431 if (flag_split_stack
)
33432 file_end_indicate_split_stack ();
33436 /* We have expanded a CPU builtin, so we need to emit a reference to
33437 the special symbol that LIBC uses to declare it supports the
33438 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33439 switch_to_section (data_section
);
33440 fprintf (asm_out_file
, "\t.align %u\n", TARGET_32BIT
? 2 : 3);
33441 fprintf (asm_out_file
, "\t%s %s\n",
33442 TARGET_32BIT
? ".long" : ".quad", tcb_verification_symbol
);
33449 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33450 #define HAVE_XCOFF_DWARF_EXTRAS 0
33453 static enum unwind_info_type
33454 rs6000_xcoff_debug_unwind_info (void)
33460 rs6000_xcoff_asm_output_anchor (rtx symbol
)
33464 sprintf (buffer
, "$ + " HOST_WIDE_INT_PRINT_DEC
,
33465 SYMBOL_REF_BLOCK_OFFSET (symbol
));
33466 fprintf (asm_out_file
, "%s", SET_ASM_OP
);
33467 RS6000_OUTPUT_BASENAME (asm_out_file
, XSTR (symbol
, 0));
33468 fprintf (asm_out_file
, ",");
33469 RS6000_OUTPUT_BASENAME (asm_out_file
, buffer
);
33470 fprintf (asm_out_file
, "\n");
33474 rs6000_xcoff_asm_globalize_label (FILE *stream
, const char *name
)
33476 fputs (GLOBAL_ASM_OP
, stream
);
33477 RS6000_OUTPUT_BASENAME (stream
, name
);
33478 putc ('\n', stream
);
33481 /* A get_unnamed_decl callback, used for read-only sections. PTR
33482 points to the section string variable. */
33485 rs6000_xcoff_output_readonly_section_asm_op (const void *directive
)
33487 fprintf (asm_out_file
, "\t.csect %s[RO],%s\n",
33488 *(const char *const *) directive
,
33489 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33492 /* Likewise for read-write sections. */
33495 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive
)
33497 fprintf (asm_out_file
, "\t.csect %s[RW],%s\n",
33498 *(const char *const *) directive
,
33499 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33503 rs6000_xcoff_output_tls_section_asm_op (const void *directive
)
33505 fprintf (asm_out_file
, "\t.csect %s[TL],%s\n",
33506 *(const char *const *) directive
,
33507 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33510 /* A get_unnamed_section callback, used for switching to toc_section. */
33513 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33515 if (TARGET_MINIMAL_TOC
)
33517 /* toc_section is always selected at least once from
33518 rs6000_xcoff_file_start, so this is guaranteed to
33519 always be defined once and only once in each file. */
33520 if (!toc_initialized
)
33522 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file
);
33523 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file
);
33524 toc_initialized
= 1;
33526 fprintf (asm_out_file
, "\t.csect toc_table[RW]%s\n",
33527 (TARGET_32BIT
? "" : ",3"));
33530 fputs ("\t.toc\n", asm_out_file
);
33533 /* Implement TARGET_ASM_INIT_SECTIONS. */
33536 rs6000_xcoff_asm_init_sections (void)
33538 read_only_data_section
33539 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33540 &xcoff_read_only_section_name
);
33542 private_data_section
33543 = get_unnamed_section (SECTION_WRITE
,
33544 rs6000_xcoff_output_readwrite_section_asm_op
,
33545 &xcoff_private_data_section_name
);
33548 = get_unnamed_section (SECTION_TLS
,
33549 rs6000_xcoff_output_tls_section_asm_op
,
33550 &xcoff_tls_data_section_name
);
33552 tls_private_data_section
33553 = get_unnamed_section (SECTION_TLS
,
33554 rs6000_xcoff_output_tls_section_asm_op
,
33555 &xcoff_private_data_section_name
);
33557 read_only_private_data_section
33558 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33559 &xcoff_private_data_section_name
);
33562 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op
, NULL
);
33564 readonly_data_section
= read_only_data_section
;
33568 rs6000_xcoff_reloc_rw_mask (void)
33574 rs6000_xcoff_asm_named_section (const char *name
, unsigned int flags
,
33575 tree decl ATTRIBUTE_UNUSED
)
33578 static const char * const suffix
[5] = { "PR", "RO", "RW", "TL", "XO" };
33580 if (flags
& SECTION_EXCLUDE
)
33582 else if (flags
& SECTION_DEBUG
)
33584 fprintf (asm_out_file
, "\t.dwsect %s\n", name
);
33587 else if (flags
& SECTION_CODE
)
33589 else if (flags
& SECTION_TLS
)
33591 else if (flags
& SECTION_WRITE
)
33596 fprintf (asm_out_file
, "\t.csect %s%s[%s],%u\n",
33597 (flags
& SECTION_CODE
) ? "." : "",
33598 name
, suffix
[smclass
], flags
& SECTION_ENTSIZE
);
33601 #define IN_NAMED_SECTION(DECL) \
33602 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33603 && DECL_SECTION_NAME (DECL) != NULL)
33606 rs6000_xcoff_select_section (tree decl
, int reloc
,
33607 unsigned HOST_WIDE_INT align
)
33609 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33611 if (align
> BIGGEST_ALIGNMENT
)
33613 resolve_unique_section (decl
, reloc
, true);
33614 if (IN_NAMED_SECTION (decl
))
33615 return get_named_section (decl
, NULL
, reloc
);
33618 if (decl_readonly_section (decl
, reloc
))
33620 if (TREE_PUBLIC (decl
))
33621 return read_only_data_section
;
33623 return read_only_private_data_section
;
33628 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
33630 if (TREE_PUBLIC (decl
))
33631 return tls_data_section
;
33632 else if (bss_initializer_p (decl
))
33634 /* Convert to COMMON to emit in BSS. */
33635 DECL_COMMON (decl
) = 1;
33636 return tls_comm_section
;
33639 return tls_private_data_section
;
33643 if (TREE_PUBLIC (decl
))
33644 return data_section
;
33646 return private_data_section
;
33651 rs6000_xcoff_unique_section (tree decl
, int reloc ATTRIBUTE_UNUSED
)
33655 /* Use select_section for private data and uninitialized data with
33656 alignment <= BIGGEST_ALIGNMENT. */
33657 if (!TREE_PUBLIC (decl
)
33658 || DECL_COMMON (decl
)
33659 || (DECL_INITIAL (decl
) == NULL_TREE
33660 && DECL_ALIGN (decl
) <= BIGGEST_ALIGNMENT
)
33661 || DECL_INITIAL (decl
) == error_mark_node
33662 || (flag_zero_initialized_in_bss
33663 && initializer_zerop (DECL_INITIAL (decl
))))
33666 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
33667 name
= (*targetm
.strip_name_encoding
) (name
);
33668 set_decl_section_name (decl
, name
);
33671 /* Select section for constant in constant pool.
33673 On RS/6000, all constants are in the private read-only data area.
33674 However, if this is being placed in the TOC it must be output as a
33678 rs6000_xcoff_select_rtx_section (machine_mode mode
, rtx x
,
33679 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
33681 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
33682 return toc_section
;
33684 return read_only_private_data_section
;
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;

  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
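/* For example, "foo[DS]" and "bar[RW]" are returned as "foo" and "bar";
   a name with no trailing mapping class is returned unchanged.  */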
33702 /* Section attributes. AIX is always PIC. */
33704 static unsigned int
33705 rs6000_xcoff_section_type_flags (tree decl
, const char *name
, int reloc
)
33707 unsigned int align
;
33708 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
33710 /* Align to at least UNIT size. */
33711 if ((flags
& SECTION_CODE
) != 0 || !decl
|| !DECL_P (decl
))
33712 align
= MIN_UNITS_PER_WORD
;
33714 /* Increase alignment of large objects if not already stricter. */
33715 align
= MAX ((DECL_ALIGN (decl
) / BITS_PER_UNIT
),
33716 int_size_in_bytes (TREE_TYPE (decl
)) > MIN_UNITS_PER_WORD
33717 ? UNITS_PER_FP_WORD
: MIN_UNITS_PER_WORD
);
33719 return flags
| (exact_log2 (align
) & SECTION_ENTSIZE
);
33722 /* Output at beginning of assembler file.
33724 Initialize the section names for the RS/6000 at this point.
33726 Specify filename, including full path, to assembler.
33728 We want to go into the TOC section so at least one .toc will be emitted.
33729 Also, in order to output proper .bs/.es pairs, we need at least one static
33730 [RW] section emitted.
33732 Finally, declare mcount when profiling to make the assembler happy. */
33735 rs6000_xcoff_file_start (void)
33737 rs6000_gen_section_name (&xcoff_bss_section_name
,
33738 main_input_filename
, ".bss_");
33739 rs6000_gen_section_name (&xcoff_private_data_section_name
,
33740 main_input_filename
, ".rw_");
33741 rs6000_gen_section_name (&xcoff_read_only_section_name
,
33742 main_input_filename
, ".ro_");
33743 rs6000_gen_section_name (&xcoff_tls_data_section_name
,
33744 main_input_filename
, ".tls_");
33745 rs6000_gen_section_name (&xcoff_tbss_section_name
,
33746 main_input_filename
, ".tbss_[UL]");
33748 fputs ("\t.file\t", asm_out_file
);
33749 output_quoted_string (asm_out_file
, main_input_filename
);
33750 fputc ('\n', asm_out_file
);
33751 if (write_symbols
!= NO_DEBUG
)
33752 switch_to_section (private_data_section
);
33753 switch_to_section (toc_section
);
33754 switch_to_section (text_section
);
33756 fprintf (asm_out_file
, "\t.extern %s\n", RS6000_MCOUNT
);
33757 rs6000_file_start ();
33760 /* Output at end of assembler file.
33761 On the RS/6000, referencing data should automatically pull in text. */
33764 rs6000_xcoff_file_end (void)
33766 switch_to_section (text_section
);
33767 fputs ("_section_.text:\n", asm_out_file
);
33768 switch_to_section (data_section
);
33769 fputs (TARGET_32BIT
33770 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33774 struct declare_alias_data
33777 bool function_descriptor
;
33780 /* Declare alias N. A helper function for for_node_and_aliases. */
33783 rs6000_declare_alias (struct symtab_node
*n
, void *d
)
33785 struct declare_alias_data
*data
= (struct declare_alias_data
*)d
;
33786 /* Main symbol is output specially, because varasm machinery does part of
33787 the job for us - we do not need to declare .globl/lglobs and such. */
33788 if (!n
->alias
|| n
->weakref
)
33791 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n
->decl
)))
33794 /* Prevent assemble_alias from trying to use .set pseudo operation
33795 that does not behave as expected by the middle-end. */
33796 TREE_ASM_WRITTEN (n
->decl
) = true;
33798 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n
->decl
));
33799 char *buffer
= (char *) alloca (strlen (name
) + 2);
33801 int dollar_inside
= 0;
33803 strcpy (buffer
, name
);
33804 p
= strchr (buffer
, '$');
33808 p
= strchr (p
+ 1, '$');
33810 if (TREE_PUBLIC (n
->decl
))
33812 if (!RS6000_WEAK
|| !DECL_WEAK (n
->decl
))
33814 if (dollar_inside
) {
33815 if (data
->function_descriptor
)
33816 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33817 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33819 if (data
->function_descriptor
)
33821 fputs ("\t.globl .", data
->file
);
33822 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33823 putc ('\n', data
->file
);
33825 fputs ("\t.globl ", data
->file
);
33826 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33827 putc ('\n', data
->file
);
33829 #ifdef ASM_WEAKEN_DECL
33830 else if (DECL_WEAK (n
->decl
) && !data
->function_descriptor
)
33831 ASM_WEAKEN_DECL (data
->file
, n
->decl
, name
, NULL
);
33838 if (data
->function_descriptor
)
33839 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33840 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33842 if (data
->function_descriptor
)
33844 fputs ("\t.lglobl .", data
->file
);
33845 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33846 putc ('\n', data
->file
);
33848 fputs ("\t.lglobl ", data
->file
);
33849 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33850 putc ('\n', data
->file
);
33852 if (data
->function_descriptor
)
33853 fputs (".", data
->file
);
33854 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33855 fputs (":\n", data
->file
);
33860 #ifdef HAVE_GAS_HIDDEN
33861 /* Helper function to calculate visibility of a DECL
33862 and return the value as a const string. */
33864 static const char *
33865 rs6000_xcoff_visibility (tree decl
)
33867 static const char * const visibility_types
[] = {
33868 "", ",protected", ",hidden", ",internal"
33871 enum symbol_visibility vis
= DECL_VISIBILITY (decl
);
33873 if (TREE_CODE (decl
) == FUNCTION_DECL
33874 && cgraph_node::get (decl
)
33875 && cgraph_node::get (decl
)->instrumentation_clone
33876 && cgraph_node::get (decl
)->instrumented_version
)
33877 vis
= DECL_VISIBILITY (cgraph_node::get (decl
)->instrumented_version
->decl
);
33879 return visibility_types
[vis
];
33884 /* This macro produces the initial definition of a function name.
33885 On the RS/6000, we need to place an extra '.' in the function name and
33886 output the function descriptor.
33887 Dollar signs are converted to underscores.
33889 The csect for the function will have already been created when
33890 text_section was selected. We do have to go back to that csect, however.
33892 The third and fourth parameters to the .function pseudo-op (16 and 044)
33893 are placeholders which no longer have any use.
33895 Because AIX assembler's .set command has unexpected semantics, we output
33896 all aliases as alternative labels in front of the definition. */
33899 rs6000_xcoff_declare_function_name (FILE *file
, const char *name
, tree decl
)
33901 char *buffer
= (char *) alloca (strlen (name
) + 1);
33903 int dollar_inside
= 0;
33904 struct declare_alias_data data
= {file
, false};
33906 strcpy (buffer
, name
);
33907 p
= strchr (buffer
, '$');
33911 p
= strchr (p
+ 1, '$');
33913 if (TREE_PUBLIC (decl
))
33915 if (!RS6000_WEAK
|| !DECL_WEAK (decl
))
33917 if (dollar_inside
) {
33918 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33919 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33921 fputs ("\t.globl .", file
);
33922 RS6000_OUTPUT_BASENAME (file
, buffer
);
33923 #ifdef HAVE_GAS_HIDDEN
33924 fputs (rs6000_xcoff_visibility (decl
), file
);
33931 if (dollar_inside
) {
33932 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33933 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33935 fputs ("\t.lglobl .", file
);
33936 RS6000_OUTPUT_BASENAME (file
, buffer
);
33939 fputs ("\t.csect ", file
);
33940 RS6000_OUTPUT_BASENAME (file
, buffer
);
33941 fputs (TARGET_32BIT
? "[DS]\n" : "[DS],3\n", file
);
33942 RS6000_OUTPUT_BASENAME (file
, buffer
);
33943 fputs (":\n", file
);
33944 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
33946 fputs (TARGET_32BIT
? "\t.long ." : "\t.llong .", file
);
33947 RS6000_OUTPUT_BASENAME (file
, buffer
);
33948 fputs (", TOC[tc0], 0\n", file
);
33950 switch_to_section (function_section (decl
));
33952 RS6000_OUTPUT_BASENAME (file
, buffer
);
33953 fputs (":\n", file
);
33954 data
.function_descriptor
= true;
33955 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
33957 if (!DECL_IGNORED_P (decl
))
33959 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33960 xcoffout_declare_function (file
, decl
, buffer
);
33961 else if (write_symbols
== DWARF2_DEBUG
)
33963 name
= (*targetm
.strip_name_encoding
) (name
);
33964 fprintf (file
, "\t.function .%s,.%s,2,0\n", name
, name
);
33971 /* Output assembly language to globalize a symbol from a DECL,
33972 possibly with visibility. */
33975 rs6000_xcoff_asm_globalize_decl_name (FILE *stream
, tree decl
)
33977 const char *name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
33978 fputs (GLOBAL_ASM_OP
, stream
);
33979 RS6000_OUTPUT_BASENAME (stream
, name
);
33980 #ifdef HAVE_GAS_HIDDEN
33981 fputs (rs6000_xcoff_visibility (decl
), stream
);
33983 putc ('\n', stream
);
33986 /* Output assembly language to define a symbol as COMMON from a DECL,
33987 possibly with visibility. */
33990 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream
,
33991 tree decl ATTRIBUTE_UNUSED
,
33993 unsigned HOST_WIDE_INT size
,
33994 unsigned HOST_WIDE_INT align
)
33996 unsigned HOST_WIDE_INT align2
= 2;
33999 align2
= floor_log2 (align
/ BITS_PER_UNIT
);
34003 fputs (COMMON_ASM_OP
, stream
);
34004 RS6000_OUTPUT_BASENAME (stream
, name
);
34007 "," HOST_WIDE_INT_PRINT_UNSIGNED
"," HOST_WIDE_INT_PRINT_UNSIGNED
,
34010 #ifdef HAVE_GAS_HIDDEN
34011 fputs (rs6000_xcoff_visibility (decl
), stream
);
34013 putc ('\n', stream
);
34016 /* This macro produces the initial definition of a object (variable) name.
34017 Because AIX assembler's .set command has unexpected semantics, we output
34018 all aliases as alternative labels in front of the definition. */
34021 rs6000_xcoff_declare_object_name (FILE *file
, const char *name
, tree decl
)
34023 struct declare_alias_data data
= {file
, false};
34024 RS6000_OUTPUT_BASENAME (file
, name
);
34025 fputs (":\n", file
);
34026 symtab_node::get_create (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
34030 /* Overide the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
34033 rs6000_asm_output_dwarf_pcrel (FILE *file
, int size
, const char *label
)
34035 fputs (integer_asm_op (size
, FALSE
), file
);
34036 assemble_name (file
, label
);
34037 fputs ("-$", file
);
34040 /* Output a symbol offset relative to the dbase for the current object.
34041 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34044 __gcc_unwind_dbase is embedded in all executables/libraries through
34045 libgcc/config/rs6000/crtdbase.S. */
34048 rs6000_asm_output_dwarf_datarel (FILE *file
, int size
, const char *label
)
34050 fputs (integer_asm_op (size
, FALSE
), file
);
34051 assemble_name (file
, label
);
34052 fputs("-__gcc_unwind_dbase", file
);
34057 rs6000_xcoff_encode_section_info (tree decl
, rtx rtl
, int first
)
34061 const char *symname
;
34063 default_encode_section_info (decl
, rtl
, first
);
34065 /* Careful not to prod global register variables. */
34068 symbol
= XEXP (rtl
, 0);
34069 if (GET_CODE (symbol
) != SYMBOL_REF
)
34072 flags
= SYMBOL_REF_FLAGS (symbol
);
34074 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
34075 flags
&= ~SYMBOL_FLAG_HAS_BLOCK_INFO
;
34077 SYMBOL_REF_FLAGS (symbol
) = flags
;
34079 /* Append mapping class to extern decls. */
34080 symname
= XSTR (symbol
, 0);
34081 if (decl
/* sync condition with assemble_external () */
34082 && DECL_P (decl
) && DECL_EXTERNAL (decl
) && TREE_PUBLIC (decl
)
34083 && ((TREE_CODE (decl
) == VAR_DECL
&& !DECL_THREAD_LOCAL_P (decl
))
34084 || TREE_CODE (decl
) == FUNCTION_DECL
)
34085 && symname
[strlen (symname
) - 1] != ']')
34087 char *newname
= (char *) alloca (strlen (symname
) + 5);
34088 strcpy (newname
, symname
);
34089 strcat (newname
, (TREE_CODE (decl
) == FUNCTION_DECL
34090 ? "[DS]" : "[UA]"));
34091 XSTR (symbol
, 0) = ggc_strdup (newname
);
34094 #endif /* HAVE_AS_TLS */
34095 #endif /* TARGET_XCOFF */
34098 rs6000_asm_weaken_decl (FILE *stream
, tree decl
,
34099 const char *name
, const char *val
)
34101 fputs ("\t.weak\t", stream
);
34102 RS6000_OUTPUT_BASENAME (stream
, name
);
34103 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34104 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34107 fputs ("[DS]", stream
);
34108 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34110 fputs (rs6000_xcoff_visibility (decl
), stream
);
34112 fputs ("\n\t.weak\t.", stream
);
34113 RS6000_OUTPUT_BASENAME (stream
, name
);
34115 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34117 fputs (rs6000_xcoff_visibility (decl
), stream
);
34119 fputc ('\n', stream
);
34122 #ifdef ASM_OUTPUT_DEF
34123 ASM_OUTPUT_DEF (stream
, name
, val
);
34125 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34126 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34128 fputs ("\t.set\t.", stream
);
34129 RS6000_OUTPUT_BASENAME (stream
, name
);
34130 fputs (",.", stream
);
34131 RS6000_OUTPUT_BASENAME (stream
, val
);
34132 fputc ('\n', stream
);
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
34147 /* Compute a (partial) cost for rtx X. Return true if the complete
34148 cost has been computed, and false if subexpressions should be
34149 scanned. In either case, *TOTAL contains the cost result. */
34152 rs6000_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34153 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
34155 int code
= GET_CODE (x
);
34159 /* On the RS/6000, if it is valid in the insn, it is free. */
34161 if (((outer_code
== SET
34162 || outer_code
== PLUS
34163 || outer_code
== MINUS
)
34164 && (satisfies_constraint_I (x
)
34165 || satisfies_constraint_L (x
)))
34166 || (outer_code
== AND
34167 && (satisfies_constraint_K (x
)
34169 ? satisfies_constraint_L (x
)
34170 : satisfies_constraint_J (x
))))
34171 || ((outer_code
== IOR
|| outer_code
== XOR
)
34172 && (satisfies_constraint_K (x
)
34174 ? satisfies_constraint_L (x
)
34175 : satisfies_constraint_J (x
))))
34176 || outer_code
== ASHIFT
34177 || outer_code
== ASHIFTRT
34178 || outer_code
== LSHIFTRT
34179 || outer_code
== ROTATE
34180 || outer_code
== ROTATERT
34181 || outer_code
== ZERO_EXTRACT
34182 || (outer_code
== MULT
34183 && satisfies_constraint_I (x
))
34184 || ((outer_code
== DIV
|| outer_code
== UDIV
34185 || outer_code
== MOD
|| outer_code
== UMOD
)
34186 && exact_log2 (INTVAL (x
)) >= 0)
34187 || (outer_code
== COMPARE
34188 && (satisfies_constraint_I (x
)
34189 || satisfies_constraint_K (x
)))
34190 || ((outer_code
== EQ
|| outer_code
== NE
)
34191 && (satisfies_constraint_I (x
)
34192 || satisfies_constraint_K (x
)
34194 ? satisfies_constraint_L (x
)
34195 : satisfies_constraint_J (x
))))
34196 || (outer_code
== GTU
34197 && satisfies_constraint_I (x
))
34198 || (outer_code
== LTU
34199 && satisfies_constraint_P (x
)))
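    /* Illustrative example (added, not from the original sources): for
	 (set (reg:SI r3) (plus:SI (reg:SI r4) (const_int 12)))
       the constant 12 satisfies the "I" constraint (16-bit signed
       immediate), so it folds into a single addi and is costed as free
       by the condition above.  */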
34204 else if ((outer_code
== PLUS
34205 && reg_or_add_cint_operand (x
, VOIDmode
))
34206 || (outer_code
== MINUS
34207 && reg_or_sub_cint_operand (x
, VOIDmode
))
34208 || ((outer_code
== SET
34209 || outer_code
== IOR
34210 || outer_code
== XOR
)
34212 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
34214 *total
= COSTS_N_INSNS (1);
34220 case CONST_WIDE_INT
:
34224 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34228 /* When optimizing for size, MEM should be slightly more expensive
34229 than generating address, e.g., (plus (reg) (const)).
34230 L1 cache latency is about two instructions. */
34231 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34232 if (SLOW_UNALIGNED_ACCESS (mode
, MEM_ALIGN (x
)))
34233 *total
+= COSTS_N_INSNS (100);
34242 if (FLOAT_MODE_P (mode
))
34243 *total
= rs6000_cost
->fp
;
34245 *total
= COSTS_N_INSNS (1);
34249 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34250 && satisfies_constraint_I (XEXP (x
, 1)))
34252 if (INTVAL (XEXP (x
, 1)) >= -256
34253 && INTVAL (XEXP (x
, 1)) <= 255)
34254 *total
= rs6000_cost
->mulsi_const9
;
34256 *total
= rs6000_cost
->mulsi_const
;
34258 else if (mode
== SFmode
)
34259 *total
= rs6000_cost
->fp
;
34260 else if (FLOAT_MODE_P (mode
))
34261 *total
= rs6000_cost
->dmul
;
34262 else if (mode
== DImode
)
34263 *total
= rs6000_cost
->muldi
;
34265 *total
= rs6000_cost
->mulsi
;
34269 if (mode
== SFmode
)
34270 *total
= rs6000_cost
->fp
;
34272 *total
= rs6000_cost
->dmul
;
34277 if (FLOAT_MODE_P (mode
))
34279 *total
= mode
== DFmode
? rs6000_cost
->ddiv
34280 : rs6000_cost
->sdiv
;
34287 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34288 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
34290 if (code
== DIV
|| code
== MOD
)
34292 *total
= COSTS_N_INSNS (2);
34295 *total
= COSTS_N_INSNS (1);
34299 if (GET_MODE (XEXP (x
, 1)) == DImode
)
34300 *total
= rs6000_cost
->divdi
;
34302 *total
= rs6000_cost
->divsi
;
34304 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34305 if (!TARGET_MODULO
&& (code
== MOD
|| code
== UMOD
))
34306 *total
+= COSTS_N_INSNS (2);
34310 *total
= COSTS_N_INSNS (TARGET_CTZ
? 1 : 4);
34314 *total
= COSTS_N_INSNS (4);
34318 *total
= COSTS_N_INSNS (TARGET_POPCNTD
? 1 : 6);
34322 *total
= COSTS_N_INSNS (TARGET_CMPB
? 2 : 6);
34326 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
34329 *total
= COSTS_N_INSNS (1);
34333 if (CONST_INT_P (XEXP (x
, 1)))
34335 rtx left
= XEXP (x
, 0);
34336 rtx_code left_code
= GET_CODE (left
);
34338 /* rotate-and-mask: 1 insn. */
34339 if ((left_code
== ROTATE
34340 || left_code
== ASHIFT
34341 || left_code
== LSHIFTRT
)
34342 && rs6000_is_valid_shift_mask (XEXP (x
, 1), left
, mode
))
34344 *total
= rtx_cost (XEXP (left
, 0), mode
, left_code
, 0, speed
);
34345 if (!CONST_INT_P (XEXP (left
, 1)))
34346 *total
+= rtx_cost (XEXP (left
, 1), SImode
, left_code
, 1, speed
);
34347 *total
+= COSTS_N_INSNS (1);
34351 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34352 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
34353 if (rs6000_is_valid_and_mask (XEXP (x
, 1), mode
)
34354 || (val
& 0xffff) == val
34355 || (val
& 0xffff0000) == val
34356 || ((val
& 0xffff) == 0 && mode
== SImode
))
34358 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34359 *total
+= COSTS_N_INSNS (1);
34364 if (rs6000_is_valid_2insn_and (XEXP (x
, 1), mode
))
34366 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34367 *total
+= COSTS_N_INSNS (2);
34372 *total
= COSTS_N_INSNS (1);
34377 *total
= COSTS_N_INSNS (1);
34383 *total
= COSTS_N_INSNS (1);
34387 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34388 the sign extend and shift separately within the insn. */
34389 if (TARGET_EXTSWSLI
&& mode
== DImode
34390 && GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
34391 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
)
34402 /* Handle mul_highpart. */
34403 if (outer_code
== TRUNCATE
34404 && GET_CODE (XEXP (x
, 0)) == MULT
)
34406 if (mode
== DImode
)
34407 *total
= rs6000_cost
->muldi
;
34409 *total
= rs6000_cost
->mulsi
;
34412 else if (outer_code
== AND
)
34415 *total
= COSTS_N_INSNS (1);
34420 if (GET_CODE (XEXP (x
, 0)) == MEM
)
34423 *total
= COSTS_N_INSNS (1);
34429 if (!FLOAT_MODE_P (mode
))
34431 *total
= COSTS_N_INSNS (1);
34437 case UNSIGNED_FLOAT
:
34440 case FLOAT_TRUNCATE
:
34441 *total
= rs6000_cost
->fp
;
34445 if (mode
== DFmode
)
34446 *total
= rs6000_cost
->sfdf_convert
;
34448 *total
= rs6000_cost
->fp
;
34452 switch (XINT (x
, 1))
34455 *total
= rs6000_cost
->fp
;
34467 *total
= COSTS_N_INSNS (1);
34470 else if (FLOAT_MODE_P (mode
) && TARGET_PPC_GFXOPT
&& TARGET_HARD_FLOAT
)
34472 *total
= rs6000_cost
->fp
;
34481 /* Carry bit requires mode == Pmode.
34482 NEG or PLUS already counted so only add one. */
34484 && (outer_code
== NEG
|| outer_code
== PLUS
))
34486 *total
= COSTS_N_INSNS (1);
34489 if (outer_code
== SET
)
34491 if (XEXP (x
, 1) == const0_rtx
)
34493 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34494 *total
= COSTS_N_INSNS (8);
34496 *total
= COSTS_N_INSNS (2);
34501 *total
= COSTS_N_INSNS (3);
34510 if (outer_code
== SET
&& (XEXP (x
, 1) == const0_rtx
))
34512 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34513 *total
= COSTS_N_INSNS (8);
34515 *total
= COSTS_N_INSNS (2);
34519 if (outer_code
== COMPARE
)
34533 /* Debug form of r6000_rtx_costs that is selected if -mdebug=cost. */
34536 rs6000_debug_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34537 int opno
, int *total
, bool speed
)
34539 bool ret
= rs6000_rtx_costs (x
, mode
, outer_code
, opno
, total
, speed
);
34542 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34543 "opno = %d, total = %d, speed = %s, x:\n",
34544 ret
? "complete" : "scan inner",
34545 GET_MODE_NAME (mode
),
34546 GET_RTX_NAME (outer_code
),
34549 speed
? "true" : "false");
34556 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34559 rs6000_debug_address_cost (rtx x
, machine_mode mode
,
34560 addr_space_t as
, bool speed
)
34562 int ret
= TARGET_ADDRESS_COST (x
, mode
, as
, speed
);
34564 fprintf (stderr
, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34565 ret
, speed
? "true" : "false");
34572 /* A C expression returning the cost of moving data from a register of class
34573 CLASS1 to one of CLASS2. */
34576 rs6000_register_move_cost (machine_mode mode
,
34577 reg_class_t from
, reg_class_t to
)
34581 if (TARGET_DEBUG_COST
)
34584 /* Moves from/to GENERAL_REGS. */
34585 if (reg_classes_intersect_p (to
, GENERAL_REGS
)
34586 || reg_classes_intersect_p (from
, GENERAL_REGS
))
34588 reg_class_t rclass
= from
;
34590 if (! reg_classes_intersect_p (to
, GENERAL_REGS
))
34593 if (rclass
== FLOAT_REGS
|| rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
34594 ret
= (rs6000_memory_move_cost (mode
, rclass
, false)
34595 + rs6000_memory_move_cost (mode
, GENERAL_REGS
, false));
34597 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34599 else if (rclass
== CR_REGS
)
      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
34604 else if ((rs6000_cpu
== PROCESSOR_POWER6
34605 || rs6000_cpu
== PROCESSOR_POWER7
34606 || rs6000_cpu
== PROCESSOR_POWER8
34607 || rs6000_cpu
== PROCESSOR_POWER9
)
34608 && reg_classes_intersect_p (rclass
, LINK_OR_CTR_REGS
))
34609 ret
= 6 * hard_regno_nregs
[0][mode
];
34612 /* A move will cost one instruction per GPR moved. */
34613 ret
= 2 * hard_regno_nregs
[0][mode
];
34616 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34617 else if (VECTOR_MEM_VSX_P (mode
)
34618 && reg_classes_intersect_p (to
, VSX_REGS
)
34619 && reg_classes_intersect_p (from
, VSX_REGS
))
34620 ret
= 2 * hard_regno_nregs
[FIRST_FPR_REGNO
][mode
];
34622 /* Moving between two similar registers is just one instruction. */
34623 else if (reg_classes_intersect_p (to
, from
))
34624 ret
= (FLOAT128_2REG_P (mode
)) ? 4 : 2;
34626 /* Everything else has to go through GENERAL_REGS. */
34628 ret
= (rs6000_register_move_cost (mode
, GENERAL_REGS
, to
)
34629 + rs6000_register_move_cost (mode
, from
, GENERAL_REGS
));
34631 if (TARGET_DEBUG_COST
)
34633 if (dbg_cost_ctrl
== 1)
34635 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34636 ret
, GET_MODE_NAME (mode
), reg_class_names
[from
],
34637 reg_class_names
[to
]);
/* A C expression returning the cost of moving data of MODE from a register to
34648 rs6000_memory_move_cost (machine_mode mode
, reg_class_t rclass
,
34649 bool in ATTRIBUTE_UNUSED
)
34653 if (TARGET_DEBUG_COST
)
34656 if (reg_classes_intersect_p (rclass
, GENERAL_REGS
))
34657 ret
= 4 * hard_regno_nregs
[0][mode
];
34658 else if ((reg_classes_intersect_p (rclass
, FLOAT_REGS
)
34659 || reg_classes_intersect_p (rclass
, VSX_REGS
)))
34660 ret
= 4 * hard_regno_nregs
[32][mode
];
34661 else if (reg_classes_intersect_p (rclass
, ALTIVEC_REGS
))
34662 ret
= 4 * hard_regno_nregs
[FIRST_ALTIVEC_REGNO
][mode
];
34664 ret
= 4 + rs6000_register_move_cost (mode
, rclass
, GENERAL_REGS
);
34666 if (TARGET_DEBUG_COST
)
34668 if (dbg_cost_ctrl
== 1)
34670 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34671 ret
, GET_MODE_NAME (mode
), reg_class_names
[rclass
], in
);
34678 /* Returns a code for a target-specific builtin that implements
34679 reciprocal of the function, or NULL_TREE if not available. */
34682 rs6000_builtin_reciprocal (tree fndecl
)
34684 switch (DECL_FUNCTION_CODE (fndecl
))
34686 case VSX_BUILTIN_XVSQRTDP
:
34687 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode
))
34690 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
34692 case VSX_BUILTIN_XVSQRTSP
:
34693 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode
))
34696 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_4SF
];
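  /* Added behavioral note (a rough sketch, hedged): when -mrecip enables the
     rsqrt estimate for the mode, a sqrt builtin used in a reciprocal context
     (e.g. 1.0 / __builtin_vsx_xvsqrtsp (x)) can be replaced by the middle end
     with the RSQRT builtin returned here, whose expansion is then refined by
     the software iteration code later in this file.  */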
34703 /* Load up a constant. If the mode is a vector mode, splat the value across
34704 all of the vector elements. */
34707 rs6000_load_constant_and_splat (machine_mode mode
, REAL_VALUE_TYPE dconst
)
34711 if (mode
== SFmode
|| mode
== DFmode
)
34713 rtx d
= const_double_from_real_value (dconst
, mode
);
34714 reg
= force_reg (mode
, d
);
34716 else if (mode
== V4SFmode
)
34718 rtx d
= const_double_from_real_value (dconst
, SFmode
);
34719 rtvec v
= gen_rtvec (4, d
, d
, d
, d
);
34720 reg
= gen_reg_rtx (mode
);
34721 rs6000_expand_vector_init (reg
, gen_rtx_PARALLEL (mode
, v
));
34723 else if (mode
== V2DFmode
)
34725 rtx d
= const_double_from_real_value (dconst
, DFmode
);
34726 rtvec v
= gen_rtvec (2, d
, d
);
34727 reg
= gen_reg_rtx (mode
);
34728 rs6000_expand_vector_init (reg
, gen_rtx_PARALLEL (mode
, v
));
34731 gcc_unreachable ();
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
34774 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34775 add a reg_note saying that this was a division. Support both scalar and
34776 vector divide. Assumes no trapping math and finite arguments. */
34779 rs6000_emit_swdiv (rtx dst
, rtx n
, rtx d
, bool note_p
)
34781 machine_mode mode
= GET_MODE (dst
);
34782 rtx one
, x0
, e0
, x1
, xprev
, eprev
, xnext
, enext
, u
, v
;
34785 /* Low precision estimates guarantee 5 bits of accuracy. High
34786 precision estimates guarantee 14 bits of accuracy. SFmode
34787 requires 23 bits of accuracy. DFmode requires 52 bits of
34788 accuracy. Each pass at least doubles the accuracy, leading
34789 to the following. */
34790 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
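  /* Worked example (added sketch, not from the original comments): each
     Newton-Raphson step roughly doubles the number of correct bits, so a
     5-bit fre estimate needs 3 steps for SFmode (5 -> 10 -> 20 -> 40 >= 23)
     and one extra step for DFmode (80 >= 52), while a 14-bit
     TARGET_RECIP_PRECISION estimate needs only 1 and 2 steps respectively,
     matching the pass counts chosen above.  */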
34791 if (mode
== DFmode
|| mode
== V2DFmode
)
34794 enum insn_code code
= optab_handler (smul_optab
, mode
);
34795 insn_gen_fn gen_mul
= GEN_FCN (code
);
34797 gcc_assert (code
!= CODE_FOR_nothing
);
34799 one
= rs6000_load_constant_and_splat (mode
, dconst1
);
34801 /* x0 = 1./d estimate */
34802 x0
= gen_reg_rtx (mode
);
34803 emit_insn (gen_rtx_SET (x0
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, d
),
34806 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34809 /* e0 = 1. - d * x0 */
34810 e0
= gen_reg_rtx (mode
);
34811 rs6000_emit_nmsub (e0
, d
, x0
, one
);
34813 /* x1 = x0 + e0 * x0 */
34814 x1
= gen_reg_rtx (mode
);
34815 rs6000_emit_madd (x1
, e0
, x0
, x0
);
34817 for (i
= 0, xprev
= x1
, eprev
= e0
; i
< passes
- 2;
34818 ++i
, xprev
= xnext
, eprev
= enext
) {
34820 /* enext = eprev * eprev */
34821 enext
= gen_reg_rtx (mode
);
34822 emit_insn (gen_mul (enext
, eprev
, eprev
));
34824 /* xnext = xprev + enext * xprev */
34825 xnext
= gen_reg_rtx (mode
);
34826 rs6000_emit_madd (xnext
, enext
, xprev
, xprev
);
34832 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34834 /* u = n * xprev */
34835 u
= gen_reg_rtx (mode
);
34836 emit_insn (gen_mul (u
, n
, xprev
));
34838 /* v = n - (d * u) */
34839 v
= gen_reg_rtx (mode
);
34840 rs6000_emit_nmsub (v
, d
, u
, n
);
34842 /* dst = (v * xprev) + u */
34843 rs6000_emit_madd (dst
, v
, xprev
, u
);
34846 add_reg_note (get_last_insn (), REG_EQUAL
, gen_rtx_DIV (mode
, n
, d
));
34849 /* Goldschmidt's Algorithm for single/double-precision floating point
34850 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34853 rs6000_emit_swsqrt (rtx dst
, rtx src
, bool recip
)
34855 machine_mode mode
= GET_MODE (src
);
34856 rtx e
= gen_reg_rtx (mode
);
34857 rtx g
= gen_reg_rtx (mode
);
34858 rtx h
= gen_reg_rtx (mode
);
34860 /* Low precision estimates guarantee 5 bits of accuracy. High
34861 precision estimates guarantee 14 bits of accuracy. SFmode
34862 requires 23 bits of accuracy. DFmode requires 52 bits of
34863 accuracy. Each pass at least doubles the accuracy, leading
34864 to the following. */
34865 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
34866 if (mode
== DFmode
|| mode
== V2DFmode
)
34871 enum insn_code code
= optab_handler (smul_optab
, mode
);
34872 insn_gen_fn gen_mul
= GEN_FCN (code
);
34874 gcc_assert (code
!= CODE_FOR_nothing
);
34876 mhalf
= rs6000_load_constant_and_splat (mode
, dconsthalf
);
34878 /* e = rsqrt estimate */
34879 emit_insn (gen_rtx_SET (e
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, src
),
34882 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34885 rtx zero
= force_reg (mode
, CONST0_RTX (mode
));
34887 if (mode
== SFmode
)
34889 rtx target
= emit_conditional_move (e
, GT
, src
, zero
, mode
,
34892 emit_move_insn (e
, target
);
34896 rtx cond
= gen_rtx_GT (VOIDmode
, e
, zero
);
34897 rs6000_emit_vector_cond_expr (e
, e
, zero
, cond
, src
, zero
);
34901 /* g = sqrt estimate. */
34902 emit_insn (gen_mul (g
, e
, src
));
34903 /* h = 1/(2*sqrt) estimate. */
34904 emit_insn (gen_mul (h
, e
, mhalf
));
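  /* Sketch of the Goldschmidt step that follows (added note, assuming the
     standard formulation rather than quoting the original comments):
	t  = 1/2 - g*h     (computed with FNMSUB)
	g' = g + g*t       (converges toward sqrt(src))
	h' = h + h*t       (converges toward 1/(2*sqrt(src)))
     and 2*h finally yields rsqrt when the reciprocal form is requested.  */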
34910 rtx t
= gen_reg_rtx (mode
);
34911 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
34912 /* Apply correction directly to 1/rsqrt estimate. */
34913 rs6000_emit_madd (dst
, e
, t
, e
);
34917 for (i
= 0; i
< passes
; i
++)
34919 rtx t1
= gen_reg_rtx (mode
);
34920 rtx g1
= gen_reg_rtx (mode
);
34921 rtx h1
= gen_reg_rtx (mode
);
34923 rs6000_emit_nmsub (t1
, g
, h
, mhalf
);
34924 rs6000_emit_madd (g1
, g
, t1
, g
);
34925 rs6000_emit_madd (h1
, h
, t1
, h
);
34930 /* Multiply by 2 for 1/rsqrt. */
34931 emit_insn (gen_add3_insn (dst
, h
, h
));
34936 rtx t
= gen_reg_rtx (mode
);
34937 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
34938 rs6000_emit_madd (dst
, g
, t
, g
);
34944 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34945 (Power7) targets. DST is the target, and SRC is the argument operand. */
34948 rs6000_emit_popcount (rtx dst
, rtx src
)
34950 machine_mode mode
= GET_MODE (dst
);
34953 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34954 if (TARGET_POPCNTD
)
34956 if (mode
== SImode
)
34957 emit_insn (gen_popcntdsi2 (dst
, src
));
34959 emit_insn (gen_popcntddi2 (dst
, src
));
34963 tmp1
= gen_reg_rtx (mode
);
34965 if (mode
== SImode
)
34967 emit_insn (gen_popcntbsi2 (tmp1
, src
));
34968 tmp2
= expand_mult (SImode
, tmp1
, GEN_INT (0x01010101),
34970 tmp2
= force_reg (SImode
, tmp2
);
34971 emit_insn (gen_lshrsi3 (dst
, tmp2
, GEN_INT (24)));
34975 emit_insn (gen_popcntbdi2 (tmp1
, src
));
34976 tmp2
= expand_mult (DImode
, tmp1
,
34977 GEN_INT ((HOST_WIDE_INT
)
34978 0x01010101 << 32 | 0x01010101),
34980 tmp2
= force_reg (DImode
, tmp2
);
34981 emit_insn (gen_lshrdi3 (dst
, tmp2
, GEN_INT (56)));
34986 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34987 target, and SRC is the argument operand. */
34990 rs6000_emit_parity (rtx dst
, rtx src
)
34992 machine_mode mode
= GET_MODE (dst
);
34995 tmp
= gen_reg_rtx (mode
);
34997 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35000 if (mode
== SImode
)
35002 emit_insn (gen_popcntbsi2 (tmp
, src
));
35003 emit_insn (gen_paritysi2_cmpb (dst
, tmp
));
35007 emit_insn (gen_popcntbdi2 (tmp
, src
));
35008 emit_insn (gen_paritydi2_cmpb (dst
, tmp
));
35013 if (mode
== SImode
)
35015 /* Is mult+shift >= shift+xor+shift+xor? */
35016 if (rs6000_cost
->mulsi_const
>= COSTS_N_INSNS (3))
35018 rtx tmp1
, tmp2
, tmp3
, tmp4
;
35020 tmp1
= gen_reg_rtx (SImode
);
35021 emit_insn (gen_popcntbsi2 (tmp1
, src
));
35023 tmp2
= gen_reg_rtx (SImode
);
35024 emit_insn (gen_lshrsi3 (tmp2
, tmp1
, GEN_INT (16)));
35025 tmp3
= gen_reg_rtx (SImode
);
35026 emit_insn (gen_xorsi3 (tmp3
, tmp1
, tmp2
));
35028 tmp4
= gen_reg_rtx (SImode
);
35029 emit_insn (gen_lshrsi3 (tmp4
, tmp3
, GEN_INT (8)));
35030 emit_insn (gen_xorsi3 (tmp
, tmp3
, tmp4
));
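	  /* Added note (an interpretation of the intent, hedged): after
	     popcntb each byte of tmp1 holds that byte's popcount, so
	     XOR-folding the word by 16 and then 8 bits accumulates the
	     overall parity into the low byte; the final AND with 1 below
	     extracts it.  */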
35033 rs6000_emit_popcount (tmp
, src
);
35034 emit_insn (gen_andsi3 (dst
, tmp
, const1_rtx
));
35038 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35039 if (rs6000_cost
->muldi
>= COSTS_N_INSNS (5))
35041 rtx tmp1
, tmp2
, tmp3
, tmp4
, tmp5
, tmp6
;
35043 tmp1
= gen_reg_rtx (DImode
);
35044 emit_insn (gen_popcntbdi2 (tmp1
, src
));
35046 tmp2
= gen_reg_rtx (DImode
);
35047 emit_insn (gen_lshrdi3 (tmp2
, tmp1
, GEN_INT (32)));
35048 tmp3
= gen_reg_rtx (DImode
);
35049 emit_insn (gen_xordi3 (tmp3
, tmp1
, tmp2
));
35051 tmp4
= gen_reg_rtx (DImode
);
35052 emit_insn (gen_lshrdi3 (tmp4
, tmp3
, GEN_INT (16)));
35053 tmp5
= gen_reg_rtx (DImode
);
35054 emit_insn (gen_xordi3 (tmp5
, tmp3
, tmp4
));
35056 tmp6
= gen_reg_rtx (DImode
);
35057 emit_insn (gen_lshrdi3 (tmp6
, tmp5
, GEN_INT (8)));
35058 emit_insn (gen_xordi3 (tmp
, tmp5
, tmp6
));
35061 rs6000_emit_popcount (tmp
, src
);
35062 emit_insn (gen_anddi3 (dst
, tmp
, const1_rtx
));
35066 /* Expand an Altivec constant permutation for little endian mode.
35067 There are two issues: First, the two input operands must be
35068 swapped so that together they form a double-wide array in LE
35069 order. Second, the vperm instruction has surprising behavior
35070 in LE mode: it interprets the elements of the source vectors
35071 in BE mode ("left to right") and interprets the elements of
35072 the destination vector in LE mode ("right to left"). To
35073 correct for this, we must subtract each element of the permute
35074 control vector from 31.
35076 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35077 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35078 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35079 serve as the permute control vector. Then, in BE mode,
35083 places the desired result in vr9. However, in LE mode the
35084 vector contents will be
35086 vr10 = 00000003 00000002 00000001 00000000
35087 vr11 = 00000007 00000006 00000005 00000004
35089 The result of the vperm using the same permute control vector is
35091 vr9 = 05000000 07000000 01000000 03000000
35093 That is, the leftmost 4 bytes of vr10 are interpreted as the
35094 source for the rightmost 4 bytes of vr9, and so on.
35096 If we change the permute control vector to
      vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35106 vr9 = 00000006 00000004 00000002 00000000. */
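/* Added summary (consistent with the example above, not original text): the
   code below applies exactly that transformation for any constant selector,
   rewriting each selector byte elt as 31 - elt and swapping the two input
   operands before emitting the vperm.  */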
35109 altivec_expand_vec_perm_const_le (rtx operands
[4])
35113 rtx constv
, unspec
;
35114 rtx target
= operands
[0];
35115 rtx op0
= operands
[1];
35116 rtx op1
= operands
[2];
35117 rtx sel
= operands
[3];
35119 /* Unpack and adjust the constant selector. */
35120 for (i
= 0; i
< 16; ++i
)
35122 rtx e
= XVECEXP (sel
, 0, i
);
35123 unsigned int elt
= 31 - (INTVAL (e
) & 31);
35124 perm
[i
] = GEN_INT (elt
);
35127 /* Expand to a permute, swapping the inputs and using the
35128 adjusted selector. */
35130 op0
= force_reg (V16QImode
, op0
);
35132 op1
= force_reg (V16QImode
, op1
);
35134 constv
= gen_rtx_CONST_VECTOR (V16QImode
, gen_rtvec_v (16, perm
));
35135 constv
= force_reg (V16QImode
, constv
);
35136 unspec
= gen_rtx_UNSPEC (V16QImode
, gen_rtvec (3, op1
, op0
, constv
),
35138 if (!REG_P (target
))
35140 rtx tmp
= gen_reg_rtx (V16QImode
);
35141 emit_move_insn (tmp
, unspec
);
35145 emit_move_insn (target
, unspec
);
35148 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35149 permute control vector. But here it's not a constant, so we must
35150 generate a vector NAND or NOR to do the adjustment. */
35153 altivec_expand_vec_perm_le (rtx operands
[4])
35155 rtx notx
, iorx
, unspec
;
35156 rtx target
= operands
[0];
35157 rtx op0
= operands
[1];
35158 rtx op1
= operands
[2];
35159 rtx sel
= operands
[3];
35161 rtx norreg
= gen_reg_rtx (V16QImode
);
35162 machine_mode mode
= GET_MODE (target
);
35164 /* Get everything in regs so the pattern matches. */
35166 op0
= force_reg (mode
, op0
);
35168 op1
= force_reg (mode
, op1
);
35170 sel
= force_reg (V16QImode
, sel
);
35171 if (!REG_P (target
))
35172 tmp
= gen_reg_rtx (mode
);
35174 if (TARGET_P9_VECTOR
)
35176 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op0
, op1
, sel
),
35181 /* Invert the selector with a VNAND if available, else a VNOR.
35182 The VNAND is preferred for future fusion opportunities. */
35183 notx
= gen_rtx_NOT (V16QImode
, sel
);
35184 iorx
= (TARGET_P8_VECTOR
35185 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
35186 : gen_rtx_AND (V16QImode
, notx
, notx
));
35187 emit_insn (gen_rtx_SET (norreg
, iorx
));
35189 /* Permute with operands reversed and adjusted selector. */
35190 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op1
, op0
, norreg
),
35194 /* Copy into target, possibly by way of a register. */
35195 if (!REG_P (target
))
35197 emit_move_insn (tmp
, unspec
);
35201 emit_move_insn (target
, unspec
);
35204 /* Expand an Altivec constant permutation. Return true if we match
35205 an efficient implementation; false to fall back to VPERM. */
35208 altivec_expand_vec_perm_const (rtx operands
[4])
35210 struct altivec_perm_insn
{
35211 HOST_WIDE_INT mask
;
35212 enum insn_code impl
;
35213 unsigned char perm
[16];
35215 static const struct altivec_perm_insn patterns
[] = {
35216 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum_direct
,
35217 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35218 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum_direct
,
35219 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35220 { OPTION_MASK_ALTIVEC
,
35221 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghb_direct
35222 : CODE_FOR_altivec_vmrglb_direct
),
35223 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35224 { OPTION_MASK_ALTIVEC
,
35225 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghh_direct
35226 : CODE_FOR_altivec_vmrglh_direct
),
35227 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35228 { OPTION_MASK_ALTIVEC
,
35229 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghw_direct
35230 : CODE_FOR_altivec_vmrglw_direct
),
35231 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35232 { OPTION_MASK_ALTIVEC
,
35233 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglb_direct
35234 : CODE_FOR_altivec_vmrghb_direct
),
35235 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35236 { OPTION_MASK_ALTIVEC
,
35237 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglh_direct
35238 : CODE_FOR_altivec_vmrghh_direct
),
35239 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35240 { OPTION_MASK_ALTIVEC
,
35241 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglw_direct
35242 : CODE_FOR_altivec_vmrghw_direct
),
35243 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35244 { OPTION_MASK_P8_VECTOR
,
35245 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgew_v4sf_direct
35246 : CODE_FOR_p8_vmrgow_v4sf_direct
),
35247 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35248 { OPTION_MASK_P8_VECTOR
,
35249 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgow_v4sf_direct
35250 : CODE_FOR_p8_vmrgew_v4sf_direct
),
35251 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35254 unsigned int i
, j
, elt
, which
;
35255 unsigned char perm
[16];
35256 rtx target
, op0
, op1
, sel
, x
;
35259 target
= operands
[0];
35264 /* Unpack the constant selector. */
35265 for (i
= which
= 0; i
< 16; ++i
)
35267 rtx e
= XVECEXP (sel
, 0, i
);
35268 elt
= INTVAL (e
) & 31;
35269 which
|= (elt
< 16 ? 1 : 2);
35273 /* Simplify the constant selector based on operands. */
35277 gcc_unreachable ();
35281 if (!rtx_equal_p (op0
, op1
))
35286 for (i
= 0; i
< 16; ++i
)
35298 /* Look for splat patterns. */
35303 for (i
= 0; i
< 16; ++i
)
35304 if (perm
[i
] != elt
)
35308 if (!BYTES_BIG_ENDIAN
)
35310 emit_insn (gen_altivec_vspltb_direct (target
, op0
, GEN_INT (elt
)));
35316 for (i
= 0; i
< 16; i
+= 2)
35317 if (perm
[i
] != elt
|| perm
[i
+ 1] != elt
+ 1)
35321 int field
= BYTES_BIG_ENDIAN
? elt
/ 2 : 7 - elt
/ 2;
35322 x
= gen_reg_rtx (V8HImode
);
35323 emit_insn (gen_altivec_vsplth_direct (x
, gen_lowpart (V8HImode
, op0
),
35325 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35332 for (i
= 0; i
< 16; i
+= 4)
35334 || perm
[i
+ 1] != elt
+ 1
35335 || perm
[i
+ 2] != elt
+ 2
35336 || perm
[i
+ 3] != elt
+ 3)
35340 int field
= BYTES_BIG_ENDIAN
? elt
/ 4 : 3 - elt
/ 4;
35341 x
= gen_reg_rtx (V4SImode
);
35342 emit_insn (gen_altivec_vspltw_direct (x
, gen_lowpart (V4SImode
, op0
),
35344 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35350 /* Look for merge and pack patterns. */
35351 for (j
= 0; j
< ARRAY_SIZE (patterns
); ++j
)
35355 if ((patterns
[j
].mask
& rs6000_isa_flags
) == 0)
35358 elt
= patterns
[j
].perm
[0];
35359 if (perm
[0] == elt
)
35361 else if (perm
[0] == elt
+ 16)
35365 for (i
= 1; i
< 16; ++i
)
35367 elt
= patterns
[j
].perm
[i
];
35369 elt
= (elt
>= 16 ? elt
- 16 : elt
+ 16);
35370 else if (one_vec
&& elt
>= 16)
35372 if (perm
[i
] != elt
)
35377 enum insn_code icode
= patterns
[j
].impl
;
35378 machine_mode omode
= insn_data
[icode
].operand
[0].mode
;
35379 machine_mode imode
= insn_data
[icode
].operand
[1].mode
;
35381 /* For little-endian, don't use vpkuwum and vpkuhum if the
35382 underlying vector type is not V4SI and V8HI, respectively.
35383 For example, using vpkuwum with a V8HI picks up the even
35384 halfwords (BE numbering) when the even halfwords (LE
35385 numbering) are what we need. */
35386 if (!BYTES_BIG_ENDIAN
35387 && icode
== CODE_FOR_altivec_vpkuwum_direct
35388 && ((GET_CODE (op0
) == REG
35389 && GET_MODE (op0
) != V4SImode
)
35390 || (GET_CODE (op0
) == SUBREG
35391 && GET_MODE (XEXP (op0
, 0)) != V4SImode
)))
35393 if (!BYTES_BIG_ENDIAN
35394 && icode
== CODE_FOR_altivec_vpkuhum_direct
35395 && ((GET_CODE (op0
) == REG
35396 && GET_MODE (op0
) != V8HImode
)
35397 || (GET_CODE (op0
) == SUBREG
35398 && GET_MODE (XEXP (op0
, 0)) != V8HImode
)))
35401 /* For little-endian, the two input operands must be swapped
35402 (or swapped back) to ensure proper right-to-left numbering
35404 if (swapped
^ !BYTES_BIG_ENDIAN
)
35405 std::swap (op0
, op1
);
35406 if (imode
!= V16QImode
)
35408 op0
= gen_lowpart (imode
, op0
);
35409 op1
= gen_lowpart (imode
, op1
);
35411 if (omode
== V16QImode
)
35414 x
= gen_reg_rtx (omode
);
35415 emit_insn (GEN_FCN (icode
) (x
, op0
, op1
));
35416 if (omode
!= V16QImode
)
35417 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35422 if (!BYTES_BIG_ENDIAN
)
35424 altivec_expand_vec_perm_const_le (operands
);
35431 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35432 Return true if we match an efficient implementation. */
35435 rs6000_expand_vec_perm_const_1 (rtx target
, rtx op0
, rtx op1
,
35436 unsigned char perm0
, unsigned char perm1
)
35440 /* If both selectors come from the same operand, fold to single op. */
35441 if ((perm0
& 2) == (perm1
& 2))
35448 /* If both operands are equal, fold to simpler permutation. */
35449 if (rtx_equal_p (op0
, op1
))
35452 perm1
= (perm1
& 1) + 2;
35454 /* If the first selector comes from the second operand, swap. */
35455 else if (perm0
& 2)
35461 std::swap (op0
, op1
);
35463 /* If the second selector does not come from the second operand, fail. */
35464 else if ((perm1
& 2) == 0)
35468 if (target
!= NULL
)
35470 machine_mode vmode
, dmode
;
35473 vmode
= GET_MODE (target
);
35474 gcc_assert (GET_MODE_NUNITS (vmode
) == 2);
35475 dmode
= mode_for_vector (GET_MODE_INNER (vmode
), 4);
35476 x
= gen_rtx_VEC_CONCAT (dmode
, op0
, op1
);
35477 v
= gen_rtvec (2, GEN_INT (perm0
), GEN_INT (perm1
));
35478 x
= gen_rtx_VEC_SELECT (vmode
, x
, gen_rtx_PARALLEL (VOIDmode
, v
));
35479 emit_insn (gen_rtx_SET (target
, x
));
35485 rs6000_expand_vec_perm_const (rtx operands
[4])
35487 rtx target
, op0
, op1
, sel
;
35488 unsigned char perm0
, perm1
;
35490 target
= operands
[0];
35495 /* Unpack the constant selector. */
35496 perm0
= INTVAL (XVECEXP (sel
, 0, 0)) & 3;
35497 perm1
= INTVAL (XVECEXP (sel
, 0, 1)) & 3;
35499 return rs6000_expand_vec_perm_const_1 (target
, op0
, op1
, perm0
, perm1
);
35502 /* Test whether a constant permutation is supported. */
35505 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode
,
35506 const unsigned char *sel
)
35508 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35509 if (TARGET_ALTIVEC
)
35512 /* Check for ps_merge* or evmerge* insns. */
35513 if (TARGET_PAIRED_FLOAT
&& vmode
== V2SFmode
)
35515 rtx op0
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 1);
35516 rtx op1
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 2);
35517 return rs6000_expand_vec_perm_const_1 (NULL
, op0
, op1
, sel
[0], sel
[1]);
35523 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35526 rs6000_do_expand_vec_perm (rtx target
, rtx op0
, rtx op1
,
35527 machine_mode vmode
, unsigned nelt
, rtx perm
[])
35529 machine_mode imode
;
35533 if (GET_MODE_CLASS (vmode
) != MODE_VECTOR_INT
)
35534 imode
= mode_for_vector
35535 (int_mode_for_mode (GET_MODE_INNER (vmode
)).require (), nelt
);
35537 x
= gen_rtx_CONST_VECTOR (imode
, gen_rtvec_v (nelt
, perm
));
35538 x
= expand_vec_perm (vmode
, op0
, op1
, x
, target
);
35540 emit_move_insn (target
, x
);
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */

void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);

  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
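/* Usage sketch (hypothetical caller, added for illustration): to multiply a
   V2DF value held in VAL by 2^-4, a caller would write
     rs6000_scale_v2df (val, val, -4);
   which splats pow (2.0, -4) into a V2DF constant register and emits a
   single mulv2df3.  */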
35594 /* Return an RTX representing where to find the function value of a
35595 function returning MODE. */
35597 rs6000_complex_function_value (machine_mode mode
)
35599 unsigned int regno
;
35601 machine_mode inner
= GET_MODE_INNER (mode
);
35602 unsigned int inner_bytes
= GET_MODE_UNIT_SIZE (mode
);
35604 if (TARGET_FLOAT128_TYPE
35606 || (mode
== TCmode
&& TARGET_IEEEQUAD
)))
35607 regno
= ALTIVEC_ARG_RETURN
;
35609 else if (FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35610 regno
= FP_ARG_RETURN
;
35614 regno
= GP_ARG_RETURN
;
35616 /* 32-bit is OK since it'll go in r3/r4. */
35617 if (TARGET_32BIT
&& inner_bytes
>= 4)
35618 return gen_rtx_REG (mode
, regno
);
35621 if (inner_bytes
>= 8)
35622 return gen_rtx_REG (mode
, regno
);
35624 r1
= gen_rtx_EXPR_LIST (inner
, gen_rtx_REG (inner
, regno
),
35626 r2
= gen_rtx_EXPR_LIST (inner
, gen_rtx_REG (inner
, regno
+ 1),
35627 GEN_INT (inner_bytes
));
35628 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, r1
, r2
));
/* Return an rtx describing a return value of MODE as a PARALLEL
   in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
   stride REG_STRIDE.  */

static rtx
rs6000_parallel_return (machine_mode mode,
			int n_elts, machine_mode elt_mode,
			unsigned int regno, unsigned int reg_stride)
{
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i;

  for (i = 0; i < n_elts; i++)
    {
      rtx r = gen_rtx_REG (elt_mode, regno);
      rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
      XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
      regno += reg_stride;
    }

  return par;
}
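/* Example (added illustration, hedged): an ELFv2 homogeneous aggregate of
   two doubles is returned roughly as
     (parallel [(expr_list (reg:DF f1) (const_int 0))
		(expr_list (reg:DF f2) (const_int 8))])
   i.e. n_elts = 2, elt_mode = DFmode, regno = FP_ARG_RETURN, stride 1.  */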
35654 /* Target hook for TARGET_FUNCTION_VALUE.
35656 An integer value is in r3 and a floating-point value is in fp1,
35657 unless -msoft-float. */
35660 rs6000_function_value (const_tree valtype
,
35661 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
35662 bool outgoing ATTRIBUTE_UNUSED
)
35665 unsigned int regno
;
35666 machine_mode elt_mode
;
35669 /* Special handling for structs in darwin64. */
35671 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype
), valtype
))
35673 CUMULATIVE_ARGS valcum
;
35677 valcum
.fregno
= FP_ARG_MIN_REG
;
35678 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
35679 /* Do a trial code generation as if this were going to be passed as
35680 an argument; if any part goes in memory, we return NULL. */
35681 valret
= rs6000_darwin64_record_arg (&valcum
, valtype
, true, /* retval= */ true);
35684 /* Otherwise fall through to standard ABI rules. */
35687 mode
= TYPE_MODE (valtype
);
35689 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35690 if (rs6000_discover_homogeneous_aggregate (mode
, valtype
, &elt_mode
, &n_elts
))
35692 int first_reg
, n_regs
;
35694 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode
))
35696 /* _Decimal128 must use even/odd register pairs. */
35697 first_reg
= (elt_mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35698 n_regs
= (GET_MODE_SIZE (elt_mode
) + 7) >> 3;
35702 first_reg
= ALTIVEC_ARG_RETURN
;
35706 return rs6000_parallel_return (mode
, n_elts
, elt_mode
, first_reg
, n_regs
);
  /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI.  */
35710 if (TARGET_32BIT
&& TARGET_POWERPC64
)
35719 int count
= GET_MODE_SIZE (mode
) / 4;
35720 return rs6000_parallel_return (mode
, count
, SImode
, GP_ARG_RETURN
, 1);
35723 if ((INTEGRAL_TYPE_P (valtype
)
35724 && GET_MODE_BITSIZE (mode
) < (TARGET_32BIT
? 32 : 64))
35725 || POINTER_TYPE_P (valtype
))
35726 mode
= TARGET_32BIT
? SImode
: DImode
;
35728 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35729 /* _Decimal128 must use an even/odd register pair. */
35730 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35731 else if (SCALAR_FLOAT_TYPE_P (valtype
) && TARGET_HARD_FLOAT
35732 && !FLOAT128_VECTOR_P (mode
)
35733 && ((TARGET_SINGLE_FLOAT
&& (mode
== SFmode
)) || TARGET_DOUBLE_FLOAT
))
35734 regno
= FP_ARG_RETURN
;
35735 else if (TREE_CODE (valtype
) == COMPLEX_TYPE
35736 && targetm
.calls
.split_complex_arg
)
35737 return rs6000_complex_function_value (mode
);
35738 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35739 return register is used in both cases, and we won't see V2DImode/V2DFmode
35740 for pure altivec, combine the two cases. */
35741 else if ((TREE_CODE (valtype
) == VECTOR_TYPE
|| FLOAT128_VECTOR_P (mode
))
35742 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
35743 && ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
35744 regno
= ALTIVEC_ARG_RETURN
;
35746 regno
= GP_ARG_RETURN
;
35748 return gen_rtx_REG (mode
, regno
);
35751 /* Define how to find the value returned by a library function
35752 assuming the value has mode MODE. */
35754 rs6000_libcall_value (machine_mode mode
)
35756 unsigned int regno
;
  /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI.  */
35759 if (TARGET_32BIT
&& TARGET_POWERPC64
&& mode
== DImode
)
35760 return rs6000_parallel_return (mode
, 2, SImode
, GP_ARG_RETURN
, 1);
35762 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35763 /* _Decimal128 must use an even/odd register pair. */
35764 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35765 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode
)
35766 && TARGET_HARD_FLOAT
35767 && ((TARGET_SINGLE_FLOAT
&& mode
== SFmode
) || TARGET_DOUBLE_FLOAT
))
35768 regno
= FP_ARG_RETURN
;
35769 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35770 return register is used in both cases, and we won't see V2DImode/V2DFmode
35771 for pure altivec, combine the two cases. */
35772 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
35773 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
)
35774 regno
= ALTIVEC_ARG_RETURN
;
35775 else if (COMPLEX_MODE_P (mode
) && targetm
.calls
.split_complex_arg
)
35776 return rs6000_complex_function_value (mode
);
35778 regno
= GP_ARG_RETURN
;
35780 return gen_rtx_REG (mode
, regno
);
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */
35788 rs6000_compute_pressure_classes (enum reg_class
*pressure_classes
)
35793 pressure_classes
[n
++] = GENERAL_REGS
;
35795 pressure_classes
[n
++] = VSX_REGS
;
35798 if (TARGET_ALTIVEC
)
35799 pressure_classes
[n
++] = ALTIVEC_REGS
;
35800 if (TARGET_HARD_FLOAT
)
35801 pressure_classes
[n
++] = FLOAT_REGS
;
35803 pressure_classes
[n
++] = CR_REGS
;
35804 pressure_classes
[n
++] = SPECIAL_REGS
;
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : from == RS6000_PIC_OFFSET_TABLE_REGNUM
	  ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
	    || constant_pool_empty_p ()
	  : true);
}
35829 /* Define the offset between two registers, FROM to be eliminated and its
35830 replacement TO, at the start of a routine. */
35832 rs6000_initial_elimination_offset (int from
, int to
)
35834 rs6000_stack_t
*info
= rs6000_stack_info ();
35835 HOST_WIDE_INT offset
;
35837 if (from
== HARD_FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35838 offset
= info
->push_p
? 0 : -info
->total_size
;
35839 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35841 offset
= info
->push_p
? 0 : -info
->total_size
;
35842 if (FRAME_GROWS_DOWNWARD
)
35843 offset
+= info
->fixed_size
+ info
->vars_size
+ info
->parm_size
;
35845 else if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
35846 offset
= FRAME_GROWS_DOWNWARD
35847 ? info
->fixed_size
+ info
->vars_size
+ info
->parm_size
35849 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
35850 offset
= info
->total_size
;
35851 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35852 offset
= info
->push_p
? info
->total_size
: 0;
35853 else if (from
== RS6000_PIC_OFFSET_TABLE_REGNUM
)
35856 gcc_unreachable ();
35861 /* Fill in sizes of registers used by unwinder. */
35864 rs6000_init_dwarf_reg_sizes_extra (tree address
)
35866 if (TARGET_MACHO
&& ! TARGET_ALTIVEC
)
35869 machine_mode mode
= TYPE_MODE (char_type_node
);
35870 rtx addr
= expand_expr (address
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
35871 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
35872 rtx value
= gen_int_mode (16, mode
);
35874 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35875 The unwinder still needs to know the size of Altivec registers. */
35877 for (i
= FIRST_ALTIVEC_REGNO
; i
< LAST_ALTIVEC_REGNO
+1; i
++)
35879 int column
= DWARF_REG_TO_UNWIND_COLUMN
35880 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i
), true));
35881 HOST_WIDE_INT offset
= column
* GET_MODE_SIZE (mode
);
35883 emit_move_insn (adjust_address (mem
, mode
, offset
), value
);
35888 /* Map internal gcc register numbers to debug format register numbers.
35889 FORMAT specifies the type of debug register number to use:
35890 0 -- debug information, except for frame-related sections
35891 1 -- DWARF .debug_frame section
35892 2 -- DWARF .eh_frame section */
35895 rs6000_dbx_register_number (unsigned int regno
, unsigned int format
)
35897 /* Except for the above, we use the internal number for non-DWARF
35898 debug information, and also for .eh_frame. */
35899 if ((format
== 0 && write_symbols
!= DWARF2_DEBUG
) || format
== 2)
35902 /* On some platforms, we use the standard DWARF register
35903 numbering for .debug_info and .debug_frame. */
35904 #ifdef RS6000_USE_DWARF_NUMBERING
35907 if (regno
== LR_REGNO
)
35909 if (regno
== CTR_REGNO
)
35911 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35912 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35913 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35914 to the DWARF reg for CR. */
35915 if (format
== 1 && regno
== CR2_REGNO
)
35917 if (CR_REGNO_P (regno
))
35918 return regno
- CR0_REGNO
+ 86;
35919 if (regno
== CA_REGNO
)
35920 return 101; /* XER */
35921 if (ALTIVEC_REGNO_P (regno
))
35922 return regno
- FIRST_ALTIVEC_REGNO
+ 1124;
35923 if (regno
== VRSAVE_REGNO
)
35925 if (regno
== VSCR_REGNO
)
/* Target hook eh_return_filter_mode.  */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
35938 /* Target hook for scalar_mode_supported_p. */
35940 rs6000_scalar_mode_supported_p (scalar_mode mode
)
35942 /* -m32 does not support TImode. This is the default, from
35943 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35944 same ABI as for -m32. But default_scalar_mode_supported_p allows
35945 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35946 for -mpowerpc64. */
35947 if (TARGET_32BIT
&& mode
== TImode
)
35950 if (DECIMAL_FLOAT_MODE_P (mode
))
35951 return default_decimal_float_supported_p ();
35952 else if (TARGET_FLOAT128_TYPE
&& (mode
== KFmode
|| mode
== IFmode
))
35955 return default_scalar_mode_supported_p (mode
);
35958 /* Target hook for vector_mode_supported_p. */
35960 rs6000_vector_mode_supported_p (machine_mode mode
)
35963 if (TARGET_PAIRED_FLOAT
&& PAIRED_VECTOR_MODE (mode
))
35966 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35967 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35969 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
) && !FLOAT128_IEEE_P (mode
))
35976 /* Target hook for floatn_mode. */
35977 static opt_scalar_float_mode
35978 rs6000_floatn_mode (int n
, bool extended
)
35988 if (TARGET_FLOAT128_KEYWORD
)
35989 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
35991 return opt_scalar_float_mode ();
35994 return opt_scalar_float_mode ();
35997 /* Those are the only valid _FloatNx types. */
35998 gcc_unreachable ();
36012 if (TARGET_FLOAT128_KEYWORD
)
36013 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
36015 return opt_scalar_float_mode ();
36018 return opt_scalar_float_mode ();
36024 /* Target hook for c_mode_for_suffix. */
36025 static machine_mode
36026 rs6000_c_mode_for_suffix (char suffix
)
36028 if (TARGET_FLOAT128_TYPE
)
36030 if (suffix
== 'q' || suffix
== 'Q')
36031 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
  /* At the moment, we are not defining a suffix for IBM extended double.
     If/when the default for -mabi=ieeelongdouble is changed, and we want
     to support __ibm128 constants in legacy library code, we may need to
     re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
     'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
     __float80 constants.  */
36044 /* Target hook for invalid_arg_for_unprototyped_fn. */
36045 static const char *
36046 invalid_arg_for_unprototyped_fn (const_tree typelist
, const_tree funcdecl
, const_tree val
)
36048 return (!rs6000_darwin64_abi
36050 && TREE_CODE (TREE_TYPE (val
)) == VECTOR_TYPE
36051 && (funcdecl
== NULL_TREE
36052 || (TREE_CODE (funcdecl
) == FUNCTION_DECL
36053 && DECL_BUILT_IN_CLASS (funcdecl
) != BUILT_IN_MD
)))
36054 ? N_("AltiVec argument passed to unprototyped function")
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
36081 /* Mask options that we want to support inside of attribute((target)) and
36082 #pragma GCC target operations. Note, we do not include things like
36083 64/32-bit, endianness, hard/soft floating point, etc. that would have
36084 different calling sequences. */
36086 struct rs6000_opt_mask
{
36087 const char *name
; /* option name */
36088 HOST_WIDE_INT mask
; /* mask to set */
36089 bool invert
; /* invert sense of mask */
36090 bool valid_target
; /* option is a target option */
36093 static struct rs6000_opt_mask
const rs6000_opt_masks
[] =
36095 { "altivec", OPTION_MASK_ALTIVEC
, false, true },
36096 { "cmpb", OPTION_MASK_CMPB
, false, true },
36097 { "crypto", OPTION_MASK_CRYPTO
, false, true },
36098 { "direct-move", OPTION_MASK_DIRECT_MOVE
, false, true },
36099 { "dlmzb", OPTION_MASK_DLMZB
, false, true },
36100 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX
,
36102 { "float128", OPTION_MASK_FLOAT128_KEYWORD
, false, false },
36103 { "float128-type", OPTION_MASK_FLOAT128_TYPE
, false, false },
36104 { "float128-hardware", OPTION_MASK_FLOAT128_HW
, false, false },
36105 { "fprnd", OPTION_MASK_FPRND
, false, true },
36106 { "hard-dfp", OPTION_MASK_DFP
, false, true },
36107 { "htm", OPTION_MASK_HTM
, false, true },
36108 { "isel", OPTION_MASK_ISEL
, false, true },
36109 { "mfcrf", OPTION_MASK_MFCRF
, false, true },
36110 { "mfpgpr", OPTION_MASK_MFPGPR
, false, true },
36111 { "modulo", OPTION_MASK_MODULO
, false, true },
36112 { "mulhw", OPTION_MASK_MULHW
, false, true },
36113 { "multiple", OPTION_MASK_MULTIPLE
, false, true },
36114 { "popcntb", OPTION_MASK_POPCNTB
, false, true },
36115 { "popcntd", OPTION_MASK_POPCNTD
, false, true },
36116 { "power8-fusion", OPTION_MASK_P8_FUSION
, false, true },
36117 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN
, false, true },
36118 { "power8-vector", OPTION_MASK_P8_VECTOR
, false, true },
36119 { "power9-fusion", OPTION_MASK_P9_FUSION
, false, true },
36120 { "power9-minmax", OPTION_MASK_P9_MINMAX
, false, true },
36121 { "power9-misc", OPTION_MASK_P9_MISC
, false, true },
36122 { "power9-vector", OPTION_MASK_P9_VECTOR
, false, true },
36123 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT
, false, true },
36124 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT
, false, true },
36125 { "quad-memory", OPTION_MASK_QUAD_MEMORY
, false, true },
36126 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC
, false, true },
36127 { "recip-precision", OPTION_MASK_RECIP_PRECISION
, false, true },
36128 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT
, false, true },
36129 { "string", OPTION_MASK_STRING
, false, true },
36130 { "toc-fusion", OPTION_MASK_TOC_FUSION
, false, true },
36131 { "update", OPTION_MASK_NO_UPDATE
, true , true },
36132 { "vsx", OPTION_MASK_VSX
, false, true },
36133 #ifdef OPTION_MASK_64BIT
36135 { "aix64", OPTION_MASK_64BIT
, false, false },
36136 { "aix32", OPTION_MASK_64BIT
, true, false },
36138 { "64", OPTION_MASK_64BIT
, false, false },
36139 { "32", OPTION_MASK_64BIT
, true, false },
36142 #ifdef OPTION_MASK_EABI
36143 { "eabi", OPTION_MASK_EABI
, false, false },
36145 #ifdef OPTION_MASK_LITTLE_ENDIAN
36146 { "little", OPTION_MASK_LITTLE_ENDIAN
, false, false },
36147 { "big", OPTION_MASK_LITTLE_ENDIAN
, true, false },
36149 #ifdef OPTION_MASK_RELOCATABLE
36150 { "relocatable", OPTION_MASK_RELOCATABLE
, false, false },
36152 #ifdef OPTION_MASK_STRICT_ALIGN
36153 { "strict-align", OPTION_MASK_STRICT_ALIGN
, false, false },
36155 { "soft-float", OPTION_MASK_SOFT_FLOAT
, false, false },
36156 { "string", OPTION_MASK_STRING
, false, false },
36159 /* Builtin mask mapping for printing the flags. */
36160 static struct rs6000_opt_mask
const rs6000_builtin_mask_names
[] =
36162 { "altivec", RS6000_BTM_ALTIVEC
, false, false },
36163 { "vsx", RS6000_BTM_VSX
, false, false },
36164 { "paired", RS6000_BTM_PAIRED
, false, false },
36165 { "fre", RS6000_BTM_FRE
, false, false },
36166 { "fres", RS6000_BTM_FRES
, false, false },
36167 { "frsqrte", RS6000_BTM_FRSQRTE
, false, false },
36168 { "frsqrtes", RS6000_BTM_FRSQRTES
, false, false },
36169 { "popcntd", RS6000_BTM_POPCNTD
, false, false },
36170 { "cell", RS6000_BTM_CELL
, false, false },
36171 { "power8-vector", RS6000_BTM_P8_VECTOR
, false, false },
36172 { "power9-vector", RS6000_BTM_P9_VECTOR
, false, false },
36173 { "power9-misc", RS6000_BTM_P9_MISC
, false, false },
36174 { "crypto", RS6000_BTM_CRYPTO
, false, false },
36175 { "htm", RS6000_BTM_HTM
, false, false },
36176 { "hard-dfp", RS6000_BTM_DFP
, false, false },
36177 { "hard-float", RS6000_BTM_HARD_FLOAT
, false, false },
36178 { "long-double-128", RS6000_BTM_LDBL128
, false, false },
36179 { "float128", RS6000_BTM_FLOAT128
, false, false },
36182 /* Option variables that we want to support inside attribute((target)) and
36183 #pragma GCC target operations. */
36185 struct rs6000_opt_var
{
36186 const char *name
; /* option name */
36187 size_t global_offset
; /* offset of the option in global_options. */
36188 size_t target_offset
; /* offset of the option in target options. */
36191 static struct rs6000_opt_var
const rs6000_opt_vars
[] =
36194 offsetof (struct gcc_options
, x_TARGET_FRIZ
),
36195 offsetof (struct cl_target_option
, x_TARGET_FRIZ
), },
36196 { "avoid-indexed-addresses",
36197 offsetof (struct gcc_options
, x_TARGET_AVOID_XFORM
),
36198 offsetof (struct cl_target_option
, x_TARGET_AVOID_XFORM
) },
36200 offsetof (struct gcc_options
, x_rs6000_paired_float
),
36201 offsetof (struct cl_target_option
, x_rs6000_paired_float
), },
36203 offsetof (struct gcc_options
, x_rs6000_default_long_calls
),
36204 offsetof (struct cl_target_option
, x_rs6000_default_long_calls
), },
36205 { "optimize-swaps",
36206 offsetof (struct gcc_options
, x_rs6000_optimize_swaps
),
36207 offsetof (struct cl_target_option
, x_rs6000_optimize_swaps
), },
36208 { "allow-movmisalign",
36209 offsetof (struct gcc_options
, x_TARGET_ALLOW_MOVMISALIGN
),
36210 offsetof (struct cl_target_option
, x_TARGET_ALLOW_MOVMISALIGN
), },
36212 offsetof (struct gcc_options
, x_TARGET_SCHED_GROUPS
),
36213 offsetof (struct cl_target_option
, x_TARGET_SCHED_GROUPS
), },
36215 offsetof (struct gcc_options
, x_TARGET_ALWAYS_HINT
),
36216 offsetof (struct cl_target_option
, x_TARGET_ALWAYS_HINT
), },
36217 { "align-branch-targets",
36218 offsetof (struct gcc_options
, x_TARGET_ALIGN_BRANCH_TARGETS
),
36219 offsetof (struct cl_target_option
, x_TARGET_ALIGN_BRANCH_TARGETS
), },
36221 offsetof (struct gcc_options
, x_tls_markers
),
36222 offsetof (struct cl_target_option
, x_tls_markers
), },
36224 offsetof (struct gcc_options
, x_TARGET_SCHED_PROLOG
),
36225 offsetof (struct cl_target_option
, x_TARGET_SCHED_PROLOG
), },
36227 offsetof (struct gcc_options
, x_TARGET_SCHED_PROLOG
),
36228 offsetof (struct cl_target_option
, x_TARGET_SCHED_PROLOG
), },
36231 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36232 parsing. Return true if there were no errors. */
36235 rs6000_inner_target_options (tree args
, bool attr_p
)
36239 if (args
== NULL_TREE
)
36242 else if (TREE_CODE (args
) == STRING_CST
)
36244 char *p
= ASTRDUP (TREE_STRING_POINTER (args
));
36247 while ((q
= strtok (p
, ",")) != NULL
)
36249 bool error_p
= false;
36250 bool not_valid_p
= false;
36251 const char *cpu_opt
= NULL
;
36254 if (strncmp (q
, "cpu=", 4) == 0)
36256 int cpu_index
= rs6000_cpu_name_lookup (q
+4);
36257 if (cpu_index
>= 0)
36258 rs6000_cpu_index
= cpu_index
;
36265 else if (strncmp (q
, "tune=", 5) == 0)
36267 int tune_index
= rs6000_cpu_name_lookup (q
+5);
36268 if (tune_index
>= 0)
36269 rs6000_tune_index
= tune_index
;
36279 bool invert
= false;
36283 if (strncmp (r
, "no-", 3) == 0)
36289 for (i
= 0; i
< ARRAY_SIZE (rs6000_opt_masks
); i
++)
36290 if (strcmp (r
, rs6000_opt_masks
[i
].name
) == 0)
36292 HOST_WIDE_INT mask
= rs6000_opt_masks
[i
].mask
;
36294 if (!rs6000_opt_masks
[i
].valid_target
)
36295 not_valid_p
= true;
36299 rs6000_isa_flags_explicit
|= mask
;
36301 /* VSX needs altivec, so -mvsx automagically sets
36302 altivec and disables -mavoid-indexed-addresses. */
36305 if (mask
== OPTION_MASK_VSX
)
36307 mask
|= OPTION_MASK_ALTIVEC
;
36308 TARGET_AVOID_XFORM
= 0;
36312 if (rs6000_opt_masks
[i
].invert
)
36316 rs6000_isa_flags
&= ~mask
;
36318 rs6000_isa_flags
|= mask
;
36323 if (error_p
&& !not_valid_p
)
36325 for (i
= 0; i
< ARRAY_SIZE (rs6000_opt_vars
); i
++)
36326 if (strcmp (r
, rs6000_opt_vars
[i
].name
) == 0)
36328 size_t j
= rs6000_opt_vars
[i
].global_offset
;
36329 *((int *) ((char *)&global_options
+ j
)) = !invert
;
36331 not_valid_p
= false;
36339 const char *eprefix
, *esuffix
;
36344 eprefix
= "__attribute__((__target__(";
36349 eprefix
= "#pragma GCC target ";
36354 error ("invalid cpu %qs for %s%qs%s", cpu_opt
, eprefix
,
36356 else if (not_valid_p
)
36357 error ("%s%qs%s is not allowed", eprefix
, q
, esuffix
);
36359 error ("%s%qs%s is invalid", eprefix
, q
, esuffix
);
36364 else if (TREE_CODE (args
) == TREE_LIST
)
36368 tree value
= TREE_VALUE (args
);
36371 bool ret2
= rs6000_inner_target_options (value
, attr_p
);
36375 args
= TREE_CHAIN (args
);
36377 while (args
!= NULL_TREE
);
36382 error ("attribute %<target%> argument not a string");
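/* Illustrative stand-alone sketch (assumed names, not one of the target
   hooks) of the table-driven parsing done above: split the option string on
   commas, strip an optional "no-" prefix, and set or clear the matching
   mask.  The real parser also handles "cpu="/"tune=" and the rs6000_opt_vars
   table; this sketch only covers the mask case.  */

struct sketch_opt_mask { const char *name; unsigned HOST_WIDE_INT mask; };

static bool ATTRIBUTE_UNUSED
sketch_parse_target_string (char *p, const struct sketch_opt_mask *opts,
			    size_t n, unsigned HOST_WIDE_INT *flags)
{
  for (char *q = strtok (p, ","); q != NULL; q = strtok (NULL, ","))
    {
      bool invert = (strncmp (q, "no-", 3) == 0);
      const char *r = invert ? q + 3 : q;
      size_t i;

      for (i = 0; i < n; i++)
	if (strcmp (r, opts[i].name) == 0)
	  {
	    if (invert)
	      *flags &= ~opts[i].mask;
	    else
	      *flags |= opts[i].mask;
	    break;
	  }

      if (i == n)
	return false;		/* unknown option name */
    }

  return true;
}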
36389 /* Print out the target options as a list for -mdebug=target. */
36392 rs6000_debug_target_options (tree args
, const char *prefix
)
36394 if (args
== NULL_TREE
)
36395 fprintf (stderr
, "%s<NULL>", prefix
);
36397 else if (TREE_CODE (args
) == STRING_CST
)
36399 char *p
= ASTRDUP (TREE_STRING_POINTER (args
));
36402 while ((q
= strtok (p
, ",")) != NULL
)
36405 fprintf (stderr
, "%s\"%s\"", prefix
, q
);
36410 else if (TREE_CODE (args
) == TREE_LIST
)
36414 tree value
= TREE_VALUE (args
);
36417 rs6000_debug_target_options (value
, prefix
);
36420 args
= TREE_CHAIN (args
);
36422 while (args
!= NULL_TREE
);
36426 gcc_unreachable ();
36432 /* Hook to validate attribute((target("..."))). */
36435 rs6000_valid_attribute_p (tree fndecl
,
36436 tree
ARG_UNUSED (name
),
36440 struct cl_target_option cur_target
;
36442 tree old_optimize
= build_optimization_node (&global_options
);
36443 tree new_target
, new_optimize
;
36444 tree func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
36446 gcc_assert ((fndecl
!= NULL_TREE
) && (args
!= NULL_TREE
));
36448 if (TARGET_DEBUG_TARGET
)
36450 tree tname
= DECL_NAME (fndecl
);
36451 fprintf (stderr
, "\n==================== rs6000_valid_attribute_p:\n");
36453 fprintf (stderr
, "function: %.*s\n",
36454 (int) IDENTIFIER_LENGTH (tname
),
36455 IDENTIFIER_POINTER (tname
));
36457 fprintf (stderr
, "function: unknown\n");
36459 fprintf (stderr
, "args:");
36460 rs6000_debug_target_options (args
, " ");
36461 fprintf (stderr
, "\n");
36464 fprintf (stderr
, "flags: 0x%x\n", flags
);
36466 fprintf (stderr
, "--------------------\n");
36469 /* attribute((target("default"))) does nothing, beyond
36470 affecting multi-versioning. */
36471 if (TREE_VALUE (args
)
36472 && TREE_CODE (TREE_VALUE (args
)) == STRING_CST
36473 && TREE_CHAIN (args
) == NULL_TREE
36474 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args
)), "default") == 0)
36477 old_optimize
= build_optimization_node (&global_options
);
36478 func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
36480 /* If the function changed the optimization levels as well as setting target
36481 options, start with the optimizations specified. */
36482 if (func_optimize
&& func_optimize
!= old_optimize
)
36483 cl_optimization_restore (&global_options
,
36484 TREE_OPTIMIZATION (func_optimize
));
36486 /* The target attributes may also change some optimization flags, so update
36487 the optimization options if necessary. */
36488 cl_target_option_save (&cur_target
, &global_options
);
36489 rs6000_cpu_index
= rs6000_tune_index
= -1;
36490 ret
= rs6000_inner_target_options (args
, true);
36492 /* Set up any additional state. */
36495 ret
= rs6000_option_override_internal (false);
36496 new_target
= build_target_option_node (&global_options
);
36501 new_optimize
= build_optimization_node (&global_options
);
36508 DECL_FUNCTION_SPECIFIC_TARGET (fndecl
) = new_target
;
36510 if (old_optimize
!= new_optimize
)
36511 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
) = new_optimize
;
36514 cl_target_option_restore (&global_options
, &cur_target
);
36516 if (old_optimize
!= new_optimize
)
36517 cl_optimization_restore (&global_options
,
36518 TREE_OPTIMIZATION (old_optimize
));
36524 /* Hook to validate the current #pragma GCC target and set the state, and
36525 update the macros based on what was changed. If ARGS is NULL, then
36526 POP_TARGET is used to reset the options. */
36529 rs6000_pragma_target_parse (tree args
, tree pop_target
)
36531 tree prev_tree
= build_target_option_node (&global_options
);
36533 struct cl_target_option
*prev_opt
, *cur_opt
;
36534 HOST_WIDE_INT prev_flags
, cur_flags
, diff_flags
;
36535 HOST_WIDE_INT prev_bumask
, cur_bumask
, diff_bumask
;
36537 if (TARGET_DEBUG_TARGET
)
36539 fprintf (stderr
, "\n==================== rs6000_pragma_target_parse\n");
36540 fprintf (stderr
, "args:");
36541 rs6000_debug_target_options (args
, " ");
36542 fprintf (stderr
, "\n");
36546 fprintf (stderr
, "pop_target:\n");
36547 debug_tree (pop_target
);
36550 fprintf (stderr
, "pop_target: <NULL>\n");
36552 fprintf (stderr
, "--------------------\n");
36557 cur_tree
= ((pop_target
)
36559 : target_option_default_node
);
36560 cl_target_option_restore (&global_options
,
36561 TREE_TARGET_OPTION (cur_tree
));
36565 rs6000_cpu_index
= rs6000_tune_index
= -1;
36566 if (!rs6000_inner_target_options (args
, false)
36567 || !rs6000_option_override_internal (false)
36568 || (cur_tree
= build_target_option_node (&global_options
))
36571 if (TARGET_DEBUG_BUILTIN
|| TARGET_DEBUG_TARGET
)
36572 fprintf (stderr
, "invalid pragma\n");
36578 target_option_current_node
= cur_tree
;
36580 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36581 change the macros that are defined. */
36582 if (rs6000_target_modify_macros_ptr
)
36584 prev_opt
= TREE_TARGET_OPTION (prev_tree
);
36585 prev_bumask
= prev_opt
->x_rs6000_builtin_mask
;
36586 prev_flags
= prev_opt
->x_rs6000_isa_flags
;
36588 cur_opt
= TREE_TARGET_OPTION (cur_tree
);
36589 cur_flags
= cur_opt
->x_rs6000_isa_flags
;
36590 cur_bumask
= cur_opt
->x_rs6000_builtin_mask
;
36592 diff_bumask
= (prev_bumask
^ cur_bumask
);
36593 diff_flags
= (prev_flags
^ cur_flags
);
36595 if ((diff_flags
!= 0) || (diff_bumask
!= 0))
36597 /* Delete old macros. */
36598 rs6000_target_modify_macros_ptr (false,
36599 prev_flags
& diff_flags
,
36600 prev_bumask
& diff_bumask
);
36602 /* Define new macros. */
36603 rs6000_target_modify_macros_ptr (true,
36604 cur_flags
& diff_flags
,
36605 cur_bumask
& diff_bumask
);
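/* Stand-alone sketch (assumed names) of the XOR-diff technique used above:
   any bit that differs between the previous and current flag words is first
   undefined using the old value and then redefined using the new one, so
   unchanged macros are never touched.  */

static void ATTRIBUTE_UNUSED
sketch_update_defines (HOST_WIDE_INT prev, HOST_WIDE_INT cur,
		       void (*modify) (bool define_p, HOST_WIDE_INT flags))
{
  HOST_WIDE_INT diff = prev ^ cur;

  if (diff != 0)
    {
      modify (false, prev & diff);	/* delete macros that were set before */
      modify (true, cur & diff);	/* define macros that are set now */
    }
}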
36613 /* Remember the last target of rs6000_set_current_function. */
36614 static GTY(()) tree rs6000_previous_fndecl
;
36616 /* Restore target's globals from NEW_TREE and invalidate the
36617 rs6000_previous_fndecl cache. */
36620 rs6000_activate_target_options (tree new_tree
)
36622 cl_target_option_restore (&global_options
, TREE_TARGET_OPTION (new_tree
));
36623 if (TREE_TARGET_GLOBALS (new_tree
))
36624 restore_target_globals (TREE_TARGET_GLOBALS (new_tree
));
36625 else if (new_tree
== target_option_default_node
)
36626 restore_target_globals (&default_target_globals
);
36628 TREE_TARGET_GLOBALS (new_tree
) = save_target_globals_default_opts ();
36629 rs6000_previous_fndecl
= NULL_TREE
;
36632 /* Establish appropriate back-end context for processing the function
36633 FNDECL. The argument might be NULL to indicate processing at top
36634 level, outside of any function scope. */
36636 rs6000_set_current_function (tree fndecl
)
36638 if (TARGET_DEBUG_TARGET
)
36640 fprintf (stderr
, "\n==================== rs6000_set_current_function");
36643 fprintf (stderr
, ", fndecl %s (%p)",
36644 (DECL_NAME (fndecl
)
36645 ? IDENTIFIER_POINTER (DECL_NAME (fndecl
))
36646 : "<unknown>"), (void *)fndecl
);
36648 if (rs6000_previous_fndecl
)
36649 fprintf (stderr
, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl
);
36651 fprintf (stderr
, "\n");
36654 /* Only change the context if the function changes. This hook is called
36655 several times in the course of compiling a function, and we don't want to
36656 slow things down too much or call target_reinit when it isn't safe. */
36657 if (fndecl
== rs6000_previous_fndecl
)
36661 if (rs6000_previous_fndecl
== NULL_TREE
)
36662 old_tree
= target_option_current_node
;
36663 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
))
36664 old_tree
= DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
);
36666 old_tree
= target_option_default_node
;
36669 if (fndecl
== NULL_TREE
)
36671 if (old_tree
!= target_option_current_node
)
36672 new_tree
= target_option_current_node
;
36674 new_tree
= NULL_TREE
;
36678 new_tree
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
36679 if (new_tree
== NULL_TREE
)
36680 new_tree
= target_option_default_node
;
36683 if (TARGET_DEBUG_TARGET
)
36687 fprintf (stderr
, "\nnew fndecl target specific options:\n");
36688 debug_tree (new_tree
);
36693 fprintf (stderr
, "\nold fndecl target specific options:\n");
36694 debug_tree (old_tree
);
36697 if (old_tree
!= NULL_TREE
|| new_tree
!= NULL_TREE
)
36698 fprintf (stderr
, "--------------------\n");
36701 if (new_tree
&& old_tree
!= new_tree
)
36702 rs6000_activate_target_options (new_tree
);
36705 rs6000_previous_fndecl
= fndecl
;
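/* Minimal sketch (assumed names) of the caching pattern used above: the hook
   is called many times while compiling one function, so the expensive
   re-initialization is skipped when the same declaration is seen again.  */

static const void *sketch_previous_fn;

static void ATTRIBUTE_UNUSED
sketch_set_current (const void *fn, void (*reinit) (const void *))
{
  if (fn == sketch_previous_fn)
    return;				/* nothing changed; avoid target_reinit */

  reinit (fn);
  sketch_previous_fn = fn;
}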
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
			       struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
				  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
			    ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
			    ptr->x_rs6000_isa_flags_explicit);
}
36744 /* Helper function to print the current isa or misc options on a line. */
36747 rs6000_print_options_internal (FILE *file
,
36749 const char *string
,
36750 HOST_WIDE_INT flags
,
36751 const char *prefix
,
36752 const struct rs6000_opt_mask
*opts
,
36753 size_t num_elements
)
36756 size_t start_column
= 0;
36758 size_t max_column
= 120;
36759 size_t prefix_len
= strlen (prefix
);
36760 size_t comma_len
= 0;
36761 const char *comma
= "";
36764 start_column
+= fprintf (file
, "%*s", indent
, "");
36768 fprintf (stderr
, DEBUG_FMT_S
, string
, "<none>");
36772 start_column
+= fprintf (stderr
, DEBUG_FMT_WX
, string
, flags
);
36774 /* Print the various mask options. */
36775 cur_column
= start_column
;
36776 for (i
= 0; i
< num_elements
; i
++)
36778 bool invert
= opts
[i
].invert
;
36779 const char *name
= opts
[i
].name
;
36780 const char *no_str
= "";
36781 HOST_WIDE_INT mask
= opts
[i
].mask
;
36782 size_t len
= comma_len
+ prefix_len
+ strlen (name
);
36786 if ((flags
& mask
) == 0)
36789 len
+= sizeof ("no-") - 1;
36797 if ((flags
& mask
) != 0)
36800 len
+= sizeof ("no-") - 1;
36807 if (cur_column
> max_column
)
36809 fprintf (stderr
, ", \\\n%*s", (int)start_column
, "");
36810 cur_column
= start_column
+ len
;
36814 fprintf (file
, "%s%s%s%s", comma
, prefix
, no_str
, name
);
36816 comma_len
= sizeof (", ") - 1;
36819 fputs ("\n", file
);
36822 /* Helper function to print the current isa options on a line. */
36825 rs6000_print_isa_options (FILE *file
, int indent
, const char *string
,
36826 HOST_WIDE_INT flags
)
36828 rs6000_print_options_internal (file
, indent
, string
, flags
, "-m",
36829 &rs6000_opt_masks
[0],
36830 ARRAY_SIZE (rs6000_opt_masks
));
36834 rs6000_print_builtin_options (FILE *file
, int indent
, const char *string
,
36835 HOST_WIDE_INT flags
)
36837 rs6000_print_options_internal (file
, indent
, string
, flags
, "",
36838 &rs6000_builtin_mask_names
[0],
36839 ARRAY_SIZE (rs6000_builtin_mask_names
));
36842 /* If the user used -mno-vsx, we need turn off all of the implicit ISA 2.06,
36843 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36844 -mupper-regs-df, etc.).
36846 If the user used -mno-power8-vector, we need to turn off all of the implicit
36847 ISA 2.07 and 3.0 options that relate to the vector unit.
36849 If the user used -mno-power9-vector, we need to turn off all of the implicit
36850 ISA 3.0 options that relate to the vector unit.
36852 This function does not handle explicit options such as the user specifying
36853 -mdirect-move. These are handled in rs6000_option_override_internal, and
36854 the appropriate error is given if needed.
36856 We return a mask of all of the implicit options that should not be enabled
36859 static HOST_WIDE_INT
36860 rs6000_disable_incompatible_switches (void)
36862 HOST_WIDE_INT ignore_masks
= rs6000_isa_flags_explicit
;
36865 static const struct {
36866 const HOST_WIDE_INT no_flag
; /* flag explicitly turned off. */
36867 const HOST_WIDE_INT dep_flags
; /* flags that depend on this option. */
36868 const char *const name
; /* name of the switch. */
36870 { OPTION_MASK_P9_VECTOR
, OTHER_P9_VECTOR_MASKS
, "power9-vector" },
36871 { OPTION_MASK_P8_VECTOR
, OTHER_P8_VECTOR_MASKS
, "power8-vector" },
36872 { OPTION_MASK_VSX
, OTHER_VSX_VECTOR_MASKS
, "vsx" },
36875 for (i
= 0; i
< ARRAY_SIZE (flags
); i
++)
36877 HOST_WIDE_INT no_flag
= flags
[i
].no_flag
;
36879 if ((rs6000_isa_flags
& no_flag
) == 0
36880 && (rs6000_isa_flags_explicit
& no_flag
) != 0)
36882 HOST_WIDE_INT dep_flags
= flags
[i
].dep_flags
;
36883 HOST_WIDE_INT set_flags
= (rs6000_isa_flags_explicit
36889 for (j
= 0; j
< ARRAY_SIZE (rs6000_opt_masks
); j
++)
36890 if ((set_flags
& rs6000_opt_masks
[j
].mask
) != 0)
36892 set_flags
&= ~rs6000_opt_masks
[j
].mask
;
36893 error ("%<-mno-%s%> turns off %<-m%s%>",
36895 rs6000_opt_masks
[j
].name
);
36898 gcc_assert (!set_flags
);
36901 rs6000_isa_flags
&= ~dep_flags
;
36902 ignore_masks
|= no_flag
| dep_flags
;
36906 return ignore_masks
;
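/* Stand-alone sketch (assumed names) of the dependency masks used above:
   when an option was explicitly turned off on the command line, every option
   that implicitly depends on it is cleared as well, and the whole group is
   added to the set of masks that must not be re-enabled implicitly.  */

struct sketch_dep { HOST_WIDE_INT no_flag; HOST_WIDE_INT dep_flags; };

static HOST_WIDE_INT ATTRIBUTE_UNUSED
sketch_apply_deps (HOST_WIDE_INT *flags, HOST_WIDE_INT explicit_flags,
		   const struct sketch_dep *deps, size_t n)
{
  HOST_WIDE_INT ignore = explicit_flags;

  for (size_t i = 0; i < n; i++)
    if ((*flags & deps[i].no_flag) == 0		/* option is off ... */
	&& (explicit_flags & deps[i].no_flag))	/* ... explicitly */
      {
	*flags &= ~deps[i].dep_flags;		/* drop its dependents */
	ignore |= deps[i].no_flag | deps[i].dep_flags;
      }

  return ignore;
}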
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
36927 /* Return the clone id of the target we are compiling code for in a target
36928 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36929 the priority list for the target clones (ordered from lowest to
36933 rs6000_clone_priority (tree fndecl
)
36935 tree fn_opts
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
36936 HOST_WIDE_INT isa_masks
;
36937 int ret
= CLONE_DEFAULT
;
36938 tree attrs
= lookup_attribute ("target", DECL_ATTRIBUTES (fndecl
));
36939 const char *attrs_str
= NULL
;
36941 attrs
= TREE_VALUE (TREE_VALUE (attrs
));
36942 attrs_str
= TREE_STRING_POINTER (attrs
);
36944 /* Return priority zero for default function. Return the ISA needed for the
36945 function if it is not the default. */
36946 if (strcmp (attrs_str
, "default") != 0)
36948 if (fn_opts
== NULL_TREE
)
36949 fn_opts
= target_option_default_node
;
36951 if (!fn_opts
|| !TREE_TARGET_OPTION (fn_opts
))
36952 isa_masks
= rs6000_isa_flags
;
36954 isa_masks
= TREE_TARGET_OPTION (fn_opts
)->x_rs6000_isa_flags
;
36956 for (ret
= CLONE_MAX
- 1; ret
!= 0; ret
--)
36957 if ((rs6000_clone_map
[ret
].isa_mask
& isa_masks
) != 0)
36961 if (TARGET_DEBUG_TARGET
)
36962 fprintf (stderr
, "rs6000_get_function_version_priority (%s) => %d\n",
36963 get_decl_name (fndecl
), ret
);
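/* User-level sketch of the multi-versioning this priority function supports.
   The attribute string forms and the function below are assumptions for
   illustration; the resolver generated further down picks the highest
   priority clone that the running processor supports:

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     long
     sum (const long *a, long n)
     {
       long s = 0;
       for (long i = 0; i < n; i++)
	 s += a[i];
       return s;
     }
*/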
36968 /* This compares the priority of target features in function DECL1 and DECL2.
36969 It returns positive value if DECL1 is higher priority, negative value if
36970 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36971 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36974 rs6000_compare_version_priority (tree decl1
, tree decl2
)
36976 int priority1
= rs6000_clone_priority (decl1
);
36977 int priority2
= rs6000_clone_priority (decl2
);
36978 int ret
= priority1
- priority2
;
36980 if (TARGET_DEBUG_TARGET
)
36981 fprintf (stderr
, "rs6000_compare_version_priority (%s, %s) => %d\n",
36982 get_decl_name (decl1
), get_decl_name (decl2
), ret
);
36987 /* Make a dispatcher declaration for the multi-versioned function DECL.
36988 Calls to DECL function will be replaced with calls to the dispatcher
36989 by the front-end. Returns the decl of the dispatcher function. */
36992 rs6000_get_function_versions_dispatcher (void *decl
)
36994 tree fn
= (tree
) decl
;
36995 struct cgraph_node
*node
= NULL
;
36996 struct cgraph_node
*default_node
= NULL
;
36997 struct cgraph_function_version_info
*node_v
= NULL
;
36998 struct cgraph_function_version_info
*first_v
= NULL
;
37000 tree dispatch_decl
= NULL
;
37002 struct cgraph_function_version_info
*default_version_info
= NULL
;
37003 gcc_assert (fn
!= NULL
&& DECL_FUNCTION_VERSIONED (fn
));
37005 if (TARGET_DEBUG_TARGET
)
37006 fprintf (stderr
, "rs6000_get_function_versions_dispatcher (%s)\n",
37007 get_decl_name (fn
));
37009 node
= cgraph_node::get (fn
);
37010 gcc_assert (node
!= NULL
);
37012 node_v
= node
->function_version ();
37013 gcc_assert (node_v
!= NULL
);
37015 if (node_v
->dispatcher_resolver
!= NULL
)
37016 return node_v
->dispatcher_resolver
;
37018 /* Find the default version and make it the first node. */
37020 /* Go to the beginning of the chain. */
37021 while (first_v
->prev
!= NULL
)
37022 first_v
= first_v
->prev
;
37024 default_version_info
= first_v
;
37025 while (default_version_info
!= NULL
)
37027 const tree decl2
= default_version_info
->this_node
->decl
;
37028 if (is_function_default_version (decl2
))
37030 default_version_info
= default_version_info
->next
;
37033 /* If there is no default node, just return NULL. */
37034 if (default_version_info
== NULL
)
37037 /* Make default info the first node. */
37038 if (first_v
!= default_version_info
)
37040 default_version_info
->prev
->next
= default_version_info
->next
;
37041 if (default_version_info
->next
)
37042 default_version_info
->next
->prev
= default_version_info
->prev
;
37043 first_v
->prev
= default_version_info
;
37044 default_version_info
->next
= first_v
;
37045 default_version_info
->prev
= NULL
;
37048 default_node
= default_version_info
->this_node
;
37050 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37051 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37052 "target_clones attribute needs GLIBC (2.23 and newer) that "
37053 "exports hardware capability bits");
37056 if (targetm
.has_ifunc_p ())
37058 struct cgraph_function_version_info
*it_v
= NULL
;
37059 struct cgraph_node
*dispatcher_node
= NULL
;
37060 struct cgraph_function_version_info
*dispatcher_version_info
= NULL
;
37062 /* Right now, the dispatching is done via ifunc. */
37063 dispatch_decl
= make_dispatcher_decl (default_node
->decl
);
37065 dispatcher_node
= cgraph_node::get_create (dispatch_decl
);
37066 gcc_assert (dispatcher_node
!= NULL
);
37067 dispatcher_node
->dispatcher_function
= 1;
37068 dispatcher_version_info
37069 = dispatcher_node
->insert_new_function_version ();
37070 dispatcher_version_info
->next
= default_version_info
;
37071 dispatcher_node
->definition
= 1;
37073 /* Set the dispatcher for all the versions. */
37074 it_v
= default_version_info
;
37075 while (it_v
!= NULL
)
37077 it_v
->dispatcher_resolver
= dispatch_decl
;
37083 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37084 "multiversioning needs ifunc which is not supported "
37089 return dispatch_decl
;
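/* Hand-written sketch of what the generated ifunc dispatch amounts to; the
   symbol names and the hwcap string are illustrative assumptions, and the
   real resolver body is built in GIMPLE by the functions below:

     long sum_power9 (const long *, long);
     long sum_default (const long *, long);

     static void *
     sum_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))	(ISA 3.0 / power9)
	 return (void *) sum_power9;
       return (void *) sum_default;
     }

     long sum (const long *, long) __attribute__ ((ifunc ("sum_resolver")));
*/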
37092 /* Make the resolver function decl to dispatch the versions of a multi-
37093 versioned function, DEFAULT_DECL. Create an empty basic block in the
37094 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37098 make_resolver_func (const tree default_decl
,
37099 const tree dispatch_decl
,
37100 basic_block
*empty_bb
)
37102 /* Make the resolver function static. The resolver function returns
37104 tree decl_name
= clone_function_name (default_decl
, "resolver");
37105 const char *resolver_name
= IDENTIFIER_POINTER (decl_name
);
37106 tree type
= build_function_type_list (ptr_type_node
, NULL_TREE
);
37107 tree decl
= build_fn_decl (resolver_name
, type
);
37108 SET_DECL_ASSEMBLER_NAME (decl
, decl_name
);
37110 DECL_NAME (decl
) = decl_name
;
37111 TREE_USED (decl
) = 1;
37112 DECL_ARTIFICIAL (decl
) = 1;
37113 DECL_IGNORED_P (decl
) = 0;
37114 TREE_PUBLIC (decl
) = 0;
37115 DECL_UNINLINABLE (decl
) = 1;
37117 /* Resolver is not external, body is generated. */
37118 DECL_EXTERNAL (decl
) = 0;
37119 DECL_EXTERNAL (dispatch_decl
) = 0;
37121 DECL_CONTEXT (decl
) = NULL_TREE
;
37122 DECL_INITIAL (decl
) = make_node (BLOCK
);
37123 DECL_STATIC_CONSTRUCTOR (decl
) = 0;
37125 /* Build result decl and add to function_decl. */
37126 tree t
= build_decl (UNKNOWN_LOCATION
, RESULT_DECL
, NULL_TREE
, ptr_type_node
);
37127 DECL_ARTIFICIAL (t
) = 1;
37128 DECL_IGNORED_P (t
) = 1;
37129 DECL_RESULT (decl
) = t
;
37131 gimplify_function_tree (decl
);
37132 push_cfun (DECL_STRUCT_FUNCTION (decl
));
37133 *empty_bb
= init_lowered_empty_function (decl
, false,
37134 profile_count::uninitialized ());
37136 cgraph_node::add_new_function (decl
, true);
37137 symtab
->call_cgraph_insertion_hooks (cgraph_node::get_create (decl
));
37141 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37142 DECL_ATTRIBUTES (dispatch_decl
)
37143 = make_attribute ("ifunc", resolver_name
, DECL_ATTRIBUTES (dispatch_decl
));
37145 cgraph_node::create_same_body_alias (dispatch_decl
, decl
);
37150 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37151 return a pointer to VERSION_DECL if we are running on a machine that
37152 supports the index CLONE_ISA hardware architecture bits. This function will
37153 be called during version dispatch to decide which function version to
37154 execute. It returns the basic block at the end, to which more conditions
37158 add_condition_to_bb (tree function_decl
, tree version_decl
,
37159 int clone_isa
, basic_block new_bb
)
37161 push_cfun (DECL_STRUCT_FUNCTION (function_decl
));
37163 gcc_assert (new_bb
!= NULL
);
37164 gimple_seq gseq
= bb_seq (new_bb
);
37167 tree convert_expr
= build1 (CONVERT_EXPR
, ptr_type_node
,
37168 build_fold_addr_expr (version_decl
));
37169 tree result_var
= create_tmp_var (ptr_type_node
);
37170 gimple
*convert_stmt
= gimple_build_assign (result_var
, convert_expr
);
37171 gimple
*return_stmt
= gimple_build_return (result_var
);
37173 if (clone_isa
== CLONE_DEFAULT
)
37175 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37176 gimple_seq_add_stmt (&gseq
, return_stmt
);
37177 set_bb_seq (new_bb
, gseq
);
37178 gimple_set_bb (convert_stmt
, new_bb
);
37179 gimple_set_bb (return_stmt
, new_bb
);
37184 tree bool_zero
= build_int_cst (bool_int_type_node
, 0);
37185 tree cond_var
= create_tmp_var (bool_int_type_node
);
37186 tree predicate_decl
= rs6000_builtin_decls
[(int) RS6000_BUILTIN_CPU_SUPPORTS
];
37187 const char *arg_str
= rs6000_clone_map
[clone_isa
].name
;
37188 tree predicate_arg
= build_string_literal (strlen (arg_str
) + 1, arg_str
);
37189 gimple
*call_cond_stmt
= gimple_build_call (predicate_decl
, 1, predicate_arg
);
37190 gimple_call_set_lhs (call_cond_stmt
, cond_var
);
37192 gimple_set_block (call_cond_stmt
, DECL_INITIAL (function_decl
));
37193 gimple_set_bb (call_cond_stmt
, new_bb
);
37194 gimple_seq_add_stmt (&gseq
, call_cond_stmt
);
37196 gimple
*if_else_stmt
= gimple_build_cond (NE_EXPR
, cond_var
, bool_zero
,
37197 NULL_TREE
, NULL_TREE
);
37198 gimple_set_block (if_else_stmt
, DECL_INITIAL (function_decl
));
37199 gimple_set_bb (if_else_stmt
, new_bb
);
37200 gimple_seq_add_stmt (&gseq
, if_else_stmt
);
37202 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37203 gimple_seq_add_stmt (&gseq
, return_stmt
);
37204 set_bb_seq (new_bb
, gseq
);
37206 basic_block bb1
= new_bb
;
37207 edge e12
= split_block (bb1
, if_else_stmt
);
37208 basic_block bb2
= e12
->dest
;
37209 e12
->flags
&= ~EDGE_FALLTHRU
;
37210 e12
->flags
|= EDGE_TRUE_VALUE
;
37212 edge e23
= split_block (bb2
, return_stmt
);
37213 gimple_set_bb (convert_stmt
, bb2
);
37214 gimple_set_bb (return_stmt
, bb2
);
37216 basic_block bb3
= e23
->dest
;
37217 make_edge (bb1
, bb3
, EDGE_FALSE_VALUE
);
37220 make_edge (bb2
, EXIT_BLOCK_PTR_FOR_FN (cfun
), 0);
37226 /* This function generates the dispatch function for multi-versioned functions.
37227 DISPATCH_DECL is the function which will contain the dispatch logic.
37228 FNDECLS are the function choices for dispatch, and is a tree chain.
37229 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37230 code is generated. */
37233 dispatch_function_versions (tree dispatch_decl
,
37235 basic_block
*empty_bb
)
37239 vec
<tree
> *fndecls
;
37240 tree clones
[CLONE_MAX
];
37242 if (TARGET_DEBUG_TARGET
)
37243 fputs ("dispatch_function_versions, top\n", stderr
);
37245 gcc_assert (dispatch_decl
!= NULL
37246 && fndecls_p
!= NULL
37247 && empty_bb
!= NULL
);
37249 /* fndecls_p is actually a vector. */
37250 fndecls
= static_cast<vec
<tree
> *> (fndecls_p
);
37252 /* At least one more version other than the default. */
37253 gcc_assert (fndecls
->length () >= 2);
37255 /* The first version in the vector is the default decl. */
37256 memset ((void *) clones
, '\0', sizeof (clones
));
37257 clones
[CLONE_DEFAULT
] = (*fndecls
)[0];
  /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
     on the PowerPC (on the x86_64, it is not a NOP).  The builtin function
     __builtin_cpu_supports ensures that the TOC fields are set up by requiring
     a recent glibc.  If we ever need to call __builtin_cpu_init, we would need
     to insert the code here to do the call.  */
37265 for (ix
= 1; fndecls
->iterate (ix
, &ele
); ++ix
)
37267 int priority
= rs6000_clone_priority (ele
);
37268 if (!clones
[priority
])
37269 clones
[priority
] = ele
;
37272 for (ix
= CLONE_MAX
- 1; ix
>= 0; ix
--)
37275 if (TARGET_DEBUG_TARGET
)
37276 fprintf (stderr
, "dispatch_function_versions, clone %d, %s\n",
37277 ix
, get_decl_name (clones
[ix
]));
37279 *empty_bb
= add_condition_to_bb (dispatch_decl
, clones
[ix
], ix
,
37286 /* Generate the dispatching code body to dispatch multi-versioned function
37287 DECL. The target hook is called to process the "target" attributes and
37288 provide the code to dispatch the right function at run-time. NODE points
37289 to the dispatcher decl whose body will be created. */
37292 rs6000_generate_version_dispatcher_body (void *node_p
)
37295 basic_block empty_bb
;
37296 struct cgraph_node
*node
= (cgraph_node
*) node_p
;
37297 struct cgraph_function_version_info
*ninfo
= node
->function_version ();
37299 if (ninfo
->dispatcher_resolver
)
37300 return ninfo
->dispatcher_resolver
;
37302 /* node is going to be an alias, so remove the finalized bit. */
37303 node
->definition
= false;
37305 /* The first version in the chain corresponds to the default version. */
37306 ninfo
->dispatcher_resolver
= resolver
37307 = make_resolver_func (ninfo
->next
->this_node
->decl
, node
->decl
, &empty_bb
);
37309 if (TARGET_DEBUG_TARGET
)
37310 fprintf (stderr
, "rs6000_get_function_versions_dispatcher, %s\n",
37311 get_decl_name (resolver
));
37313 push_cfun (DECL_STRUCT_FUNCTION (resolver
));
37314 auto_vec
<tree
, 2> fn_ver_vec
;
37316 for (struct cgraph_function_version_info
*vinfo
= ninfo
->next
;
37318 vinfo
= vinfo
->next
)
37320 struct cgraph_node
*version
= vinfo
->this_node
;
37321 /* Check for virtual functions here again, as by this time it should
37322 have been determined if this function needs a vtable index or
37323 not. This happens for methods in derived classes that override
37324 virtual methods in base classes but are not explicitly marked as
37326 if (DECL_VINDEX (version
->decl
))
37327 sorry ("Virtual function multiversioning not supported");
37329 fn_ver_vec
.safe_push (version
->decl
);
37332 dispatch_function_versions (resolver
, &fn_ver_vec
, &empty_bb
);
37333 cgraph_edge::rebuild_edges ();
37339 /* Hook to determine if one function can safely inline another. */
37342 rs6000_can_inline_p (tree caller
, tree callee
)
37345 tree caller_tree
= DECL_FUNCTION_SPECIFIC_TARGET (caller
);
37346 tree callee_tree
= DECL_FUNCTION_SPECIFIC_TARGET (callee
);
37348 /* If callee has no option attributes, then it is ok to inline. */
37352 /* If caller has no option attributes, but callee does then it is not ok to
37354 else if (!caller_tree
)
37359 struct cl_target_option
*caller_opts
= TREE_TARGET_OPTION (caller_tree
);
37360 struct cl_target_option
*callee_opts
= TREE_TARGET_OPTION (callee_tree
);
37362 /* Callee's options should a subset of the caller's, i.e. a vsx function
37363 can inline an altivec function but a non-vsx function can't inline a
37365 if ((caller_opts
->x_rs6000_isa_flags
& callee_opts
->x_rs6000_isa_flags
)
37366 == callee_opts
->x_rs6000_isa_flags
)
37370 if (TARGET_DEBUG_TARGET
)
37371 fprintf (stderr
, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37372 get_decl_name (caller
), get_decl_name (callee
),
37373 (ret
? "can" : "cannot"));
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */
37382 rs6000_allocate_stack_temp (machine_mode mode
,
37383 bool offsettable_p
,
37386 rtx stack
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
37387 rtx addr
= XEXP (stack
, 0);
37388 int strict_p
= reload_completed
;
37390 if (!legitimate_indirect_address_p (addr
, strict_p
))
37393 && !rs6000_legitimate_offset_address_p (mode
, addr
, strict_p
, true))
37394 stack
= replace_equiv_address (stack
, copy_addr_to_reg (addr
));
37396 else if (reg_reg_p
&& !legitimate_indexed_address_p (addr
, strict_p
))
37397 stack
= replace_equiv_address (stack
, copy_addr_to_reg (addr
));
37403 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37404 to such a form to deal with memory reference instructions like STFIWX that
37405 only take reg+reg addressing. */
37408 rs6000_address_for_fpconvert (rtx x
)
37412 gcc_assert (MEM_P (x
));
37413 addr
= XEXP (x
, 0);
37414 if (! legitimate_indirect_address_p (addr
, reload_completed
)
37415 && ! legitimate_indexed_address_p (addr
, reload_completed
))
37417 if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
37419 rtx reg
= XEXP (addr
, 0);
37420 HOST_WIDE_INT size
= GET_MODE_SIZE (GET_MODE (x
));
37421 rtx size_rtx
= GEN_INT ((GET_CODE (addr
) == PRE_DEC
) ? -size
: size
);
37422 gcc_assert (REG_P (reg
));
37423 emit_insn (gen_add3_insn (reg
, reg
, size_rtx
));
37426 else if (GET_CODE (addr
) == PRE_MODIFY
)
37428 rtx reg
= XEXP (addr
, 0);
37429 rtx expr
= XEXP (addr
, 1);
37430 gcc_assert (REG_P (reg
));
37431 gcc_assert (GET_CODE (expr
) == PLUS
);
37432 emit_insn (gen_add3_insn (reg
, XEXP (expr
, 0), XEXP (expr
, 1)));
37436 x
= replace_equiv_address (x
, copy_addr_to_reg (addr
));
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);

      if (!legitimate_indexed_address_p (addr, reload_completed)
	  && !legitimate_indirect_address_p (addr, reload_completed))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
	{
	  rtx patt = PATTERN (last);

	  if (GET_CODE (patt) == SET)
	    {
	      rtx lhs = XEXP (patt, 0);

	      if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
		return true;
	    }
	}
    }

  return false;
}
37507 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37510 rs6000_call_aix (rtx value
, rtx func_desc
, rtx flag
, rtx cookie
)
37512 const bool direct_call_p
37513 = GET_CODE (func_desc
) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (func_desc
);
37514 rtx toc_reg
= gen_rtx_REG (Pmode
, TOC_REGNUM
);
37515 rtx toc_load
= NULL_RTX
;
37516 rtx toc_restore
= NULL_RTX
;
37518 rtx abi_reg
= NULL_RTX
;
37523 /* Handle longcall attributes. */
37524 if (INTVAL (cookie
) & CALL_LONG
)
37525 func_desc
= rs6000_longcall_ref (func_desc
);
37527 /* Handle indirect calls. */
37528 if (GET_CODE (func_desc
) != SYMBOL_REF
37529 || (DEFAULT_ABI
== ABI_AIX
&& !SYMBOL_REF_FUNCTION_P (func_desc
)))
37531 /* Save the TOC into its reserved slot before the call,
37532 and prepare to restore it after the call. */
37533 rtx stack_ptr
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
37534 rtx stack_toc_offset
= GEN_INT (RS6000_TOC_SAVE_SLOT
);
37535 rtx stack_toc_mem
= gen_frame_mem (Pmode
,
37536 gen_rtx_PLUS (Pmode
, stack_ptr
,
37537 stack_toc_offset
));
37538 rtx stack_toc_unspec
= gen_rtx_UNSPEC (Pmode
,
37539 gen_rtvec (1, stack_toc_offset
),
37541 toc_restore
= gen_rtx_SET (toc_reg
, stack_toc_unspec
);
37543 /* Can we optimize saving the TOC in the prologue or
37544 do we need to do it at every call? */
37545 if (TARGET_SAVE_TOC_INDIRECT
&& !cfun
->calls_alloca
)
37546 cfun
->machine
->save_toc_in_prologue
= true;
37549 MEM_VOLATILE_P (stack_toc_mem
) = 1;
37550 emit_move_insn (stack_toc_mem
, toc_reg
);
37553 if (DEFAULT_ABI
== ABI_ELFv2
)
37555 /* A function pointer in the ELFv2 ABI is just a plain address, but
37556 the ABI requires it to be loaded into r12 before the call. */
37557 func_addr
= gen_rtx_REG (Pmode
, 12);
37558 emit_move_insn (func_addr
, func_desc
);
37559 abi_reg
= func_addr
;
37563 /* A function pointer under AIX is a pointer to a data area whose
37564 first word contains the actual address of the function, whose
37565 second word contains a pointer to its TOC, and whose third word
37566 contains a value to place in the static chain register (r11).
37567 Note that if we load the static chain, our "trampoline" need
37568 not have any executable code. */
37570 /* Load up address of the actual function. */
37571 func_desc
= force_reg (Pmode
, func_desc
);
37572 func_addr
= gen_reg_rtx (Pmode
);
37573 emit_move_insn (func_addr
, gen_rtx_MEM (Pmode
, func_desc
));
37575 /* Prepare to load the TOC of the called function. Note that the
37576 TOC load must happen immediately before the actual call so
37577 that unwinding the TOC registers works correctly. See the
37578 comment in frob_update_context. */
37579 rtx func_toc_offset
= GEN_INT (GET_MODE_SIZE (Pmode
));
37580 rtx func_toc_mem
= gen_rtx_MEM (Pmode
,
37581 gen_rtx_PLUS (Pmode
, func_desc
,
37583 toc_load
= gen_rtx_USE (VOIDmode
, func_toc_mem
);
37585 /* If we have a static chain, load it up. But, if the call was
37586 originally direct, the 3rd word has not been written since no
37587 trampoline has been built, so we ought not to load it, lest we
37588 override a static chain value. */
37590 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37591 && !chain_already_loaded (get_current_sequence ()->next
->last
))
37593 rtx sc_reg
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
37594 rtx func_sc_offset
= GEN_INT (2 * GET_MODE_SIZE (Pmode
));
37595 rtx func_sc_mem
= gen_rtx_MEM (Pmode
,
37596 gen_rtx_PLUS (Pmode
, func_desc
,
37598 emit_move_insn (sc_reg
, func_sc_mem
);
37605 /* Direct calls use the TOC: for local calls, the callee will
37606 assume the TOC register is set; for non-local calls, the
37607 PLT stub needs the TOC register. */
37609 func_addr
= func_desc
;
37612 /* Create the call. */
37613 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), flag
);
37614 if (value
!= NULL_RTX
)
37615 call
[0] = gen_rtx_SET (value
, call
[0]);
37619 call
[n_call
++] = toc_load
;
37621 call
[n_call
++] = toc_restore
;
37623 call
[n_call
++] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
37625 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (n_call
, call
));
37626 insn
= emit_call_insn (insn
);
37628 /* Mention all registers defined by the ABI to hold information
37629 as uses in CALL_INSN_FUNCTION_USAGE. */
37631 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), abi_reg
);
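/* Illustrative sketch (assumed type name) of the AIX/ELFv1 function
   descriptor layout described above; an indirect call loads the real entry
   address and the callee's TOC from this structure before branching.  */

struct sketch_aix_func_desc
{
  void *entry;		/* word 0: address of the function's code */
  void *toc;		/* word 1: callee's TOC pointer (r2) */
  void *static_chain;	/* word 2: value for the static chain (r11) */
};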
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
37686 /* This function emits the simple thunk routine that is used to preserve
37687 the link stack on the 476 cpu. */
37689 static void rs6000_code_end (void) ATTRIBUTE_UNUSED
;
37691 rs6000_code_end (void)
37696 if (!TARGET_LINK_STACK
)
37699 get_ppc476_thunk_name (name
);
37701 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
, get_identifier (name
),
37702 build_function_type_list (void_type_node
, NULL_TREE
));
37703 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
37704 NULL_TREE
, void_type_node
);
37705 TREE_PUBLIC (decl
) = 1;
37706 TREE_STATIC (decl
) = 1;
37709 if (USE_HIDDEN_LINKONCE
&& !TARGET_XCOFF
)
37711 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
37712 targetm
.asm_out
.unique_section (decl
, 0);
37713 switch_to_section (get_named_section (decl
, NULL
, 0));
37714 DECL_WEAK (decl
) = 1;
37715 ASM_WEAKEN_DECL (asm_out_file
, decl
, name
, 0);
37716 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
37717 targetm
.asm_out
.assemble_visibility (decl
, VISIBILITY_HIDDEN
);
37718 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
37723 switch_to_section (text_section
);
37724 ASM_OUTPUT_LABEL (asm_out_file
, name
);
37727 DECL_INITIAL (decl
) = make_node (BLOCK
);
37728 current_function_decl
= decl
;
37729 allocate_struct_function (decl
, false);
37730 init_function_start (decl
);
37731 first_function_block_is_cold
= false;
37732 /* Make sure unwind info is emitted for the thunk if needed. */
37733 final_start_function (emit_barrier (), asm_out_file
, 1);
37735 fputs ("\tblr\n", asm_out_file
);
37737 final_end_function ();
37738 init_insn_lengths ();
37739 free_after_compilation (cfun
);
37741 current_function_decl
= NULL
;
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
37772 rs6000_split_logical_inner (rtx dest
,
37775 enum rtx_code code
,
37777 bool complement_final_p
,
37778 bool complement_op1_p
,
37779 bool complement_op2_p
)
37783 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37784 if (op2
&& GET_CODE (op2
) == CONST_INT
37785 && (mode
== SImode
|| (mode
== DImode
&& TARGET_POWERPC64
))
37786 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
37788 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
37789 HOST_WIDE_INT value
= INTVAL (op2
) & mask
;
37791 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37796 emit_insn (gen_rtx_SET (dest
, const0_rtx
));
37800 else if (value
== mask
)
37802 if (!rtx_equal_p (dest
, op1
))
37803 emit_insn (gen_rtx_SET (dest
, op1
));
      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
37810 else if (code
== IOR
|| code
== XOR
)
37814 if (!rtx_equal_p (dest
, op1
))
37815 emit_insn (gen_rtx_SET (dest
, op1
));
37821 if (code
== AND
&& mode
== SImode
37822 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
37824 emit_insn (gen_andsi3 (dest
, op1
, op2
));
37828 if (complement_op1_p
)
37829 op1
= gen_rtx_NOT (mode
, op1
);
37831 if (complement_op2_p
)
37832 op2
= gen_rtx_NOT (mode
, op2
);
37834 /* For canonical RTL, if only one arm is inverted it is the first. */
37835 if (!complement_op1_p
&& complement_op2_p
)
37836 std::swap (op1
, op2
);
37838 bool_rtx
= ((code
== NOT
)
37839 ? gen_rtx_NOT (mode
, op1
)
37840 : gen_rtx_fmt_ee (code
, mode
, op1
, op2
));
37842 if (complement_final_p
)
37843 bool_rtx
= gen_rtx_NOT (mode
, bool_rtx
);
37845 emit_insn (gen_rtx_SET (dest
, bool_rtx
));
37848 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37849 operations are split immediately during RTL generation to allow for more
37850 optimizations of the AND/IOR/XOR.
37852 OPERANDS is an array containing the destination and two input operands.
37853 CODE is the base operation (AND, IOR, XOR, NOT).
37854 MODE is the machine mode.
37855 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37856 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37857 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
37858 CLOBBER_REG is either NULL or a scratch register of type CC to allow
37859 formation of the AND instructions. */
37862 rs6000_split_logical_di (rtx operands
[3],
37863 enum rtx_code code
,
37864 bool complement_final_p
,
37865 bool complement_op1_p
,
37866 bool complement_op2_p
)
37868 const HOST_WIDE_INT lower_32bits
= HOST_WIDE_INT_C(0xffffffff);
37869 const HOST_WIDE_INT upper_32bits
= ~ lower_32bits
;
37870 const HOST_WIDE_INT sign_bit
= HOST_WIDE_INT_C(0x80000000);
37871 enum hi_lo
{ hi
= 0, lo
= 1 };
37872 rtx op0_hi_lo
[2], op1_hi_lo
[2], op2_hi_lo
[2];
37875 op0_hi_lo
[hi
] = gen_highpart (SImode
, operands
[0]);
37876 op1_hi_lo
[hi
] = gen_highpart (SImode
, operands
[1]);
37877 op0_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[0]);
37878 op1_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[1]);
37881 op2_hi_lo
[hi
] = op2_hi_lo
[lo
] = NULL_RTX
;
37884 if (GET_CODE (operands
[2]) != CONST_INT
)
37886 op2_hi_lo
[hi
] = gen_highpart_mode (SImode
, DImode
, operands
[2]);
37887 op2_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[2]);
37891 HOST_WIDE_INT value
= INTVAL (operands
[2]);
37892 HOST_WIDE_INT value_hi_lo
[2];
37894 gcc_assert (!complement_final_p
);
37895 gcc_assert (!complement_op1_p
);
37896 gcc_assert (!complement_op2_p
);
37898 value_hi_lo
[hi
] = value
>> 32;
37899 value_hi_lo
[lo
] = value
& lower_32bits
;
37901 for (i
= 0; i
< 2; i
++)
37903 HOST_WIDE_INT sub_value
= value_hi_lo
[i
];
37905 if (sub_value
& sign_bit
)
37906 sub_value
|= upper_32bits
;
37908 op2_hi_lo
[i
] = GEN_INT (sub_value
);
37910 /* If this is an AND instruction, check to see if we need to load
37911 the value in a register. */
37912 if (code
== AND
&& sub_value
!= -1 && sub_value
!= 0
37913 && !and_operand (op2_hi_lo
[i
], SImode
))
37914 op2_hi_lo
[i
] = force_reg (SImode
, op2_hi_lo
[i
]);
37919 for (i
= 0; i
< 2; i
++)
37921 /* Split large IOR/XOR operations. */
37922 if ((code
== IOR
|| code
== XOR
)
37923 && GET_CODE (op2_hi_lo
[i
]) == CONST_INT
37924 && !complement_final_p
37925 && !complement_op1_p
37926 && !complement_op2_p
37927 && !logical_const_operand (op2_hi_lo
[i
], SImode
))
37929 HOST_WIDE_INT value
= INTVAL (op2_hi_lo
[i
]);
37930 HOST_WIDE_INT hi_16bits
= value
& HOST_WIDE_INT_C(0xffff0000);
37931 HOST_WIDE_INT lo_16bits
= value
& HOST_WIDE_INT_C(0x0000ffff);
37932 rtx tmp
= gen_reg_rtx (SImode
);
37934 /* Make sure the constant is sign extended. */
37935 if ((hi_16bits
& sign_bit
) != 0)
37936 hi_16bits
|= upper_32bits
;
37938 rs6000_split_logical_inner (tmp
, op1_hi_lo
[i
], GEN_INT (hi_16bits
),
37939 code
, SImode
, false, false, false);
37941 rs6000_split_logical_inner (op0_hi_lo
[i
], tmp
, GEN_INT (lo_16bits
),
37942 code
, SImode
, false, false, false);
37945 rs6000_split_logical_inner (op0_hi_lo
[i
], op1_hi_lo
[i
], op2_hi_lo
[i
],
37946 code
, SImode
, complement_final_p
,
37947 complement_op1_p
, complement_op2_p
);
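/* Arithmetic sketch (assumed names) of the constant splitting above: a
   64-bit value is cut into two sign-extended 32-bit halves, and a half that
   is not a valid logical constant is further cut into the 16-bit pieces used
   by ORI (low) and ORIS (shifted) instructions.  */

static void ATTRIBUTE_UNUSED
sketch_split_di_constant (HOST_WIDE_INT value, int *hi, int *lo,
			  unsigned short *lo_ori, unsigned short *lo_oris)
{
  *hi = (int) (value >> 32);			/* upper word, sign-extended */
  *lo = (int) (unsigned int) value;		/* lower word, sign-extended */
  *lo_ori = (unsigned short) (*lo & 0xffff);	/* low 16 bits -> ori */
  *lo_oris = (unsigned short) (((unsigned int) *lo) >> 16); /* high 16 -> oris */
}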
37953 /* Split the insns that make up boolean operations operating on multiple GPR
37954 registers. The boolean MD patterns ensure that the inputs either are
37955 exactly the same as the output registers, or there is no overlap.
37957 OPERANDS is an array containing the destination and two input operands.
37958 CODE is the base operation (AND, IOR, XOR, NOT).
37959 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37960 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37961 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37964 rs6000_split_logical (rtx operands
[3],
37965 enum rtx_code code
,
37966 bool complement_final_p
,
37967 bool complement_op1_p
,
37968 bool complement_op2_p
)
37970 machine_mode mode
= GET_MODE (operands
[0]);
37971 machine_mode sub_mode
;
37973 int sub_size
, regno0
, regno1
, nregs
, i
;
37975 /* If this is DImode, use the specialized version that can run before
37976 register allocation. */
37977 if (mode
== DImode
&& !TARGET_POWERPC64
)
37979 rs6000_split_logical_di (operands
, code
, complement_final_p
,
37980 complement_op1_p
, complement_op2_p
);
37986 op2
= (code
== NOT
) ? NULL_RTX
: operands
[2];
37987 sub_mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
37988 sub_size
= GET_MODE_SIZE (sub_mode
);
37989 regno0
= REGNO (op0
);
37990 regno1
= REGNO (op1
);
37992 gcc_assert (reload_completed
);
37993 gcc_assert (IN_RANGE (regno0
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
37994 gcc_assert (IN_RANGE (regno1
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
37996 nregs
= rs6000_hard_regno_nregs
[(int)mode
][regno0
];
37997 gcc_assert (nregs
> 1);
37999 if (op2
&& REG_P (op2
))
38000 gcc_assert (IN_RANGE (REGNO (op2
), FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
38002 for (i
= 0; i
< nregs
; i
++)
38004 int offset
= i
* sub_size
;
38005 rtx sub_op0
= simplify_subreg (sub_mode
, op0
, mode
, offset
);
38006 rtx sub_op1
= simplify_subreg (sub_mode
, op1
, mode
, offset
);
38007 rtx sub_op2
= ((code
== NOT
)
38009 : simplify_subreg (sub_mode
, op2
, mode
, offset
));
38011 rs6000_split_logical_inner (sub_op0
, sub_op1
, sub_op2
, code
, sub_mode
,
38012 complement_final_p
, complement_op1_p
,
38020 /* Return true if the peephole2 can combine a load involving a combination of
38021 an addis instruction and a load with an offset that can be fused together on
38025 fusion_gpr_load_p (rtx addis_reg
, /* register set via addis. */
38026 rtx addis_value
, /* addis value. */
38027 rtx target
, /* target register that is loaded. */
38028 rtx mem
) /* bottom part of the memory addr. */
38033 /* Validate arguments. */
38034 if (!base_reg_operand (addis_reg
, GET_MODE (addis_reg
)))
38037 if (!base_reg_operand (target
, GET_MODE (target
)))
38040 if (!fusion_gpr_addis (addis_value
, GET_MODE (addis_value
)))
38043 /* Allow sign/zero extension. */
38044 if (GET_CODE (mem
) == ZERO_EXTEND
38045 || (GET_CODE (mem
) == SIGN_EXTEND
&& TARGET_P8_FUSION_SIGN
))
38046 mem
= XEXP (mem
, 0);
38051 if (!fusion_gpr_mem_load (mem
, GET_MODE (mem
)))
38054 addr
= XEXP (mem
, 0); /* either PLUS or LO_SUM. */
38055 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
38058 /* Validate that the register used to load the high value is either the
38059 register being loaded, or we can safely replace its use.
38061 This function is only called from the peephole2 pass and we assume that
38062 there are 2 instructions in the peephole (addis and load), so we want to
38063 check if the target register was not used in the memory address and the
38064 register to hold the addis result is dead after the peephole. */
38065 if (REGNO (addis_reg
) != REGNO (target
))
38067 if (reg_mentioned_p (target
, mem
))
38070 if (!peep2_reg_dead_p (2, addis_reg
))
38073 /* If the target register being loaded is the stack pointer, we must
38074 avoid loading any other value into it, even temporarily. */
38075 if (REG_P (target
) && REGNO (target
) == STACK_POINTER_REGNUM
)
38079 base_reg
= XEXP (addr
, 0);
38080 return REGNO (addis_reg
) == REGNO (base_reg
);
38083 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38084 sequence. We adjust the addis register to use the target register. If the
38085 load sign extends, we adjust the code to do the zero extending load, and an
38086 explicit sign extension later since the fusion only covers zero extending
38090 operands[0] register set with addis (to be replaced with target)
38091 operands[1] value set via addis
38092 operands[2] target register being loaded
38093 operands[3] D-form memory reference using operands[0]. */
38096 expand_fusion_gpr_load (rtx
*operands
)
38098 rtx addis_value
= operands
[1];
38099 rtx target
= operands
[2];
38100 rtx orig_mem
= operands
[3];
38101 rtx new_addr
, new_mem
, orig_addr
, offset
;
38102 enum rtx_code plus_or_lo_sum
;
38103 machine_mode target_mode
= GET_MODE (target
);
38104 machine_mode extend_mode
= target_mode
;
38105 machine_mode ptr_mode
= Pmode
;
38106 enum rtx_code extend
= UNKNOWN
;
38108 if (GET_CODE (orig_mem
) == ZERO_EXTEND
38109 || (TARGET_P8_FUSION_SIGN
&& GET_CODE (orig_mem
) == SIGN_EXTEND
))
38111 extend
= GET_CODE (orig_mem
);
38112 orig_mem
= XEXP (orig_mem
, 0);
38113 target_mode
= GET_MODE (orig_mem
);
38116 gcc_assert (MEM_P (orig_mem
));
38118 orig_addr
= XEXP (orig_mem
, 0);
38119 plus_or_lo_sum
= GET_CODE (orig_addr
);
38120 gcc_assert (plus_or_lo_sum
== PLUS
|| plus_or_lo_sum
== LO_SUM
);
38122 offset
= XEXP (orig_addr
, 1);
38123 new_addr
= gen_rtx_fmt_ee (plus_or_lo_sum
, ptr_mode
, addis_value
, offset
);
38124 new_mem
= replace_equiv_address_nv (orig_mem
, new_addr
, false);
38126 if (extend
!= UNKNOWN
)
38127 new_mem
= gen_rtx_fmt_e (ZERO_EXTEND
, extend_mode
, new_mem
);
38129 new_mem
= gen_rtx_UNSPEC (extend_mode
, gen_rtvec (1, new_mem
),
38130 UNSPEC_FUSION_GPR
);
38131 emit_insn (gen_rtx_SET (target
, new_mem
));
38133 if (extend
== SIGN_EXTEND
)
38135 int sub_off
= ((BYTES_BIG_ENDIAN
)
38136 ? GET_MODE_SIZE (extend_mode
) - GET_MODE_SIZE (target_mode
)
38139 = simplify_subreg (target_mode
, target
, extend_mode
, sub_off
);
38141 emit_insn (gen_rtx_SET (target
,
38142 gen_rtx_SIGN_EXTEND (extend_mode
, sign_reg
)));
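/* Scalar sketch of the sign-extension trick above: when the fused sequence
   can only perform a zero-extending load, the sign-extended result is
   produced by loading zero-extended first and sign-extending the low part
   explicitly afterwards.  Names are illustrative.  */

static long long ATTRIBUTE_UNUSED
sketch_signed_load_via_zero_extend (const int *p)
{
  /* What the fused (zero-extending) load produces.  */
  unsigned long long zero_extended = *(const unsigned int *) p;

  /* Explicit sign extension of the low 32 bits.  */
  return (long long) (int) zero_extended;
}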
38148 /* Emit the addis instruction that will be part of a fused instruction
38152 emit_fusion_addis (rtx target
, rtx addis_value
, const char *comment
,
38153 const char *mode_name
)
38156 char insn_template
[80];
38157 const char *addis_str
= NULL
;
38158 const char *comment_str
= ASM_COMMENT_START
;
38160 if (*comment_str
== ' ')
38163 /* Emit the addis instruction. */
38164 fuse_ops
[0] = target
;
38165 if (satisfies_constraint_L (addis_value
))
38167 fuse_ops
[1] = addis_value
;
38168 addis_str
= "lis %0,%v1";
38171 else if (GET_CODE (addis_value
) == PLUS
)
38173 rtx op0
= XEXP (addis_value
, 0);
38174 rtx op1
= XEXP (addis_value
, 1);
38176 if (REG_P (op0
) && CONST_INT_P (op1
)
38177 && satisfies_constraint_L (op1
))
38181 addis_str
= "addis %0,%1,%v2";
38185 else if (GET_CODE (addis_value
) == HIGH
)
38187 rtx value
= XEXP (addis_value
, 0);
38188 if (GET_CODE (value
) == UNSPEC
&& XINT (value
, 1) == UNSPEC_TOCREL
)
38190 fuse_ops
[1] = XVECEXP (value
, 0, 0); /* symbol ref. */
38191 fuse_ops
[2] = XVECEXP (value
, 0, 1); /* TOC register. */
38193 addis_str
= "addis %0,%2,%1@toc@ha";
38195 else if (TARGET_XCOFF
)
38196 addis_str
= "addis %0,%1@u(%2)";
38199 gcc_unreachable ();
38202 else if (GET_CODE (value
) == PLUS
)
38204 rtx op0
= XEXP (value
, 0);
38205 rtx op1
= XEXP (value
, 1);
38207 if (GET_CODE (op0
) == UNSPEC
38208 && XINT (op0
, 1) == UNSPEC_TOCREL
38209 && CONST_INT_P (op1
))
38211 fuse_ops
[1] = XVECEXP (op0
, 0, 0); /* symbol ref. */
38212 fuse_ops
[2] = XVECEXP (op0
, 0, 1); /* TOC register. */
38215 addis_str
= "addis %0,%2,%1+%3@toc@ha";
38217 else if (TARGET_XCOFF
)
38218 addis_str
= "addis %0,%1+%3@u(%2)";
38221 gcc_unreachable ();
38225 else if (satisfies_constraint_L (value
))
38227 fuse_ops
[1] = value
;
38228 addis_str
= "lis %0,%v1";
38231 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (value
))
38233 fuse_ops
[1] = value
;
38234 addis_str
= "lis %0,%1@ha";
38239 fatal_insn ("Could not generate addis value for fusion", addis_value
);
38241 sprintf (insn_template
, "%s\t\t%s %s, type %s", addis_str
, comment_str
,
38242 comment
, mode_name
);
38243 output_asm_insn (insn_template
, fuse_ops
);
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

void
emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
                        const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_store_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
           && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
        sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
        sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
        gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
           && GET_CODE (XEXP (offset, 0)) == UNSPEC
           && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
           && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
        sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
        sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
        gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
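
/* Worked example (illustrative only): when OFFSET is a small CONST_INT the
   first template is used, so a caller passing INSN_STR of "lwz" gets output
   along the lines of

	lwz 9,8(10)

   while the UNSPEC_TOCREL and PLUS cases print @toc@l (ELF) or @l (XCOFF)
   relocations against the symbol instead of a literal offset.  */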
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
	(unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
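
/* Editorial summary of the two shapes handled above: a peephole2-created
   address such as (lo_sum (high (sym)) (sym)) is simply split into its two
   operands, while a wrapped TOC address (unspec [(addr)] UNSPEC_FUSION_ADDIS)
   has no explicit high part, so one is manufactured with gen_rtx_HIGH around
   the low part.  */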
/* Return a string to fuse an addis instruction with a gpr load to the same
   register that we loaded up the addis instruction.  The address that is used
   is the logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register allocation:
	(unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  const char *mode_name = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case QImode:
      mode_name = "char";
      load_str = "lbz";
      break;

    case HImode:
      mode_name = "short";
      load_str = "lhz";
      break;

    case SImode:
    case SFmode:
      mode_name = (mode == SFmode) ? "float" : "int";
      load_str = "lwz";
      break;

    case DImode:
    case DFmode:
      gcc_assert (TARGET_POWERPC64);
      mode_name = (mode == DFmode) ? "double" : "long";
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
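
/* Worked example (illustrative only): for a fused SImode TOC load the two
   calls above typically print

	addis 9,2,sym@toc@ha		# gpr load fusion, type int
	lwz 9,sym@toc@l(9)

   note that the addis result and the loaded value share one register, which
   is what the power8 load-fusion facility requires.  */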
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,		/* register set via addis.  */
	     rtx addis_value,		/* addis value.  */
	     rtx dest,			/* destination (memory or register).  */
	     rtx src)			/* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
        return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
        return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
        return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
        return CONSTANT_P (offset);
    }

  return false;
}
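
/* Editorial summary: the addresses accepted above are the D-form shape
	(mem (plus (reg addis_reg) (const_int small)))
   and the TOC low-part shape
	(mem (lo_sum (reg addis_reg) (toc reference)))
   for either a load or a store, with ADDIS_REG required to be the base
   register of the address in both cases.  */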
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   load sequence.

	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
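
/* Editorial sketch of the insn built above (operand shapes illustrative):

	(parallel [(set (reg target)
			(unspec [(mem (lo_sum (addis_value) (offset)))]
				UNSPEC_FUSION_P9))
		   (clobber (reg tmp_reg))])

   keeping the temporary base register visible as a clobber so that the later
   split and output code can materialize the addis half of the fusion.  */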
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   store sequence.

	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:  (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
        load_string = "lfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
        load_string = "lxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case QImode:
          load_string = "lbz";
          break;
        case HImode:
          load_string = "lhz";
          break;
        case SImode:
        case SFmode:
          load_string = "lwz";
          break;
        case DImode:
        case DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          load_string = "ld";
          break;
        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
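
/* Worked example (illustrative only): unlike the power8 form, the power9
   fusion uses a separate temporary for the addis, so the destination need not
   be the base register; a fused SImode load might come out as

	addis 11,2,sym@toc@ha		# power9 load fusion, type SI
	lwz 9,sym@toc@l(11)

   with the register numbers purely illustrative.  */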
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:  (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        store_string = "stfs";
      else if (mode == DFmode)
        store_string = "stfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
        store_string = "stxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case QImode:
          store_string = "stb";
          break;
        case HImode:
          store_string = "sth";
          break;
        case SImode:
        case SFmode:
          store_string = "stw";
          break;
        case DImode:
        case DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          store_string = "std";
          break;
        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    return;

#ifdef RS6000_GLIBC_ATOMIC_FENV
  if (atomic_hold_decl == NULL_TREE)
    {
      atomic_hold_decl
        = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                      get_identifier ("__atomic_feholdexcept"),
                      build_function_type_list (void_type_node,
                                                double_ptr_type_node,
                                                NULL_TREE));
      TREE_PUBLIC (atomic_hold_decl) = 1;
      DECL_EXTERNAL (atomic_hold_decl) = 1;
    }

  if (atomic_clear_decl == NULL_TREE)
    {
      atomic_clear_decl
        = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                      get_identifier ("__atomic_feclearexcept"),
                      build_function_type_list (void_type_node,
                                                NULL_TREE));
      TREE_PUBLIC (atomic_clear_decl) = 1;
      DECL_EXTERNAL (atomic_clear_decl) = 1;
    }

  tree const_double = build_qualified_type (double_type_node,
                                            TYPE_QUAL_CONST);
  tree const_double_ptr = build_pointer_type (const_double);
  if (atomic_update_decl == NULL_TREE)
    {
      atomic_update_decl
        = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                      get_identifier ("__atomic_feupdateenv"),
                      build_function_type_list (void_type_node,
                                                const_double_ptr,
                                                NULL_TREE));
      TREE_PUBLIC (atomic_update_decl) = 1;
      DECL_EXTERNAL (atomic_update_decl) = 1;
    }

  tree fenv_var = create_tmp_var_raw (double_type_node);
  TREE_ADDRESSABLE (fenv_var) = 1;
  tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

  *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
  *clear = build_call_expr (atomic_clear_decl, 0);
  *update = build_call_expr (atomic_update_decl, 1,
                             fold_convert (const_double_ptr, fenv_addr));

  return;
#endif

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     double fenv_hold;
     *fenv_var = __builtin_mffs ();
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                              build_int_cst (uint64_type_node,
                                             hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
                                     build_int_cst (unsigned_type_node, 0xff),
                                     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
                                    fenv_clean_llu,
                                    build_int_cst (uint64_type_node,
                                                   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
                                      build_int_cst (unsigned_type_node, 0xff),
                                      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
                             build_int_cst (uint64_type_node,
                                            update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                             build_int_cst (uint64_type_node,
                                            new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
                              old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
                                       build_int_cst (unsigned_type_node, 0xff),
                                       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
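
/* Editorial note on hold_exception_mask above: __builtin_mffs () returns the
   FPSCR in the low 32 bits of a double, and the three least-significant FPSCR
   bits are the non-IEEE mode bit (NI) and the two rounding-mode bits (RN).
   Keeping only those bits (the final 0x7 of the mask) is what gives the
   feholdexcept behaviour of preserving the rounding mode while clearing the
   exception status and enable bits.  */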
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
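
/* Editorial reading of the sequence above: the xxpermdi pair gathers
   { src1[0], src2[0] } and { src1[1], src2[1] }, each xvcv*dsp conversion
   drops its two results into the even word slots of a V4SF temporary, and the
   final vmrgew collects those even words so that dst ends up as
   { cvt (src1[0]), cvt (src1[1]), cvt (src2[0]), cvt (src2[1]) }.  */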
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
                               rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
                          optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
              && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"