/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;

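/* Illustrative sketch only (not part of the original file): the frame
   layout above is computed once per function by rs6000_stack_info (),
   declared later in this file, and prologue/epilogue emission reads it
   roughly like so:

     rs6000_stack_t *info = rs6000_stack_info ();
     if (info->push_p)
       ; /* allocate info->total_size bytes of stack */
     if (info->lr_save_p)
       ; /* save the link register at sp + info->lr_save_offset */

   The offsets are all relative to the incoming stack pointer.  */
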
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

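/* Example (illustrative only): under this table, -mrecip=rsqrtf enables
   just RECIP_SF_RSQRT | RECIP_V4SF_RSQRT, i.e. reciprocal square root
   estimates for scalar and vector single precision, while an option
   string such as "div,!divd" would turn on all the divide estimates and
   then clear the double precision ones.  */
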
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

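/* Example (user level, illustrative): code compiled for powerpc can test
   the platform at run time with the builtin this table backs:

     if (__builtin_cpu_is ("power9"))
       ; // use a POWER9-specific path

   The string is looked up in cpu_is_info[] at compile time and the
   comparison is against the AT_PLATFORM value cached in the TCB.  */
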
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;		/* 0 = AT_HWCAP, 1 = AT_HWCAP2.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};

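/* Example (user level, illustrative): __builtin_cpu_supports tests one
   HWCAP/HWCAP2 bit from this table, e.g.

     if (__builtin_cpu_supports ("vsx"))
       ; // VSX path
     else
       ; // scalar fallback

   Entries with id == 1 (such as "arch_2_07" or "ieee128") are checked
   against AT_HWCAP2 rather than AT_HWCAP.  */
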
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};

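/* Example (user level, illustrative): the clone map above backs the
   target_clones function attribute, e.g.

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     double scale (double x) { return x * 2.0; }

   for which GCC emits one clone per listed ISA plus an ifunc resolver
   that picks the best clone at load time using the CPU builtins.  */
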
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

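/* Example (illustrative only): given the enum ordering above, both range
   checks expand to simple integer comparisons.  For instance,
   IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) is true because ALTIVEC_REG_TYPE
   lies between VSX_REG_TYPE and FPR_REG_TYPE, while
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false.  */
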
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */

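/* Example (illustrative only): a mode/register-class pair that supports
   plain loads with both reg+reg and reg+offset addressing would carry

     addr_mask = RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET;

   and a test such as (addr_mask & RELOAD_REG_PRE_INCDEC) != 0 asks
   whether PRE_INC/PRE_DEC forms are also legal, as the helpers below do
   for the RELOAD_REG_ANY entry.  */
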
/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
  bool fused_toc;		/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
static bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}

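/* Illustrative note (not from the original file): this predicate is meant
   to be used as the guard function of a define_bypass in a scheduling
   description, along the lines of

     (define_bypass 2 "some_store_insn" "some_load_insn"
		    "rs6000_store_data_bypass_p")

   where the insn reservation names are placeholders; the real uses live
   in the power*.md scheduling files.  */
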
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

/* Processor costs (relative to an add) */
const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),	/* mulsi_const */
  COSTS_N_INSNS (6/2),	/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),	/* divsi */
  COSTS_N_INSNS (70/2),	/* divdi */
  COSTS_N_INSNS (10/2),	/* fp */
  COSTS_N_INSNS (10/2),	/* dmul */
  COSTS_N_INSNS (74/2),	/* sdiv */
  COSTS_N_INSNS (74/2),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

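/* Example (illustrative only): with the RS6000_BUILTIN_* definitions
   above, a hypothetical line in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   expands to the initializer { "foo", CODE_FOR_foo, MASK, ATTR },
   producing one rs6000_builtin_info_type entry per builtin, indexed by
   the builtin's enum value.  */
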
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;			/* builtin type to use.  */
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
     "ca",
  /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "vrsave", "vscr",
  /* Soft frame pointer.  */
     "sfp",
  /* HTM SPR registers.  */
     "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))

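/* Example (illustrative only): ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)
   evaluates to 0x80000000 and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31)
   to 0x00000001, matching the VRSAVE layout in which bit 0 (the MSB)
   corresponds to %v0.  */
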
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1826 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1827 rs6000_builtin_md_vectorized_function
1829 #undef TARGET_STACK_PROTECT_GUARD
1830 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1833 #undef TARGET_STACK_PROTECT_FAIL
1834 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1838 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1839 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1842 /* Use a 32-bit anchor range. This leads to sequences like:
1844 addis tmp,anchor,high
1847 where tmp itself acts as an anchor, and can be shared between
1848 accesses to the same 64k page. */
1849 #undef TARGET_MIN_ANCHOR_OFFSET
1850 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1851 #undef TARGET_MAX_ANCHOR_OFFSET
1852 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
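
/* Worked example of the anchor arithmetic above (illustrative only, not part
   of the original source): for an access at anchor+0x12345678, the compiler
   can emit

	addis	tmp,anchor,0x1234	; tmp = anchor + 0x12340000
	lwz	dest,0x5678(tmp)	; 16-bit D-field supplies the rest

   and any other access within the same 64k page (anchor+0x1234xxxx) can reuse
   TMP without a second addis, which is why the full 32-bit signed range is
   advertised in the two macros above.  */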
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_COMPUTE_PRESSURE_CLASSES
#define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode

#undef TARGET_OFFLOAD_OPTIONS
#define TARGET_OFFLOAD_OPTIONS rs6000_offload_options

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op

#undef TARGET_OPTAB_SUPPORTED_P
#define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_COMPARE_VERSION_PRIORITY
#define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority

#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
  rs6000_generate_version_dispatcher_body

#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
  rs6000_get_function_versions_dispatcher

#undef TARGET_OPTION_FUNCTION_VERSIONS
#define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p

#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
  rs6000_hard_regno_call_part_clobbered

#undef TARGET_SLOW_UNALIGNED_ACCESS
#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
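
/* How the overrides above take effect (sketch; the generated initializer is
   much larger than shown): "target-def.h", included earlier, expands
   TARGET_INITIALIZER using the final values of all the TARGET_* macros, and
   this file defines the target vector from it, roughly:

     struct gcc_target targetm = TARGET_INITIALIZER;
     // e.g. targetm.sched.init ends up pointing at rs6000_sched_init

   Generic code then calls through targetm (for example
   targetm.sched.init (...)) and lands in the rs6000 implementations named
   above; the #undef/#define pairs simply replace the default hooks.  */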
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt
  const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
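
/* For illustration (the entry shown here is a sketch; see rs6000-cpus.def for
   the real list and flag masks): an invocation such as

     RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ...)

   expands via the RS6000_CPU macro above into one initializer
   { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | ... } of
   processor_target_table, keyed by the canonical -mcpu= name.  */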
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
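
/* Example use (illustrative): rs6000_cpu_name_lookup ("power9") returns the
   index of the table entry whose .name is "power9", while
   rs6000_cpu_name_lookup ("no-such-cpu") returns -1, which callers treat as
   an invalid -mcpu=/-mtune= value.  */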
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
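
/* Worked example of the rounding division above (illustrative): on a 64-bit
   target UNITS_PER_WORD is 8, so a 16-byte TImode value in GPRs needs
   (16 + 8 - 1) / 8 = 2 consecutive registers, while the same 16 bytes in a
   16-byte Altivec register is (16 + 16 - 1) / 16 = 1 register.  */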
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || FLOAT128_VECTOR_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || mode == TImode
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
	return 0;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	{
	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	    return 1;

	  if (TARGET_P8_VECTOR && (mode == SImode))
	    return 1;

	  if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
	    return 1;
	}

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except general register
     and it must be able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
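
/* Concrete consequences of the checks above (illustrative): PTImode is
   accepted starting at GPR 4 (even regno, and regs 4-5 stay within 0..31) but
   rejected at GPR 5 (odd) and at the argument/frame pointers; TDmode is only
   accepted at an even FP register, since the (regno % 2) == 0 test requires
   an even/odd FPR pair.  */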
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_nregs[mode][regno];
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
/* Implement TARGET_MODES_TIEABLE_P.

   PTImode cannot tie with other modes because PTImode is restricted to even
   GPR registers, and TImode can go in any GPR as well as VSX registers (PR

   Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
   128-bit floating point on VSX systems ties with other vectors.  */

static bool
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  if (mode1 == PTImode)
    return mode2 == PTImode;
  if (mode2 == PTImode)
    return false;

  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
    return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode1))
    return SCALAR_FLOAT_MODE_P (mode2);
  if (SCALAR_FLOAT_MODE_P (mode2))
    return false;

  if (GET_MODE_CLASS (mode1) == MODE_CC)
    return GET_MODE_CLASS (mode2) == MODE_CC;
  if (GET_MODE_CLASS (mode2) == MODE_CC)
    return false;

  if (PAIRED_VECTOR_MODE (mode1))
    return PAIRED_VECTOR_MODE (mode2);
  if (PAIRED_VECTOR_MODE (mode2))
    return false;

  return true;
}
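
/* Example queries (illustrative): rs6000_modes_tieable_p (V4SImode, V2DFmode)
   is true, because both satisfy ALTIVEC_OR_VSX_VECTOR_MODE, while
   rs6000_modes_tieable_p (DFmode, DImode) is false, because DFmode is a
   scalar float mode and DImode is not.  */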
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  */

static bool
rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
  if (TARGET_32BIT
      && TARGET_POWERPC64
      && GET_MODE_SIZE (mode) > 4
      && INT_REGNO_P (regno))
    return true;

  if (TARGET_VSX
      && FP_REGNO_P (regno)
      && GET_MODE_SIZE (mode) > 8
      && !FLOAT128_2REG_P (mode))
    return true;

  return false;
}
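
/* Illustrative case for the first test above: with -m32 -mpowerpc64, a
   DImode value (8 bytes > 4) held in a GPR survives a call only in its low
   half, because the 32-bit ABI saves just 4 bytes of each GPR; the register
   is therefore reported as part-clobbered.  */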
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      len += fprintf (stderr, "%sreg-class = %s", comma,
		      reg_class_names[(int)rs6000_regno_regclass[r]]);
      comma = ", ";

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
/* Map enum rs6000_vector to string.  */
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:	   ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:	   ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_PAIRED:	   ret = "paired";    break;
    case VECTOR_OTHER:	   ret = "other";     break;
    default:		   ret = "unknown";   break;
    }

  return ret;
}
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
{
  static char ret[8];
  char *p = ret;

  if ((mask & RELOAD_REG_VALID) != 0)
    *p++ = 'v';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_MULTIPLE) != 0)
    *p++ = 'm';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_INDEXED) != 0)
    *p++ = 'i';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
    *p++ = 'O';
  else if ((mask & RELOAD_REG_OFFSET) != 0)
    *p++ = 'o';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_AND_M16) != 0)
    *p++ = '&';
  else if (keep_spaces)
    *p++ = ' ';

  *p = '\0';

  return ret;
}
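
/* Example output (illustrative): a mode that is valid, single-register,
   indexed and offsettable in some reload class prints as "v i o  ", using
   one column per flag -- 'v' = valid, 'm' = needs multiple registers,
   'i' = indexed (reg+reg), 'O'/'o' = quad/normal offset (reg+disp),
   '+' = pre-increment/decrement or pre-modify, '&' = AND -16 addressing.
   KEEP_SPACES pads the missing flags so the columns line up in the
   -mdebug=reg listing.  */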
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;
  int spaces = 0;
  bool fuse_extra_p;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
	     rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));

  if ((reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing))
    fprintf (stderr, " Reload=%c%c",
	     (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
	     (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
  else
    spaces += sizeof (" Reload=sl") - 1;

  if (reg_addr[m].scalar_in_vmx_p)
    {
      fprintf (stderr, "%*s Upper=y", spaces, "");
      spaces = 0;
    }
  else
    spaces += sizeof (" Upper=y") - 1;

  fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
		  || reg_addr[m].fused_toc);
  if (!fuse_extra_p)
    {
      for (rc = 0; rc < N_RELOAD_REG; rc++)
	if (rc != RELOAD_REG_ANY)
	  {
	    if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
		|| reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
		|| reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
		|| reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
	      {
		fuse_extra_p = true;
		break;
	      }
	  }
    }

  if (fuse_extra_p)
    {
      fprintf (stderr, "%*s Fuse:", spaces, "");
      spaces = 0;

      for (rc = 0; rc < N_RELOAD_REG; rc++)
	{
	  if (rc != RELOAD_REG_ANY)
	    {
	      char load, store;

	      if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
		load = 'l';
	      else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
		load = 'L';
	      else
		load = '-';

	      if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
		store = 's';
	      else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
		store = 'S';
	      else
		store = '-';

	      if (load == '-' && store == '-')
		spaces += 5;
	      else
		{
		  fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
			   reload_reg_map[rc].name[0], load, store);
		  spaces = 0;
		}
	    }
	}

      if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
	{
	  fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" P8gpr") - 1;

      if (reg_addr[m].fused_toc)
	{
	  fprintf (stderr, "%*sToc", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" Toc") - 1;
    }
  else
    spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE)
    fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
	     spaces, "",
	     rs6000_debug_vector_unit (rs6000_vector_unit[m]),
	     rs6000_debug_vector_unit (rs6000_vector_mem[m]));

  fputs ("\n", stderr);
}
#define DEBUG_FMT_ID "%-32s= "
#define DEBUG_FMT_D   DEBUG_FMT_ID "%d\n"
#define DEBUG_FMT_WX  DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
#define DEBUG_FMT_S   DEBUG_FMT_ID "%s\n"
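
/* Worked expansion (illustrative): DEBUG_FMT_S pastes to "%-32s= " "%s\n",
   so fprintf (stderr, DEBUG_FMT_S, "abi", "aix") prints the key
   left-justified in a 32-column field followed by "= aix", keeping every
   line of the -mdebug=reg report aligned on the '=' column.  */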
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  size_t m1, m2, v;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const machine_mode print_tieable_modes[] = {
    QImode,
    HImode,
    SImode,
    DImode,
    TImode,
    PTImode,
    SFmode,
    DFmode,
    TFmode,
    IFmode,
    KFmode,
    SDmode,
    DDmode,
    TDmode,
    V16QImode,
    V8HImode,
    V4SImode,
    V2DImode,
    V1TImode,
    V32QImode,
    V16HImode,
    V8SImode,
    V4DImode,
    V2TImode,
    V2SFmode,
    V4SFmode,
    V2DFmode,
    V8SFmode,
    V4DFmode,
    CCmode,
    CCUNSmode,
    CCEQmode,
  };

  /* Virtual regs we are interested in.  */
  const static struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc:          " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset:   " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame:   " },
    { ARG_POINTER_REGNUM,			"arg pointer:  " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars:   " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame):  " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wb reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "we reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wh reg_class = %s\n"
	   "wi reg_class = %s\n"
	   "wj reg_class = %s\n"
	   "wk reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wo reg_class = %s\n"
	   "wp reg_class = %s\n"
	   "wq reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "wA reg_class = %s\n"
	   "wH reg_class = %s\n"
	   "wI reg_class = %s\n"
	   "wJ reg_class = %s\n"
	   "wK reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);

  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  nl = "\n";
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      if (!first_time)
	fputs ("\n", stderr);
    }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  {
	    fprintf (stderr,
		     "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		     GET_MODE_NAME (m),
		     (RS6000_RECIP_AUTO_RE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		     (RS6000_RECIP_AUTO_RSQRTE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
	  }

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    default:
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  fprintf (stderr, DEBUG_FMT_S, "single_float",
	   (TARGET_SINGLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "double_float",
	   (TARGET_DOUBLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "soft_float",
	   (TARGET_SOFT_FLOAT ? "true" : "false"));

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (TARGET_P8_FUSION)
    {
      char options[80];

      strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
      if (TARGET_TOC_FUSION)
	strcat (options, ", toc");

      if (TARGET_P8_FUSION_SIGN)
	strcat (options, ", sign");

      fprintf (stderr, DEBUG_FMT_S, "fusion", options);
    }

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  if (rs6000_long_double_type_size == 128)
    {
      fprintf (stderr, DEBUG_FMT_S, "long double type",
	       TARGET_IEEEQUAD ? "IEEE" : "IBM");
      fprintf (stderr, DEBUG_FMT_S, "default long double type",
	       TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
    }
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);

  fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
	   (int)TARGET_FLOAT128_ENABLE_TYPE);
  if (TARGET_VSX)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
	     (int)VECTOR_ELEMENT_SCALAR_64BIT);

  if (TARGET_DIRECT_MOVE_128)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
	     (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
}
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      machine_mode m2 = (machine_mode) m;
      bool complex_p = false;
      bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
      size_t msize;

      if (COMPLEX_MODE_P (m2))
	{
	  complex_p = true;
	  m2 = GET_MODE_INNER (m2);
	}

      msize = GET_MODE_SIZE (m2);

      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      bool small_int_vsx_p = (small_int_p
				      && (rc == RELOAD_REG_FPR
					  || rc == RELOAD_REG_VMX));

	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  Small integers in VSX registers can only do
		 REG+REG addressing.  */
	      if (small_int_vsx_p)
		addr_mask |= RELOAD_REG_INDEXED;
	      else if (nregs > 1 || m == BLKmode || complex_p)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  If we allow scalars into Altivec registers,
		 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.  */
	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && msize <= 8
		  && !VECTOR_MODE_P (m2)
		  && !FLOAT128_VECTOR_P (m2)
		  && !complex_p
		  && !small_int_vsx_p)
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DFmode:
		    case E_DDmode:
		      if (TARGET_HARD_FLOAT)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  ISA 3.0 (i.e. power9) adds D-form addressing
	     for 64-bit scalars and 32-bit SFmode to altivec registers.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && msize <= 8
	      && (rc == RELOAD_REG_GPR
		  || ((msize == 8 || m2 == SFmode)
		      && (rc == RELOAD_REG_FPR
			  || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
	    addr_mask |= RELOAD_REG_OFFSET;

	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
	     instructions are enabled.  The offset for 128-bit VSX registers is
	     only 12-bits.  While GPRs can handle the full offset range, VSX
	     registers can only handle the restricted range.  */
	  else if ((addr_mask != 0) && !indexed_only_p
		   && msize == 16 && TARGET_P9_VECTOR
		   && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
		       || (m2 == TImode && TARGET_VSX)))
	    {
	      addr_mask |= RELOAD_REG_OFFSET;
	      if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
		addr_mask |= RELOAD_REG_QUAD_OFFSET;
	    }

	  /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
	     addressing on 128-bit types.  */
	  if (rc == RELOAD_REG_VMX && msize == 16
	      && (addr_mask & RELOAD_REG_VALID) != 0)
	    addr_mask |= RELOAD_REG_AND_M16;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
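
/* Example of the resulting encoding (illustrative, not dumped from a real
   build): for DFmode on a power9-class target, the GPR and FPR entries of
   reg_addr[DFmode].addr_mask end up with the valid, indexed, offset and
   pre-increment/pre-modify bits set, while the Altivec entry gets the valid,
   indexed and offset bits but not the pre-increment forms, which the loop
   above reserves for GPRs and FPRs.  */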
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }

  /* Precalculate the valid memory formats as well as the vector information,
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));

  /* The VSX hardware allows native alignment for vectors, but control whether
     the compiler believes it can use native alignment or still uses 128-bit
     alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* KF mode (IEEE 128-bit in VSX registers).  We do not have arithmetic, so
     only set the memory modes.  Include TFmode if -mabi=ieeelongdouble.  */
  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_vector_mem[KFmode] = VECTOR_VSX;
      rs6000_vector_align[KFmode] = 128;

      if (FLOAT128_IEEE_P (TFmode))
	{
	  rs6000_vector_mem[TFmode] = VECTOR_VSX;
	  rs6000_vector_align[TFmode] = 128;
	}
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;

      rs6000_vector_mem[V1TImode] = VECTOR_VSX;
      rs6000_vector_unit[V1TImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V1TImode] = 128;
    }

  /* DFmode, see if we want to use the VSX unit.  Memory is handled
     differently, so don't set rs6000_vector_mem.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_align[DFmode] = 64;
    }

  /* SFmode, see if we want to use the VSX unit.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_vector_unit[SFmode] = VECTOR_VSX;
      rs6000_vector_align[SFmode] = 32;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }

  /* TODO add paired floating point vector support.  */

  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types is set, based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wc - Reserved to represent individual CR bits (used in LLVM).
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wh - FP register for direct move instructions.
	wi - FP or VSX register to hold 64-bit integers for VSX insns.
	wj - FP or VSX register to hold 64-bit integers for direct moves.
	wk - FP or VSX register to hold 64-bit doubles for direct moves.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wn - always NO_REGS.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wt - VSX register for TImode in VSX registers.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.
	wH - Altivec register if SImode is allowed in VSX registers.
	wI - VSX register if SImode is allowed in VSX registers.
	wJ - VSX register if QImode/HImode are allowed in VSX registers.
	wK - Altivec register if QImode/HImode are allowed in VSX registers.  */
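
/* How these letters are consumed (sketch; the pattern below is illustrative
   and not quoted from rs6000.md): a machine-description insn can write

     (define_insn "*vsx_adddf3_example"
       [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
	     (plus:DF (match_operand:DF 1 "vsx_register_operand" "ws")
		      (match_operand:DF 2 "vsx_register_operand" "ws")))]
       ...)

   and the register allocator resolves "ws" through
   rs6000_constraints[RS6000_CONSTRAINT_ws], which the code below only sets
   to a real register class when the corresponding ISA option is enabled;
   otherwise the constraint stays NO_REGS and the alternative is dead.  */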
3289 if (TARGET_HARD_FLOAT
)
3290 rs6000_constraints
[RS6000_CONSTRAINT_f
] = FLOAT_REGS
; /* SFmode */
3292 if (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
3293 rs6000_constraints
[RS6000_CONSTRAINT_d
] = FLOAT_REGS
; /* DFmode */
3297 rs6000_constraints
[RS6000_CONSTRAINT_wa
] = VSX_REGS
;
3298 rs6000_constraints
[RS6000_CONSTRAINT_wd
] = VSX_REGS
; /* V2DFmode */
3299 rs6000_constraints
[RS6000_CONSTRAINT_wf
] = VSX_REGS
; /* V4SFmode */
3300 rs6000_constraints
[RS6000_CONSTRAINT_ws
] = VSX_REGS
; /* DFmode */
3301 rs6000_constraints
[RS6000_CONSTRAINT_wv
] = ALTIVEC_REGS
; /* DFmode */
3302 rs6000_constraints
[RS6000_CONSTRAINT_wi
] = VSX_REGS
; /* DImode */
3303 rs6000_constraints
[RS6000_CONSTRAINT_wt
] = VSX_REGS
; /* TImode */
3306 /* Add conditional constraints based on various options, to allow us to
3307 collapse multiple insn patterns. */
3309 rs6000_constraints
[RS6000_CONSTRAINT_v
] = ALTIVEC_REGS
;
3311 if (TARGET_MFPGPR
) /* DFmode */
3312 rs6000_constraints
[RS6000_CONSTRAINT_wg
] = FLOAT_REGS
;
3315 rs6000_constraints
[RS6000_CONSTRAINT_wl
] = FLOAT_REGS
; /* DImode */
3317 if (TARGET_DIRECT_MOVE
)
3319 rs6000_constraints
[RS6000_CONSTRAINT_wh
] = FLOAT_REGS
;
3320 rs6000_constraints
[RS6000_CONSTRAINT_wj
] /* DImode */
3321 = rs6000_constraints
[RS6000_CONSTRAINT_wi
];
3322 rs6000_constraints
[RS6000_CONSTRAINT_wk
] /* DFmode */
3323 = rs6000_constraints
[RS6000_CONSTRAINT_ws
];
3324 rs6000_constraints
[RS6000_CONSTRAINT_wm
] = VSX_REGS
;
3327 if (TARGET_POWERPC64
)
3329 rs6000_constraints
[RS6000_CONSTRAINT_wr
] = GENERAL_REGS
;
3330 rs6000_constraints
[RS6000_CONSTRAINT_wA
] = BASE_REGS
;
3333 if (TARGET_P8_VECTOR
) /* SFmode */
3335 rs6000_constraints
[RS6000_CONSTRAINT_wu
] = ALTIVEC_REGS
;
3336 rs6000_constraints
[RS6000_CONSTRAINT_wy
] = VSX_REGS
;
3337 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = VSX_REGS
;
3339 else if (TARGET_VSX
)
3340 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = FLOAT_REGS
;
3343 rs6000_constraints
[RS6000_CONSTRAINT_wx
] = FLOAT_REGS
; /* DImode */
3346 rs6000_constraints
[RS6000_CONSTRAINT_wz
] = FLOAT_REGS
; /* DImode */
3348 if (TARGET_FLOAT128_TYPE
)
3350 rs6000_constraints
[RS6000_CONSTRAINT_wq
] = VSX_REGS
; /* KFmode */
3351 if (FLOAT128_IEEE_P (TFmode
))
3352 rs6000_constraints
[RS6000_CONSTRAINT_wp
] = VSX_REGS
; /* TFmode */
3355 if (TARGET_P9_VECTOR
)
3357 /* Support for new D-form instructions. */
3358 rs6000_constraints
[RS6000_CONSTRAINT_wb
] = ALTIVEC_REGS
;
3360 /* Support for ISA 3.0 (power9) vectors. */
3361 rs6000_constraints
[RS6000_CONSTRAINT_wo
] = VSX_REGS
;
3364 /* Support for new direct moves (ISA 3.0 + 64bit). */
3365 if (TARGET_DIRECT_MOVE_128
)
3366 rs6000_constraints
[RS6000_CONSTRAINT_we
] = VSX_REGS
;
3368 /* Support small integers in VSX registers. */
3369 if (TARGET_P8_VECTOR
)
3371 rs6000_constraints
[RS6000_CONSTRAINT_wH
] = ALTIVEC_REGS
;
3372 rs6000_constraints
[RS6000_CONSTRAINT_wI
] = FLOAT_REGS
;
3373 if (TARGET_P9_VECTOR
)
3375 rs6000_constraints
[RS6000_CONSTRAINT_wJ
] = FLOAT_REGS
;
3376 rs6000_constraints
[RS6000_CONSTRAINT_wK
] = ALTIVEC_REGS
;
3380 /* Set up the reload helper and direct move functions. */
3381 if (TARGET_VSX
|| TARGET_ALTIVEC
)
3385 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_di_store
;
3386 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_di_load
;
3387 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_di_store
;
3388 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_di_load
;
3389 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_di_store
;
3390 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_di_load
;
3391 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_di_store
;
3392 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_di_load
;
3393 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_di_store
;
3394 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_di_load
;
3395 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_di_store
;
3396 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_di_load
;
3397 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_di_store
;
3398 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_di_load
;
3399 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_di_store
;
3400 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_di_load
;
3401 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_di_store
;
3402 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_di_load
;
3403 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_di_store
;
3404 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_di_load
;
3406 if (FLOAT128_VECTOR_P (KFmode
))
3408 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_di_store
;
3409 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_di_load
;
3412 if (FLOAT128_VECTOR_P (TFmode
))
3414 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_di_store
;
3415 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_di_load
;
3418 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3420 if (TARGET_NO_SDMODE_STACK
)
3422 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_di_store
;
3423 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_di_load
;
3428 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_di_store
;
3429 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_di_load
;
3432 if (TARGET_DIRECT_MOVE
&& !TARGET_DIRECT_MOVE_128
)
3434 reg_addr
[TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxti
;
3435 reg_addr
[V1TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv1ti
;
3436 reg_addr
[V2DFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2df
;
3437 reg_addr
[V2DImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2di
;
3438 reg_addr
[V4SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4sf
;
3439 reg_addr
[V4SImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4si
;
3440 reg_addr
[V8HImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv8hi
;
3441 reg_addr
[V16QImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv16qi
;
3442 reg_addr
[SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxsf
;
3444 reg_addr
[TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprti
;
3445 reg_addr
[V1TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv1ti
;
3446 reg_addr
[V2DFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2df
;
3447 reg_addr
[V2DImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2di
;
3448 reg_addr
[V4SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4sf
;
3449 reg_addr
[V4SImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4si
;
3450 reg_addr
[V8HImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv8hi
;
3451 reg_addr
[V16QImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv16qi
;
3452 reg_addr
[SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprsf
;
3454 if (FLOAT128_VECTOR_P (KFmode
))
3456 reg_addr
[KFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxkf
;
3457 reg_addr
[KFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprkf
;
3460 if (FLOAT128_VECTOR_P (TFmode
))
3462 reg_addr
[TFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxtf
;
3463 reg_addr
[TFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprtf
;
3469 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_si_store
;
3470 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_si_load
;
3471 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_si_store
;
3472 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_si_load
;
3473 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_si_store
;
3474 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_si_load
;
3475 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_si_store
;
3476 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_si_load
;
3477 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_si_store
;
3478 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_si_load
;
3479 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_si_store
;
3480 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_si_load
;
3481 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_si_store
;
3482 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_si_load
;
3483 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_si_store
;
3484 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_si_load
;
3485 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_si_store
;
3486 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_si_load
;
3487 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_si_store
;
3488 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_si_load
;
3490 if (FLOAT128_VECTOR_P (KFmode
))
3492 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_si_store
;
3493 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_si_load
;
3496 if (FLOAT128_IEEE_P (TFmode
))
3498 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_si_store
;
3499 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_si_load
;
3502 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3504 if (TARGET_NO_SDMODE_STACK
)
3506 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_si_store
;
3507 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_si_load
;
3512 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_si_store
;
3513 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_si_load
;
3516 if (TARGET_DIRECT_MOVE
)
3518 reg_addr
[DImode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdi
;
3519 reg_addr
[DDmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdd
;
3520 reg_addr
[DFmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdf
;
3524 reg_addr
[DFmode
].scalar_in_vmx_p
= true;
3525 reg_addr
[DImode
].scalar_in_vmx_p
= true;
3527 if (TARGET_P8_VECTOR
)
3529 reg_addr
[SFmode
].scalar_in_vmx_p
= true;
3530 reg_addr
[SImode
].scalar_in_vmx_p
= true;
3532 if (TARGET_P9_VECTOR
)
3534 reg_addr
[HImode
].scalar_in_vmx_p
= true;
3535 reg_addr
[QImode
].scalar_in_vmx_p
= true;
3540 /* Setup the fusion operations. */
3541 if (TARGET_P8_FUSION
)
3543 reg_addr
[QImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_qi
;
3544 reg_addr
[HImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_hi
;
3545 reg_addr
[SImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_si
;
3547 reg_addr
[DImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_di
;
  if (TARGET_P9_FUSION)
    {
      struct fuse_insns
      {
	enum machine_mode mode;			/* mode of the fused type.  */
	enum machine_mode pmode;		/* pointer mode.  */
	enum rs6000_reload_reg_type rtype;	/* register type.  */
	enum insn_code load;			/* load insn.  */
	enum insn_code store;			/* store insn.  */
      };

      static const struct fuse_insns addis_insns[] = {
	{ E_SFmode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_sf_load,
	  CODE_FOR_fusion_vsx_di_sf_store },

	{ E_SFmode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_sf_load,
	  CODE_FOR_fusion_vsx_si_sf_store },

	{ E_DFmode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_df_load,
	  CODE_FOR_fusion_vsx_di_df_store },

	{ E_DFmode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_df_load,
	  CODE_FOR_fusion_vsx_si_df_store },

	{ E_DImode, E_DImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_di_di_load,
	  CODE_FOR_fusion_vsx_di_di_store },

	{ E_DImode, E_SImode, RELOAD_REG_FPR,
	  CODE_FOR_fusion_vsx_si_di_load,
	  CODE_FOR_fusion_vsx_si_di_store },

	{ E_QImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_qi_load,
	  CODE_FOR_fusion_gpr_di_qi_store },

	{ E_QImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_qi_load,
	  CODE_FOR_fusion_gpr_si_qi_store },

	{ E_HImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_hi_load,
	  CODE_FOR_fusion_gpr_di_hi_store },

	{ E_HImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_hi_load,
	  CODE_FOR_fusion_gpr_si_hi_store },

	{ E_SImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_si_load,
	  CODE_FOR_fusion_gpr_di_si_store },

	{ E_SImode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_si_load,
	  CODE_FOR_fusion_gpr_si_si_store },

	{ E_SFmode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_sf_load,
	  CODE_FOR_fusion_gpr_di_sf_store },

	{ E_SFmode, E_SImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_si_sf_load,
	  CODE_FOR_fusion_gpr_si_sf_store },

	{ E_DImode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_di_load,
	  CODE_FOR_fusion_gpr_di_di_store },

	{ E_DFmode, E_DImode, RELOAD_REG_GPR,
	  CODE_FOR_fusion_gpr_di_df_load,
	  CODE_FOR_fusion_gpr_di_df_store },
      };

      machine_mode cur_pmode = Pmode;
      size_t i;

      for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
	{
	  machine_mode xmode = addis_insns[i].mode;
	  enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;

	  if (addis_insns[i].pmode != cur_pmode)
	    continue;

	  if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
	    continue;

	  reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
	  reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;

	  if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
	    {
	      reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
		= addis_insns[i].load;
	      reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
		= addis_insns[i].store;
	    }
	}
    }
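  /* Illustrative note (not in the original source): with 64-bit pointers
     (Pmode == DImode), the first addis_insns entry above makes a fused
     ADDIS + SFmode load into an FPR use CODE_FOR_fusion_vsx_di_sf_load,
     i.e.

	reg_addr[SFmode].fusion_addis_ld[RELOAD_REG_FPR]
	  = CODE_FOR_fusion_vsx_di_sf_load;

     while entries whose pmode is SImode are skipped by the cur_pmode
     check.  */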
  /* Note which types we support for fusing a TOC setup with a memory insn.
     We only do fused TOCs for medium/large code models.  */
  if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL))
    {
      reg_addr[QImode].fused_toc = true;
      reg_addr[HImode].fused_toc = true;
      reg_addr[SImode].fused_toc = true;
      reg_addr[DImode].fused_toc = true;

      if (TARGET_HARD_FLOAT)
	{
	  if (TARGET_SINGLE_FLOAT)
	    reg_addr[SFmode].fused_toc = true;
	  if (TARGET_DOUBLE_FLOAT)
	    reg_addr[DFmode].fused_toc = true;
	}
    }
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
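  /* Illustrative example (not in the original source): on a 32-bit
     soft-float target, an 8-byte DFmode value held in GPRs spans two
     registers, so the table entry rs6000_hard_regno_nregs[E_DFmode][3]
     (register r3) would be 2.  */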
  /* Precalculate TARGET_HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;
  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;
      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;
      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;
      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  machine_mode m2 = (machine_mode) m;
	  int reg_size2 = reg_size;

	  /* TDmode & IBM 128-bit floating point always takes 2 registers,
	     even with VSX.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
	}
    }
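  /* Illustrative example (not in the original source): the expression above
     is a ceiling division.  A 16-byte IBM 128-bit float (FLOAT128_2REG_P)
     with reg_size2 == UNITS_PER_FP_WORD == 8 needs (16 + 8 - 1) / 8 = 2
     registers, even in a VSX register class.  */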
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate
     is not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
		 "-ffast-math");
      if (flag_trapping_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip",
		 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
		 "-ffast-math");

      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like PAIRED are no longer in target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC    : 0)
	  | ((TARGET_CMPB)		    ? RS6000_BTM_CMPB	    : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	    : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	    : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	    : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	    : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE    : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES   : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD    : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	    : 0)
	  | ((TARGET_P8_VECTOR)		    ? RS6000_BTM_P8_VECTOR  : 0)
	  | ((TARGET_P9_VECTOR)		    ? RS6000_BTM_P9_VECTOR  : 0)
	  | ((TARGET_P9_MISC)		    ? RS6000_BTM_P9_MISC    : 0)
	  | ((TARGET_MODULO)		    ? RS6000_BTM_MODULO	    : 0)
	  | ((TARGET_64BIT)		    ? RS6000_BTM_64BIT	    : 0)
	  | ((TARGET_CRYPTO)		    ? RS6000_BTM_CRYPTO	    : 0)
	  | ((TARGET_HTM)		    ? RS6000_BTM_HTM	    : 0)
	  | ((TARGET_DFP)		    ? RS6000_BTM_DFP	    : 0)
	  | ((TARGET_HARD_FLOAT)	    ? RS6000_BTM_HARD_FLOAT : 0)
	  | ((TARGET_LONG_DOUBLE_128)	    ? RS6000_BTM_LDBL128    : 0)
	  | ((TARGET_FLOAT128_TYPE)	    ? RS6000_BTM_FLOAT128   : 0)
	  | ((TARGET_FLOAT128_HW)	    ? RS6000_BTM_FLOAT128_HW : 0));
}
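/* Minimal sketch (not part of the original file): how a consumer of the mask
   computed above might gate a builtin on the current options.  The helper
   name builtin_is_enabled and the fnmask parameter are hypothetical; the
   real checks live in the rs6000 builtin expansion code.  */
#if 0
static bool
builtin_is_enabled (HOST_WIDE_INT fnmask)
{
  /* A builtin is usable only when every option bit it requires is present
     in the currently computed builtin mask.  */
  return (fnmask & rs6000_builtin_mask_calculate ()) == fnmask;
}
#endif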
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		      vec<const char *> &/*constraints*/,
		      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
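/* Illustrative example (not part of the original file): inline asm such as
   the following uses addic (which sets XER[CA]) and addze (which reads it)
   without declaring the carry bit, which is why every asm statement is
   treated as clobbering CA.  */
#if 0
static long
carry_example (long x, long y)
{
  long r;
  /* addic writes XER[CA]; addze consumes it; neither is listed as a
     clobber by the user.  */
  __asm__ ("addic %0,%1,-1\n\taddze %0,%2" : "=r" (r) : "r" (x), "r" (y));
  return r;
}
#endif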
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index = -1;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
  /* Remember the explicit arguments.  */
  if (global_init_p)
    rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries",
	     "-malign-power");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;
  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = SImode;
      rs6000_pointer_size = 32;
    }
  /* Some OSs don't support saving the high part of 64-bit registers on
     context switch.  Other OSs don't support saving Altivec registers.  On
     those OSs, we don't touch the OPTION_MASK_POWERPC64 or
     OPTION_MASK_ALTIVEC settings; if the user wants either, the user must
     explicitly specify them and we won't interfere with the user's
     specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
		   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option
     specified with -mtune on the command line.  Process a '--with-cpu'
     configuration request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    cpu_index = rs6000_cpu_index;
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    cpu_index = main_target_opt->x_rs6000_cpu_index;
  else if (OPTION_TARGET_CPU_DEFAULT)
    cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
  if (cpu_index >= 0)
    {
      const char *unavailable_cpu = NULL;
      switch (processor_target_table[cpu_index].processor)
	{
#ifndef HAVE_AS_POWER9
	case PROCESSOR_POWER9:
	  unavailable_cpu = "power9";
	  break;
#endif
#ifndef HAVE_AS_POWER8
	case PROCESSOR_POWER8:
	  unavailable_cpu = "power8";
	  break;
#endif
#ifndef HAVE_AS_POPCNTD
	case PROCESSOR_POWER7:
	  unavailable_cpu = "power7";
	  break;
#endif
#ifndef HAVE_AS_DFP
	case PROCESSOR_POWER6:
	  unavailable_cpu = "power6";
	  break;
#endif
#ifndef HAVE_AS_POPCNTB
	case PROCESSOR_POWER5:
	  unavailable_cpu = "power5";
	  break;
#endif
	default:
	  break;
	}

      if (unavailable_cpu)
	{
	  cpu_index = -1;
	  warning (0, "will not generate %qs instructions because "
		   "assembler lacks %qs support", unavailable_cpu,
		   unavailable_cpu);
	}
    }
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA
     bits with those from the cpu, except for options that were explicitly
     set.  If we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index;
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
	 POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
	 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
	 switched to using rs6000_isa_flags, we need to do the initialization
	 here.

	 If there is a TARGET_DEFAULT, use that.  Otherwise fall back to
	 using -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le
	 defaults.  */
      HOST_WIDE_INT flags;
      if (TARGET_DEFAULT)
	flags = TARGET_DEFAULT;
      else
	{
	  /* PowerPC 64-bit LE requires at least ISA 2.07.  */
	  const char *default_cpu = (!TARGET_POWERPC64
				     ? "powerpc"
				     : (BYTES_BIG_ENDIAN
					? "powerpc64"
					: "powerpc64le"));
	  int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
	  flags = processor_target_table[default_cpu_index].target_enable;
	}
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }
  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (cpu_index >= 0)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    tune_index = (int) i;
	    break;
	  }
    }

  if (cpu_index >= 0)
    rs6000_cpu = processor_target_table[cpu_index].processor;
  else
    rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;

  gcc_assert (tune_index >= 0);
  rs6000_tune = processor_target_table[tune_index].processor;
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC
      || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
    }

  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);
  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mmultiple");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mstring");
	}
    }
  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable
	   & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }

  if (!rs6000_fold_gimple)
    fprintf (stderr,
	     "gimple folding of rs6000 builtins has been disabled.\n");
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }
  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the
     code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);

  else if (TARGET_P9_MINMAX)
    {
      if (cpu_index >= 0)
	{
	  if (cpu_index == PROCESSOR_POWER9)
	    {
	      /* legacy behavior: allow -mcpu=power9 with certain
		 capabilities explicitly disabled.  */
	      rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
	    }
	  else
	    error ("power9 target option is incompatible with %<%s=<xxx>%> "
		   "for <xxx> less than power9", "-mcpu");
	}
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
	       != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
		   & rs6000_isa_flags_explicit))
	{
	  /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
	     were explicitly cleared.  */
	  error ("%qs incompatible with explicitly disabled options",
		 "-mpower9-minmax");
	}
      else
	rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }
  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
	error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
	     not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
	error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
	warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
			    | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory is not available in little endian "
		       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
  /* If we can shrink-wrap the TOC register save separately, then use
     -msave-toc-indirect unless explicitly disabled.  */
  if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
      && flag_shrink_wrap_separate
      && optimize_function_for_speed_p (cfun))
    rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;

  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);
  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  if (TARGET_P8_FUSION_SIGN)
	    error ("%qs requires %qs", "-mpower8-fusion-sign",
		   "-mpower8-fusion");

	  if (TARGET_TOC_FUSION)
	    error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

	  rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }
  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  /* We prefer to not mention undocumented options in
	     error messages.  However, if users have managed to select
	     power9-fusion without selecting power8-fusion, they
	     already know about undocumented flags.  */
	  error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
	  rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P9_FUSION);
  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large
     code model are used.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
	 error messages.  However, if users have managed to select
	 power9-vector without selecting power8-vector, they
	 already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
	error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P9_VECTOR is explicit and
	     OPTION_MASK_P8_VECTOR is not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
	  rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
    }
  /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
	  && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
	error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}

      else if (!TARGET_ALLOW_MOVMISALIGN)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-mefficient-unaligned-vsx",
		   "-mallow-movmisalign");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}
    }
  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

  /* Set -mabi=ieeelongdouble on some old targets.  In the future, power
     server systems will also set long double to be IEEE 128-bit.  AIX and
     Darwin explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT
     to 0, so those systems will not pick up this default.  Warn if the user
     changes the default unless -Wno-psabi.  */
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;

  else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
	   && TARGET_LONG_DOUBLE_128)
    {
      static bool warned_change_long_double;
      if (!warned_change_long_double)
	{
	  warned_change_long_double = true;
	  if (TARGET_IEEEQUAD)
	    warning (OPT_Wpsabi, "Using IEEE extended precision long double");
	  else
	    warning (OPT_Wpsabi, "Using IBM extended precision long double");
	}
    }
  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems.  In GCC 7, we would enable the IEEE 128-bit floating point
     infrastructure (-mfloat128-type) but not enable the actual __float128
     type unless the user used the explicit -mfloat128.  In GCC 8, we enable
     both the keyword as well as the type.  */
  TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;

  /* IEEE 128-bit floating point requires VSX support.  */
  if (TARGET_FLOAT128_KEYWORD)
    {
      if (!TARGET_VSX)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    error ("%qs requires VSX support", "-mfloat128");

	  TARGET_FLOAT128_TYPE = 0;
	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}
      else if (!TARGET_FLOAT128_TYPE)
	{
	  TARGET_FLOAT128_TYPE = 1;
	  warning (0, "The -mfloat128 option may not be fully supported");
	}
    }

  /* Enable the __float128 keyword under Linux by default.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;

  /* If we are supporting the float128 type and have full ISA 3.0 support,
     enable -mfloat128-hardware by default.  However, don't enable the
     __float128 keyword if it was explicitly turned off.  64-bit mode is
     needed because sometimes the compiler wants to put things in an integer
     container, and if we don't have __int128 support, it is impossible.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_tune == PROCESSOR_PPCE500MC
	  || rs6000_tune == PROCESSOR_PPCE500MC64
	  || rs6000_tune == PROCESSOR_PPCE5500
	  || rs6000_tune == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      targetm.secondary_memory_needed
	= rs6000_debug_secondary_memory_needed;
      targetm.can_change_mode_class
	= rs6000_debug_can_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	{
	  error ("unknown vectorization library ABI type (%qs) for "
		 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
	  ret = false;
	}
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in
     a target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    {
      TARGET_FLOAT128_TYPE = 0;
      rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
			     | OPTION_MASK_FLOAT128_KEYWORD)
			    & ~rs6000_isa_flags_explicit);
    }

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
	{
	  if (main_target_opt != NULL
	      && !main_target_opt->x_rs6000_altivec_abi)
	    error ("target attribute or pragma changes AltiVec ABI");
	  else
	    rs6000_altivec_abi = 1;
	}
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget",
			      rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and
     which we might have silently queried above for -Os.  */
  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = 0;
      rs6000_double_float = 0;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }
  rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
			&& rs6000_tune != PROCESSOR_POWER5
			&& rs6000_tune != PROCESSOR_POWER6
			&& rs6000_tune != PROCESSOR_POWER7
			&& rs6000_tune != PROCESSOR_POWER8
			&& rs6000_tune != PROCESSOR_POWER9
			&& rs6000_tune != PROCESSOR_PPCA2
			&& rs6000_tune != PROCESSOR_CELL
			&& rs6000_tune != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
			 || rs6000_tune == PROCESSOR_POWER5
			 || rs6000_tune == PROCESSOR_POWER7
			 || rs6000_tune == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
				 || rs6000_tune == PROCESSOR_POWER5
				 || rs6000_tune == PROCESSOR_POWER6
				 || rs6000_tune == PROCESSOR_POWER7
				 || rs6000_tune == PROCESSOR_POWER8
				 || rs6000_tune == PROCESSOR_POWER9
				 || rs6000_tune == PROCESSOR_PPCE500MC
				 || rs6000_tune == PROCESSOR_PPCE500MC64
				 || rs6000_tune == PROCESSOR_PPCE5500
				 || rs6000_tune == PROCESSOR_PPCE6500);
  /* Allow debug switches to override the above settings.  These are set to
     -1 in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
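  /* Illustrative note (not in the original source): following the parsing
     above, both options accept either a keyword or a bare number, e.g.

	-msched-costly-dep=true_store_to_load
	-minsert-sched-nops=regroup_exact
	-minsert-sched-nops=3

     where a bare number is cast directly to the corresponding enum.  */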
  /* Handle stack protector.  */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif
  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
	error ("%qs is not a valid number in %qs", str,
	       "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
	  || (TARGET_64BIT && (offset & 3)))
	error ("%qs is not a valid offset in %qs", str,
	       "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
	error ("%qs is not a valid base register in %qs", str,
	       "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
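  /* Illustrative example (not in the original source): the options validated
     above combine as, e.g.,

	-mstack-protector-guard=tls
	-mstack-protector-guard-reg=r13
	-mstack-protector-guard-offset=0x28

     so that the canary is loaded from 0x28(r13) instead of from a global
     variable.  */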
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	 aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_tune == PROCESSOR_TITAN
	  || rs6000_tune == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    {
	      can_override_loop_align = 1;
	      align_loops = 16;
	    }
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_tune)
      {
      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;

      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;

      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;

      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;

      case PROCESSOR_POWER8:
	rs6000_cost = &power8_cost;
	break;

      case PROCESSOR_POWER9:
	rs6000_cost = &power9_cost;
	break;

      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;

      default:
	gcc_unreachable ();
      }
  if (global_init_p)
    {
      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			     rs6000_cost->simultaneous_prefetches,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			     rs6000_cost->cache_line_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);

      /* Increase loop peeling limits based on performance analysis.  */
      maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
			     global_options.x_param_values,
			     global_options_set.x_param_values);

      /* Use the 'model' -fsched-pressure algorithm by default.  */
      maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
			     SCHED_PRESSURE_MODEL,
			     global_options.x_param_values,
			     global_options_set.x_param_values);

      /* If using typedef char *va_list, signal that
	 __builtin_va_start (&ap, 0) can be optimized to
	 ap = __builtin_next_arg (0).  */
      if (DEFAULT_ABI != ABI_V4)
	targetm.expand_builtin_va_start = NULL;
    }
  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && rs6000_single_float == 0
      && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate
     indexed load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec
       instructions need indexed accesses and the type used is the scalar
       type of the element being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for %<%s=%s%>", "-mrecip", q);
		  invert = false;
		  mask = 0;
		  ret = false;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }
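  /* Illustrative example (not in the original source): with the loop above,
     an option such as -mrecip=rsqrtf,!divd enables the single-precision
     reciprocal square root estimate while the "!" prefix clears the
     double-precision divide bits.  The exact option names depend on the
     recip_options table defined elsewhere in this file.  */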
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like PAIRED are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
				  rs6000_builtin_mask);

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus
     (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
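/* Illustrative example (not in the original source): per the long comment
   before rs6000_option_override_internal, a translation unit containing

     #pragma GCC push_options
     #pragma GCC target ("cpu=power9")
     void f (void) { }
     #pragma GCC pop_options

   causes additional invocations of that function, with
   global_init_p == false, for the differently-configured region.  */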
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}
/* Implement targetm.vectorize.builtin_mask_for_load.  */

static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */

int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector; otherwise
     return the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_tune == PROCESSOR_POWER4
	  || rs6000_tune == PROCESSOR_POWER5
	  || rs6000_tune == PROCESSOR_POWER6
	  || rs6000_tune == PROCESSOR_POWER7
	  || rs6000_tune == PROCESSOR_POWER8
	  || rs6000_tune == PROCESSOR_POWER9))
    return 5;
  else
    return align_loops_log;
}
/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */

static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
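/* Illustrative example (not in the original source): when rs6000_loop_align
   returns 5 (a 2^5 = 32-byte boundary), the maximum padding allowed above
   is (1 << 5) - 1 = 31 bytes.  */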
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED,
				   bool is_packed)
{
  if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
    return true;

  if (rs6000_alignment_flags == MASK_ALIGN_POWER)
    {
      /* (Branch body elided in this excerpt.)  */
    }

  /* Assuming that all other types are naturally aligned.  CHECKME!  */
  return true;
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return true;

      /* Return false if the movmisalign pattern is not supported for this
	 mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */

static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  int elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
      return 1;

    case vec_perm:
      if (TARGET_VSX)
	return 3;
      else
	return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
	return 4;
      else
	return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
    case vector_gather_load:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 22;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned loads are not supported.  */
	gcc_unreachable ();

      return 2;

    case unaligned_store:
    case vector_scatter_store:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 23;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned stores are not supported.  */
	gcc_unreachable ();

      return 2;

    case vec_construct:
      /* This is a rough approximation assuming non-constant elements
	 constructed into a vector via element insertion.  FIXME:
	 vec_construct is not granular enough for uniformly good
	 decisions.  If the initialization is a splat, this is
	 cheaper than we estimate.  Improve this someday.  */
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
	 precision, so we need 2 permutes, 2 converts, and 1 merge
	 to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
	  && TYPE_PRECISION (elem_type) == 32)
	return 5;
      /* On POWER9, integer vector types are built up in GPRs and then
	 use a direct move (2 cycles).  For POWER8 this is even worse,
	 as we need two direct moves and a merge, and the direct moves
	 are five cycles.  */
      else if (INTEGRAL_TYPE_P (elem_type))
	{
	  if (TARGET_P9_VECTOR)
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
	  else
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
	}
      else
	/* V2DFmode doesn't need a direct move.  */
	return 2;

    default:
      gcc_unreachable ();
    }
}
/* Implement targetm.vectorize.preferred_simd_mode.  */

static machine_mode
rs6000_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case E_DFmode:
	return V2DFmode;
      case E_SFmode:
	return V4SFmode;
      case E_DImode:
	return V2DImode;
      case E_SImode:
	return V4SImode;
      case E_HImode:
	return V8HImode;
      case E_QImode:
	return V16QImode;
      default:;
      }

  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;

  return word_mode;
}

typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
	    not_vec_cost++;
	}
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "density %d%%, cost %d exceeds threshold, penalizing "
			 "loop body cost by %d%%", density_pct,
			 vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
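/* Worked example: with vec_cost == 90 and not_vec_cost == 10,
   density_pct == (90 * 100) / (90 + 10) == 90, which exceeds
   DENSITY_PCT_THRESHOLD (85); the total size 100 also exceeds
   DENSITY_SIZE_THRESHOLD (70), so the body cost is scaled by
   (100 + 10) / 100, from 90 up to 99.  */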
/* Implement targetm.vectorize.init_cost.  */

/* For each vectorized loop, this var holds TRUE iff a non-memory vector
   instruction is needed by the vectorization.  */
static bool rs6000_vect_nonmem;

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  rs6000_vect_nonmem = false;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		      struct _stmt_vec_info *stmt_info, int misalign,
		      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
							 misalign);
      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;

      /* Check whether we're doing something other than just a copy loop.
	 Not all such loops may be profitably vectorized; see
	 rs6000_finish_cost.  */
      if ((kind == vec_to_scalar || kind == vec_perm
	   || kind == vec_promote_demote || kind == vec_construct
	   || kind == scalar_to_vec)
	  || (where == vect_body && kind == vector_stmt))
	rs6000_vect_nonmem = true;
    }

  return retval;
}
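/* Worked example: a vector_stmt of cost 1 that sits in an inner loop
   relative to the loop being vectorized is charged 1 * 50 == 50 units in
   the vect_body bucket, and also sets rs6000_vect_nonmem because it is a
   non-memory vector operation in the loop body.  */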
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
		    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data*) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  /* Don't vectorize minimum-vectorization-factor, simple copy loops
     that require versioning for any reason.  The vectorization is at
     best a wash inside the loop, and the versioning checks make
     profitability highly unlikely and potentially quite harmful.  */
  if (cost_data->loop_info)
    {
      loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
      if (!rs6000_vect_nonmem
	  && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
	  && LOOP_REQUIRES_VERSIONING (vec_info))
	cost_data->cost[vect_body] += 10000;
    }

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
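/* Worked example: a pure copy loop (rs6000_vect_nonmem still false) with a
   vectorization factor of 2 that needs a versioning check gets 10000 added
   to its body cost, which in practice vetoes vectorizing it.  */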
/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
				   tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    CASE_CFN_ATAN2:
    CASE_CFN_HYPOT:
    CASE_CFN_POW:
      n_args = 2;
      gcc_fallthrough ();

    CASE_CFN_ACOS:
    CASE_CFN_ACOSH:
    CASE_CFN_ASIN:
    CASE_CFN_ASINH:
    CASE_CFN_ATAN:
    CASE_CFN_ATANH:
    CASE_CFN_CBRT:
    CASE_CFN_COS:
    CASE_CFN_COSH:
    CASE_CFN_ERF:
    CASE_CFN_ERFC:
    CASE_CFN_EXP2:
    CASE_CFN_EXP:
    CASE_CFN_EXPM1:
    CASE_CFN_LGAMMA:
    CASE_CFN_LOG10:
    CASE_CFN_LOG1P:
    CASE_CFN_LOG2:
    CASE_CFN_LOG:
    CASE_CFN_SIN:
    CASE_CFN_SINH:
    CASE_CFN_SQRT:
    CASE_CFN_TAN:
    CASE_CFN_TANH:
      if (el_mode == DFmode && n == 2)
	{
	  bdecl = mathfn_built_in (double_type_node, fn);
	  suffix = "d2";				/* pow -> powd2 */
	}
      else if (el_mode == SFmode && n == 4)
	{
	  bdecl = mathfn_built_in (float_type_node, fn);
	  suffix = "4";					/* powf -> powf4 */
	}
      else
	return NULL_TREE;
      if (!bdecl)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  if (!bname)
    return NULL_TREE;

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

tree
rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
				    tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
	     combined_fn_name (combined_fn (fn)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    CASE_CFN_COPYSIGN:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
      break;
    CASE_CFN_CEIL:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
      break;
    CASE_CFN_FLOOR:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
      break;
    CASE_CFN_FMA:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
      break;
    CASE_CFN_TRUNC:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
      break;
    CASE_CFN_NEARBYINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && flag_unsafe_math_optimizations
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && flag_unsafe_math_optimizations
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
      break;
    CASE_CFN_RINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && !flag_trapping_math
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && !flag_trapping_math
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
      break;
    default:
      break;
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);

  return NULL_TREE;
}
/* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION.  */

tree
rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
				       tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
	     IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  enum rs6000_builtins fn
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  switch (fn)
    {
    case RS6000_BUILTIN_RSQRTF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
      break;
    case RS6000_BUILTIN_RSQRT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
      break;
    case RS6000_BUILTIN_RECIPF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
      break;
    case RS6000_BUILTIN_RECIP:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
      break;
    default:
      break;
    }
  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
	{
	  fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
	  start = "";
	}

      if (global_options_set.x_rs6000_cpu_index)
	{
	  fprintf (file, "%s -mcpu=%s", start,
		   processor_target_table[rs6000_cpu_index].name);
	  start = "";
	}

      if (global_options_set.x_rs6000_tune_index)
	{
	  fprintf (file, "%s -mtune=%s", start,
		   processor_target_table[rs6000_tune_index].name);
	  start = "";
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G %d", start,
		   g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

#ifdef USING_ELFOS_H
  if (!(rs6000_default_cpu && rs6000_default_cpu[0])
      && !global_options_set.x_rs6000_cpu_index)
    {
      fputs ("\t.machine ", asm_out_file);
      if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
	fputs ("power9\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
	fputs ("power8\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
	fputs ("power7\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
	fputs ("power6\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
	fputs ("power5\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
	fputs ("power4\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
	fputs ("ppc64\n", asm_out_file);
      else
	fputs ("ppc\n", asm_out_file);
    }
#endif

  if (DEFAULT_ABI == ABI_ELFv2)
    fprintf (file, "\t.abiversion 2\n");
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_size == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
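/* Worked example: 0x12345678 is neither addi- nor addis-loadable, so on a
   64-bit target low == 0x12345678 and high == value >> 31 == 0; the
   high == 0 || high == -1 case returns 2, matching the lis + ori sequence
   needed to materialize a general 32-bit value.  */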
int
num_insns_constant (rtx op, machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && rs6000_is_valid_and_mask (op, mode))
	return 2;
      else
	return num_insns_constant_wide (INTVAL (op));

    case CONST_WIDE_INT:
      {
	int i;
	int ins = CONST_WIDE_INT_NUNITS (op) - 1;
	for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
	  ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
	return ins;
      }

    case CONST_DOUBLE:
      {
	if (mode == SFmode || mode == SDmode)
	  {
	    long l;

	    if (DECIMAL_FLOAT_MODE_P (mode))
	      REAL_VALUE_TO_TARGET_DECIMAL32
		(*CONST_DOUBLE_REAL_VALUE (op), l);
	    else
	      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
	    return num_insns_constant_wide ((HOST_WIDE_INT) l);
	  }

	long l[2];
	if (DECIMAL_FLOAT_MODE_P (mode))
	  REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
	else
	  REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
	high = l[WORDS_BIG_ENDIAN == 0];
	low  = l[WORDS_BIG_ENDIAN != 0];

	if (TARGET_32BIT)
	  return (num_insns_constant_wide (low)
		  + num_insns_constant_wide (high));
	else
	  {
	    if ((high == 0 && low >= 0)
		|| (high == -1 && low < 0))
	      return num_insns_constant_wide (low);

	    else if (rs6000_is_valid_and_mask (op, mode))
	      return 2;

	    else if (low == 0)
	      return num_insns_constant_wide (high) + 1;

	    else
	      return (num_insns_constant_wide (high)
		      + num_insns_constant_wide (low) + 1);
	  }
      }

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
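/* For example, the V4SFmode element 1.0f is viewed through gen_lowpart as
   the SImode value 0x3f800000, its IEEE single-precision bit pattern, so
   float splats can be recognized with the same integer matching that is
   used for integer vectors.  */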
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  splat_val = val;
  msb_val = val >= 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((HOST_WIDE_INT)
			((unsigned HOST_WIDE_INT) small_val << bitsize)
			| (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 1; i < nunits; ++i)
    {
      HOST_WIDE_INT desired_val;
      unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
      if ((i & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, elt))
	return false;
    }

  return true;
}
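/* Worked example: a V4SImode constant with every word equal to 0x00050005
   is accepted with step == 1 and copies == 2.  Halving the bitsize once
   shows each 32-bit element is two replicas of the 16-bit value 5, which
   is EASY_VECTOR_15, so the whole constant is just "vspltish %0,5" viewed
   as V4SI.  */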
/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */

int
vspltis_shifted (rtx op)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i, j;
  unsigned nunits;
  unsigned mask;

  HOST_WIDE_INT val;

  if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
    return false;

  /* We need to create pseudo registers to do the shift, so don't recognize
     shift vector constants after reload.  */
  if (!can_create_pseudo_p ())
    return false;

  nunits = GET_MODE_NUNITS (mode);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);

  /* Check if the value can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (val))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (val, inner))
    ;

  else
    return 0;

  /* Check if VAL is present in every STEP-th element until we find elements
     that are 0 or all 1 bits.  */
  for (i = 1; i < nunits; ++i)
    {
      unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
      HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);

      /* If the value isn't the splat value, check for the remaining elements
	 being 0/-1.  */
      if (val != elt_val)
	{
	  if (elt_val == 0)
	    {
	      for (j = i+1; j < nunits; ++j)
		{
		  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
		  if (const_vector_elt_as_int (op, elt2) != 0)
		    return 0;
		}

	      return (nunits - i) * GET_MODE_SIZE (inner);
	    }

	  else if ((elt_val & mask) == mask)
	    {
	      for (j = i+1; j < nunits; ++j)
		{
		  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
		  if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
		    return 0;
		}

	      return -((nunits - i) * GET_MODE_SIZE (inner));
	    }

	  else
	    return 0;
	}
    }

  /* If all elements are equal, we don't need to do VSLDOI.  */
  return 0;
}
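/* Worked example: the V16QImode constant {5, 0, 0, ..., 0} in big-endian
   element order splats 5 and then finds fifteen trailing zero bytes, so
   the function returns (16 - 1) * 1 == 15: the number of zero bytes that
   VSLDOI must shift in after a vspltisb of 5.  */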
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  else if (mode == V2DImode)
    {
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* V1TImode is a special container for TImode.  Ignore for now.  */
  else if (mode == V1TImode)
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  if (vspltis_shifted (op) != 0)
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));

  gcc_unreachable ();
}
/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   via NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */

bool
xxspltib_constant_p (rtx op,
		     machine_mode mode,
		     int *num_insns_ptr,
		     int *constant_ptr)
{
  size_t nunits = GET_MODE_NUNITS (mode);
  size_t i;
  HOST_WIDE_INT value;
  rtx element;

  /* Set the returned values to out of bound values.  */
  *num_insns_ptr = -1;
  *constant_ptr = 256;

  if (!TARGET_P9_VECTOR)
    return false;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
    return false;

  /* Handle (vec_duplicate <constant>).  */
  if (GET_CODE (op) == VEC_DUPLICATE)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
	  && mode != V2DImode)
	return false;

      element = XEXP (op, 0);
      if (!CONST_INT_P (element))
	return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
	return false;
    }

  /* Handle (const_vector [...]).  */
  else if (GET_CODE (op) == CONST_VECTOR)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
	  && mode != V2DImode)
	return false;

      element = CONST_VECTOR_ELT (op, 0);
      if (!CONST_INT_P (element))
	return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
	return false;

      for (i = 1; i < nunits; i++)
	{
	  element = CONST_VECTOR_ELT (op, i);
	  if (!CONST_INT_P (element))
	    return false;

	  if (value != INTVAL (element))
	    return false;
	}
    }

  /* Handle integer constants being loaded into the upper part of the VSX
     register as a scalar.  If the value isn't 0/-1, only allow it if the mode
     can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
  else if (CONST_INT_P (op))
    {
      if (!SCALAR_INT_MODE_P (mode))
	return false;

      value = INTVAL (op);
      if (!IN_RANGE (value, -128, 127))
	return false;

      if (!IN_RANGE (value, -1, 0))
	{
	  if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
	    return false;

	  if (EASY_VECTOR_15 (value))
	    return false;
	}
    }

  else
    return false;

  /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
     sign extend.  Special case 0/-1 to allow getting any VSX register instead
     of an Altivec register.  */
  if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
      && EASY_VECTOR_15 (value))
    return false;

  /* Return # of instructions and the constant byte for XXSPLTIB.  */
  if (mode == V16QImode)
    *num_insns_ptr = 1;

  else if (IN_RANGE (value, -1, 0))
    *num_insns_ptr = 1;

  else
    *num_insns_ptr = 2;

  *constant_ptr = (int) value;
  return true;
}
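/* Worked example: a V8HImode splat of 66 passes the [-128, 127] range
   check but is not EASY_VECTOR_15, so it cannot use vspltish directly; the
   function reports *num_insns_ptr == 2 and *constant_ptr == 66, i.e. an
   xxspltib of 66 followed by a byte-to-halfword sign extension.  */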
const char *
output_vec_const_move (rtx *operands)
{
  int shift;
  machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
      int xxspltib_value = 256;
      int num_insns = -1;

      if (zero_constant (vec, mode))
	{
	  if (TARGET_P9_VECTOR)
	    return "xxspltib %x0,0";

	  else if (dest_vmx_p)
	    return "vspltisw %0,0";

	  else
	    return "xxlxor %x0,%x0,%x0";
	}

      if (all_ones_constant (vec, mode))
	{
	  if (TARGET_P9_VECTOR)
	    return "xxspltib %x0,255";

	  else if (dest_vmx_p)
	    return "vspltisw %0,-1";

	  else if (TARGET_P8_VECTOR)
	    return "xxlorc %x0,%x0,%x0";

	  else
	    gcc_unreachable ();
	}

      if (TARGET_P9_VECTOR
	  && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
	{
	  if (num_insns == 1)
	    {
	      operands[2] = GEN_INT (xxspltib_value & 0xff);
	      return "xxspltib %x0,%2";
	    }

	  gcc_unreachable ();
	}
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;

      gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
      if (zero_constant (vec, mode))
	return "vspltisw %0,0";

      if (all_ones_constant (vec, mode))
	return "vspltisw %0,-1";

      /* Do we need to construct a value using VSLDOI?  */
      shift = vspltis_shifted (vec);
      if (shift != 0)
	return "#";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case E_V4SImode:
	  return "vspltisw %0,%1";

	case E_V8HImode:
	  return "vspltish %0,%1";

	case E_V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_unreachable ();
}
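/* For example, a zero vector assembles to "xxspltib %x0,0" on ISA 3.0, to
   "vspltisw %0,0" when the destination is an Altivec register, and to
   "xxlxor %x0,%x0,%x0" otherwise; an all-ones vector likewise maps to
   xxspltib 255, vspltisw -1, or xxlorc depending on the target.  */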
/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }

  return;
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_move_insn (target, CONST0_RTX (mode));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op[2];
      size_t i;
      size_t num_elements = all_same ? 1 : 2;
      for (i = 0; i < num_elements; i++)
	{
	  op[i] = XVECEXP (vals, 0, i);
	  /* Just in case there is a SUBREG with a smaller mode, do a
	     conversion.  */
	  if (GET_MODE (op[i]) != inner_mode)
	    {
	      rtx tmp = gen_reg_rtx (inner_mode);
	      convert_move (tmp, op[i], 0);
	      op[i] = tmp;
	    }
	  /* Allow load with splat double word.  */
	  else if (MEM_P (op[i]))
	    {
	      if (!all_same)
		op[i] = force_reg (inner_mode, op[i]);
	    }
	  else if (!REG_P (op[i]))
	    op[i] = force_reg (inner_mode, op[i]);
	}

      if (all_same)
	{
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op[0]));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op[0]));
	}
      else
	{
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
	}
      return;
    }

  /* Special case initializing vector int if we are on 64-bit systems with
     direct move or we have the ISA 3.0 instructions.  */
  if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
      && TARGET_DIRECT_MOVE_64BIT)
    {
      if (all_same)
	{
	  rtx element0 = XVECEXP (vals, 0, 0);
	  if (MEM_P (element0))
	    element0 = rs6000_address_for_fpconvert (element0);
	  else
	    element0 = force_reg (SImode, element0);

	  if (TARGET_P9_VECTOR)
	    emit_insn (gen_vsx_splat_v4si (target, element0));
	  else
	    {
	      rtx tmp = gen_reg_rtx (DImode);
	      emit_insn (gen_zero_extendsidi2 (tmp, element0));
	      emit_insn (gen_vsx_splat_v4si_di (target, tmp));
	    }
	  return;
	}
      else
	{
	  rtx elements[4];
	  size_t i;

	  for (i = 0; i < 4; i++)
	    {
	      elements[i] = XVECEXP (vals, 0, i);
	      if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
		elements[i] = copy_to_mode_reg (SImode, elements[i]);
	    }

	  emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
					elements[2], elements[3]));
	  return;
	}
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
    {
      if (all_same)
	{
	  rtx element0 = XVECEXP (vals, 0, 0);

	  if (TARGET_P9_VECTOR)
	    {
	      if (MEM_P (element0))
		element0 = rs6000_address_for_fpconvert (element0);

	      emit_insn (gen_vsx_splat_v4sf (target, element0));
	    }

	  else
	    {
	      rtx freg = gen_reg_rtx (V4SFmode);
	      rtx sreg = force_reg (SFmode, element0);
	      rtx cvt  = (TARGET_XSCVDPSPN
			  ? gen_vsx_xscvdpspn_scalar (freg, sreg)
			  : gen_vsx_xscvdpsp_scalar (freg, sreg));

	      emit_insn (cvt);
	      emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
						      const0_rtx));
	    }
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd  = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd  = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  /* Use VMRGEW if we can instead of doing a permute.  */
	  if (TARGET_P8_VECTOR)
	    {
	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	      if (BYTES_BIG_ENDIAN)
		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
	      else
		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
	    }
	  else
	    {
	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	      rs6000_expand_extract_even (target, flt_even, flt_odd);
	    }
	}
      return;
    }

  /* Special case initializing vector short/char that are splats if we are on
     64-bit systems with direct move.  */
  if (all_same && TARGET_DIRECT_MOVE_64BIT
      && (mode == V16QImode || mode == V8HImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx di_tmp = gen_reg_rtx (DImode);

      if (!REG_P (op0))
	op0 = force_reg (GET_MODE_INNER (mode), op0);

      if (mode == V16QImode)
	{
	  emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
	  emit_insn (gen_vsx_vspltb_di (target, di_tmp));
	  return;
	}

      if (mode == V8HImode)
	{
	  emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
	  emit_insn (gen_vsx_vsplth_di (target, di_tmp));
	  return;
	}
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  val = force_reg (GET_MODE (val), val);

  if (VECTOR_MEM_VSX_P (mode))
    {
      rtx insn = NULL_RTX;
      rtx elt_rtx = GEN_INT (elt);

      if (mode == V2DFmode)
	insn = gen_vsx_set_v2df (target, target, val, elt_rtx);

      else if (mode == V2DImode)
	insn = gen_vsx_set_v2di (target, target, val, elt_rtx);

      else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
	{
	  if (mode == V4SImode)
	    insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
	  else if (mode == V8HImode)
	    insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
	  else if (mode == V16QImode)
	    insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
	  else if (mode == V4SFmode)
	    insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
	}

      if (insn)
	{
	  emit_insn (insn);
	  return;
	}
    }

  /* Simplify setting single element vectors like V1TImode.  */
  if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
    {
      emit_move_insn (target, gen_lowpart (mode, val));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));

  if (BYTES_BIG_ENDIAN)
    x = gen_rtx_UNSPEC (mode,
			gen_rtvec (3, target, reg,
				   force_reg (V16QImode, x)),
			UNSPEC_VPERM);
  else
    {
      if (TARGET_P9_VECTOR)
	x = gen_rtx_UNSPEC (mode,
			    gen_rtvec (3, reg, target,
				       force_reg (V16QImode, x)),
			    UNSPEC_VPERMR);
      else
	{
	  /* Invert selector.  We prefer to generate VNAND on P8 so
	     that future fusion opportunities can kick in, but must
	     generate VNOR elsewhere.  */
	  rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
	  rtx iorx = (TARGET_P8_VECTOR
		      ? gen_rtx_IOR (V16QImode, notx, notx)
		      : gen_rtx_AND (V16QImode, notx, notx));
	  rtx tmp = gen_reg_rtx (V16QImode);
	  emit_insn (gen_rtx_SET (tmp, iorx));

	  /* Permute with operands reversed and adjusted selector.  */
	  x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
			      UNSPEC_VPERM);
	}
    }

  emit_insn (gen_rtx_SET (target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
{
  machine_mode mode = GET_MODE (vec);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
    {
      switch (mode)
	{
	default:
	  break;
	case E_V1TImode:
	  gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
	  emit_move_insn (target, gen_lowpart (TImode, vec));
	  return;
	case E_V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, elt));
	  return;
	case E_V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, elt));
	  return;
	case E_V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
	  return;
	case E_V16QImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
	      return;
	    }
	  else
	    break;
	case E_V8HImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
	      return;
	    }
	  else
	    break;
	case E_V4SImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v4si (target, vec, elt));
	      return;
	    }
	  break;
	}
    }
  else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
	   && TARGET_DIRECT_MOVE_64BIT)
    {
      if (GET_MODE (elt) != DImode)
	{
	  rtx tmp = gen_reg_rtx (DImode);
	  convert_move (tmp, elt, 0);
	  elt = tmp;
	}
      else if (!REG_P (elt))
	elt = force_reg (DImode, elt);

      switch (mode)
	{
	case E_V2DFmode:
	  emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
	  return;

	case E_V2DImode:
	  emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
	  return;

	case E_V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
	  return;

	case E_V4SImode:
	  emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
	  return;

	case E_V8HImode:
	  emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
	  return;

	case E_V16QImode:
	  emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
	  return;

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (CONST_INT_P (elt));

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode,
			   INTVAL (elt) * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Helper function to return the register number of a RTX.  */

static inline int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}
/* Adjust a memory address (MEM) of a vector type to point to a scalar field
   within the vector (ELEMENT) with a mode (SCALAR_MODE).  Use a base register
   temporary (BASE_TMP) to fixup the address.  Return the new memory address
   that is valid for reads or writes to a given register (SCALAR_REG).  */

static rtx
rs6000_adjust_vec_address (rtx scalar_reg,
			   rtx mem,
			   rtx element,
			   rtx base_tmp,
			   machine_mode scalar_mode)
{
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  rtx addr = XEXP (mem, 0);
  rtx element_offset;
  rtx new_addr;
  bool valid_addr_p;

  /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY.  */
  gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);

  /* Calculate what we need to add to the address to get the element
     address.  */
  if (CONST_INT_P (element))
    element_offset = GEN_INT (INTVAL (element) * scalar_size);
  else
    {
      int byte_shift = exact_log2 (scalar_size);
      gcc_assert (byte_shift >= 0);

      if (byte_shift == 0)
	element_offset = element;

      else
	{
	  if (TARGET_POWERPC64)
	    emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
	  else
	    emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));

	  element_offset = base_tmp;
	}
    }

  /* Create the new address pointing to the element within the vector.  If we
     are adding 0, we don't have to change the address.  */
  if (element_offset == const0_rtx)
    new_addr = addr;

  /* A simple indirect address can be converted into a reg + offset
     address.  */
  else if (REG_P (addr) || SUBREG_P (addr))
    new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);

  /* Optimize D-FORM addresses with constant offset with a constant element, to
     include the element offset in the address directly.  */
  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      rtx insn;

      gcc_assert (REG_P (op0) || SUBREG_P (op0));
      if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
	{
	  HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
	  rtx offset_rtx = GEN_INT (offset);

	  if (IN_RANGE (offset, -32768, 32767)
	      && (scalar_size < 8 || (offset & 0x3) == 0))
	    new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
	  else
	    {
	      emit_move_insn (base_tmp, offset_rtx);
	      new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
	    }
	}
      else
	{
	  bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
	  bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));

	  /* Note, ADDI requires the register being added to be a base
	     register.  If the register was R0, load it up into the temporary
	     and do the add.  */
	  if (op1_reg_p
	      && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
	    {
	      insn = gen_add3_insn (base_tmp, op1, element_offset);
	      gcc_assert (insn != NULL_RTX);
	      emit_insn (insn);
	    }

	  else if (ele_reg_p
		   && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
	    {
	      insn = gen_add3_insn (base_tmp, element_offset, op1);
	      gcc_assert (insn != NULL_RTX);
	      emit_insn (insn);
	    }

	  else
	    {
	      emit_move_insn (base_tmp, op1);
	      emit_insn (gen_add2_insn (base_tmp, element_offset));
	    }

	  new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
	}
    }

  else
    {
      emit_move_insn (base_tmp, addr);
      new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
    }

  /* If we have a PLUS, we need to see whether the particular register class
     allows for D-FORM or X-FORM addressing.  */
  if (GET_CODE (new_addr) == PLUS)
    {
      rtx op1 = XEXP (new_addr, 1);
      addr_mask_type addr_mask;
      int scalar_regno = regno_or_subregno (scalar_reg);

      gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
      if (INT_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];

      else if (FP_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];

      else if (ALTIVEC_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];

      else
	gcc_unreachable ();

      if (REG_P (op1) || SUBREG_P (op1))
	valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
      else
	valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
    }

  else if (REG_P (new_addr) || SUBREG_P (new_addr))
    valid_addr_p = true;

  else
    valid_addr_p = false;

  if (!valid_addr_p)
    {
      emit_move_insn (base_tmp, new_addr);
      new_addr = base_tmp;
    }

  return change_address (mem, scalar_mode, new_addr);
}
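/* Worked example: extracting element 2 of a V4SImode vector at the D-form
   address r30+16 folds the element offset into the displacement:
   16 + 2 * 4 == 24, giving the offsettable address r30+24 with no extra
   instructions emitted.  */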
/* Split a variable vec_extract operation into the component instructions.  */

void
rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
			      rtx tmp_altivec)
{
  machine_mode mode = GET_MODE (src);
  machine_mode scalar_mode = GET_MODE (dest);
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  int byte_shift = exact_log2 (scalar_size);

  gcc_assert (byte_shift >= 0);

  /* If we are given a memory address, optimize to load just the element.  We
     don't have to adjust the vector element number on little endian
     systems.  */
  if (MEM_P (src))
    {
      gcc_assert (REG_P (tmp_gpr));
      emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
						       tmp_gpr, scalar_mode));
      return;
    }

  else if (REG_P (src) || SUBREG_P (src))
    {
      int bit_shift = byte_shift + 3;
      rtx element2;
      int dest_regno = regno_or_subregno (dest);
      int src_regno = regno_or_subregno (src);
      int element_regno = regno_or_subregno (element);

      gcc_assert (REG_P (tmp_gpr));

      /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
	 a general purpose register.  */
      if (TARGET_P9_VECTOR
	  && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	  && INT_REGNO_P (dest_regno)
	  && ALTIVEC_REGNO_P (src_regno)
	  && INT_REGNO_P (element_regno))
	{
	  rtx dest_si = gen_rtx_REG (SImode, dest_regno);
	  rtx element_si = gen_rtx_REG (SImode, element_regno);

	  if (mode == V16QImode)
	    emit_insn (VECTOR_ELT_ORDER_BIG
		       ? gen_vextublx (dest_si, element_si, src)
		       : gen_vextubrx (dest_si, element_si, src));

	  else if (mode == V8HImode)
	    {
	      rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
	      emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
	      emit_insn (VECTOR_ELT_ORDER_BIG
			 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
			 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
	    }

	  else
	    {
	      rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
	      emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
	      emit_insn (VECTOR_ELT_ORDER_BIG
			 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
			 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
	    }

	  return;
	}

      gcc_assert (REG_P (tmp_altivec));

      /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
	 an XOR, otherwise we need to subtract.  The shift amount is so VSLO
	 will shift the element into the upper position (adding 3 to convert a
	 byte shift into a bit shift).  */
      if (scalar_size == 8)
	{
	  if (!VECTOR_ELT_ORDER_BIG)
	    {
	      emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
	      element2 = tmp_gpr;
	    }
	  else
	    element2 = element;

	  /* Generate RLDIC directly to shift left 6 bits and retrieve 1
	     bit.  */
	  emit_insn (gen_rtx_SET (tmp_gpr,
				  gen_rtx_AND (DImode,
					       gen_rtx_ASHIFT (DImode,
							       element2,
							       GEN_INT (6)),
					       GEN_INT (64))));
	}
      else
	{
	  if (!VECTOR_ELT_ORDER_BIG)
	    {
	      rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);

	      emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
	      emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
	      element2 = tmp_gpr;
	    }
	  else
	    element2 = element;

	  emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
	}

      /* Get the value into the lower byte of the Altivec register where VSLO
	 expects it.  */
      if (TARGET_P9_VECTOR)
	emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
      else if (can_create_pseudo_p ())
	emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
      else
	{
	  rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	  emit_move_insn (tmp_di, tmp_gpr);
	  emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
	}

      /* Do the VSLO to get the value into the final location.  */
      switch (mode)
	{
	case E_V2DFmode:
	  emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
	  return;

	case E_V2DImode:
	  emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
	  return;

	case E_V4SFmode:
	  {
	    rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	    rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
	    rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
	    emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
					  tmp_altivec));

	    emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
	    return;
	  }

	case E_V4SImode:
	case E_V8HImode:
	case E_V16QImode:
	  {
	    rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	    rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
	    rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
	    emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
					  tmp_altivec));
	    emit_move_insn (tmp_gpr_di, tmp_altivec_di);
	    emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
				    GEN_INT (64 - (8 * scalar_size))));
	    return;
	  }

	default:
	  gcc_unreachable ();
	}
    }

  else
    gcc_unreachable ();
}
/* Helper function for rs6000_split_v4si_init to build up a DImode value from
   two SImode values.  */

static void
rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
{
  const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);

  if (CONST_INT_P (si1) && CONST_INT_P (si2))
    {
      unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
      unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;

      emit_move_insn (dest, GEN_INT (const1 | const2));
      return;
    }

  /* Put si1 into upper 32-bits of dest.  */
  if (CONST_INT_P (si1))
    emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
  else
    {
      /* Generate RLDIC.  */
      rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
      rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
      rtx mask_rtx = GEN_INT (mask_32bit << 32);
      rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
      gcc_assert (!reg_overlap_mentioned_p (dest, si1));
      emit_insn (gen_rtx_SET (dest, and_rtx));
    }

  /* Put si2 into the temporary.  */
  gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
  if (CONST_INT_P (si2))
    emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
  else
    emit_insn (gen_zero_extendsidi2 (tmp, si2));

  /* Combine the two parts.  */
  emit_insn (gen_iordi3 (dest, dest, tmp));
  return;
}
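/* For example, in the all-constant case above, si1 = 0x12345678 and
   si2 = 0x9abcdef0 give const1 = 0x1234567800000000 and
   const2 = 0x000000009abcdef0, so a single move sets dest to
   0x123456789abcdef0.  */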
/* Split a V4SI initialization.  */

void
rs6000_split_v4si_init (rtx operands[])
{
  rtx dest = operands[0];

  /* Destination is a GPR, build up the two DImode parts in place.  */
  if (REG_P (dest) || SUBREG_P (dest))
    {
      int d_regno = regno_or_subregno (dest);
      rtx scalar1 = operands[1];
      rtx scalar2 = operands[2];
      rtx scalar3 = operands[3];
      rtx scalar4 = operands[4];
      rtx tmp1 = operands[5];
      rtx tmp2 = operands[6];

      /* Even though we only need one temporary (plus the destination, which
	 has an early clobber constraint), try to use two temporaries, one
	 for each double word created.  That way the 2nd insn scheduling pass
	 can rearrange things so the two parts are done in parallel.  */
      if (BYTES_BIG_ENDIAN)
	{
	  rtx di_lo = gen_rtx_REG (DImode, d_regno);
	  rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
	  rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
	  rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
	}
      else
	{
	  rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
	  rtx di_hi = gen_rtx_REG (DImode, d_regno);
	  gcc_assert (!VECTOR_ELT_ORDER_BIG);
	  rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
	  rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
	}
      return;
    }

  else
    gcc_unreachable ();
}
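/* For example, on big endian with the destination allocated to (say)
   r10/r11, the first call packs operands[1] into the upper and operands[2]
   into the lower 32 bits of r10, and the second call does the same for
   operands[3]/operands[4] in r11, using independent temporaries so the two
   halves can be scheduled in parallel.  */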
/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is abi mandated, optional, or
   both abi and optional alignment.  */

unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
	    {
	      if (align < 64)
		align = 64;
	    }
	  else if (align < 128)
	    align = 128;
	}
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
	  && TYPE_MODE (TREE_TYPE (type)) == QImode)
	{
	  if (align < BITS_PER_WORD)
	    align = BITS_PER_WORD;
	}
    }

  return align;
}
/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  Altivec vector memory
   instructions simply ignore the low bits; VSX memory instructions
   are aligned to 4 or 8 bytes.  */

static bool
rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
{
  return (STRICT_ALIGNMENT
	  || (!TARGET_EFFICIENT_UNALIGNED_VSX
	      && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
		  || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
		      && (int) align < VECTOR_ALIGN (mode)))));
}
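/* For example, without TARGET_EFFICIENT_UNALIGNED_VSX a DFmode reference
   with only byte alignment (align == 8) is reported as slow, since
   SCALAR_FLOAT_MODE_NOT_VECTOR_P (DFmode) holds and 8 < 32; a V4SImode
   reference is slow whenever its alignment is below
   VECTOR_ALIGN (V4SImode).  */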
/* Previous GCC releases forced all vector types to have 16-byte alignment.  */

bool
rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
{
  if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
    {
      if (computed != 128)
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the layout of aggregates containing vectors with"
		      " %d-byte alignment has changed in GCC 5",
		      computed / BITS_PER_UNIT);
	    }
	}
      /* In current GCC there is no special case.  */
      return false;
    }

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
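/* For example, (const (plus (symbol_ref "x") (const_int 4))) is accepted
   only when 0 <= 4 <= g_switch_value and "x" itself is marked small data;
   a negative addend is rejected even for a small-data symbol, because the
   referenced address could fall outside the 32k window around
   _SDA_BASE_.  */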
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
	return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
	return true;
    }

  return false;
}
/* Return true if the OFFSET is valid for the quad address instructions that
   use d-form (register + offset) addressing.  */

static inline bool
quad_address_offset_p (HOST_WIDE_INT offset)
{
  return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
}
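/* For example, offsets 0, 16 and 32752 (0x7ff0) are valid; 32760 (0x7ff8)
   is rejected because its low nibble is nonzero, and 32768 is rejected
   because it does not fit in the signed 16-bit displacement field.  */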
/* Return true if ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers, or
   LXV/STXV for vector registers under ISA 3.0).  STRICT selects whether
   only hard registers are acceptable as the base register.  */

bool
quad_address_p (rtx addr, machine_mode mode, bool strict)
{
  rtx op0, op1;

  if (GET_MODE_SIZE (mode) != 16)
    return false;

  if (legitimate_indirect_address_p (addr, strict))
    return true;

  if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
    return false;

  if (GET_CODE (addr) != PLUS)
    return false;

  op0 = XEXP (addr, 0);
  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;

  op1 = XEXP (addr, 1);
  if (!CONST_INT_P (op1))
    return false;

  return quad_address_offset_p (INTVAL (op1));
}
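/* So for V2DImode with ISA 3.0 d-form support an address like
   (plus (reg) (const_int 48)) is acceptable, while a plain (reg) is
   always acceptable via legitimate_indirect_address_p.  */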
/* Return true if this is a load or store quad operation.  This function does
   not handle the atomic quad memory instructions.  */

bool
quad_load_store_p (rtx op0, rtx op1)
{
  bool ret;

  if (!TARGET_QUAD_MEMORY)
    ret = false;

  else if (REG_P (op0) && MEM_P (op1))
    ret = (quad_int_reg_operand (op0, GET_MODE (op0))
	   && quad_memory_operand (op1, GET_MODE (op1))
	   && !reg_overlap_mentioned_p (op0, op1));

  else if (MEM_P (op0) && REG_P (op1))
    ret = (quad_memory_operand (op0, GET_MODE (op0))
	   && quad_int_reg_operand (op1, GET_MODE (op1)));

  else
    ret = false;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n========== quad_load_store, return %s\n",
	       ret ? "true" : "false");
      debug_rtx (gen_rtx_SET (op0, op1));
    }

  return ret;
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
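/* For example, (plus (reg) (const_int 8)) yields (const_int 8), and
   (lo_sum (reg) (const (plus (symbol_ref "s") (const_int 12)))) yields
   (const_int 12); an address with no constant term yields NULL_RTX.  */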
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
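/* For example, a TImode access on powerpc64 has extra = 8, so offset
   32756 is accepted (32756 + 0x8000 = 65524 < 65528) but 32760 is not:
   the second doubleword would sit at 32768, outside the signed 16-bit
   displacement range.  */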
/* As above, but for DS-FORM VSX insns.  Unlike mem_operand_gpr,
   enforce an offset divisible by 4 even for 32-bit.  */

bool
mem_operand_ds_form (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  if (!offsettable_address_p (false, mode, addr))
    return false;

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if ((offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (machine_mode mode)
{
  switch (mode)
    {
    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
    case E_TImode:
    case E_TFmode:
    case E_KFmode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing was valid until
	 the ISA 3.0 vector d-form addressing mode was added.  While TImode
	 is not a vector mode, if we want to use the VSX registers to move
	 it around, we need to restrict ourselves to reg+reg addressing.
	 Similarly for IEEE 128-bit floating point that is passed in a
	 single vector register.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return mode_supports_vsx_dform_quad (mode);
      break;

    case E_V2SImode:
    case E_V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      break;

    case E_SDmode:
      /* If we can do direct load/stores of SDmode, restrict it to reg+reg
	 addressing for the LFIWZX and STFIWX instructions.  */
      if (TARGET_NO_SDMODE_STACK)
	return false;
      break;

    default:
      break;
    }

  return true;
}
static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if a MODE sized memory access to OP plus OFFSET
   is known to not straddle a 32k boundary.  This function is used
   to determine whether -mcmodel=medium code can use TOC pointer
   relative addressing for OP.  This means the alignment of the TOC
   pointer must also be taken into account, and unfortunately that is
   only 8 bytes.  */

#ifndef POWERPC64_TOC_POINTER_ALIGNMENT
#define POWERPC64_TOC_POINTER_ALIGNMENT 8
#endif

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     machine_mode mode)
{
  tree decl;
  unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  /* ISA 3.0 vector d-form addressing is restricted, don't allow
     SYMBOL_REF.  */
  if (mode_supports_vsx_dform_quad (mode))
    return false;

  dsize = GET_MODE_SIZE (mode);
  decl = SYMBOL_REF_DECL (op);
  if (!decl)
    {
      if (dsize == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = BITS_PER_UNIT;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);

	  dalign = block->alignment;
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	}
      else if (CONSTANT_POOL_ADDRESS_P (op))
	{
	  /* It would be nice to have get_pool_align()..  */
	  machine_mode cmode = get_pool_mode (op);

	  dalign = GET_MODE_ALIGNMENT (cmode);
	}
    }
  else if (DECL_P (decl))
    {
      dalign = DECL_ALIGN (decl);

      if (dsize == 0)
	{
	  /* Allow BLKmode when the entire object is known to not
	     cross a 32k boundary.  */
	  if (!DECL_SIZE_UNIT (decl))
	    return false;

	  if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
	    return false;

	  dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
	  if (dsize > 32768)
	    return false;

	  dalign /= BITS_PER_UNIT;
	  if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
	    dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
	  return dalign >= dsize;
	}
    }
  else
    gcc_unreachable ();

  /* Find how many bits of the alignment we know for this access.  */
  dalign /= BITS_PER_UNIT;
  if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
    dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
  mask = dalign - 1;
  lsb = offset & -offset;
  mask &= lsb - 1;
  dalign = mask + 1;

  return dalign >= dsize;
}
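/* For example, with dalign capped at 8 and offset = 20: lsb = 20 & -20
   = 4 and mask = (8 - 1) & (4 - 1) = 3, so the combined known alignment
   is 4 bytes; a 4-byte access cannot cross a 32k boundary there, but an
   8-byte access might.  */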
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
/* These are only used to pass through from print_operand/print_operand_address
   to rs6000_output_addr_const_extra over the intervening function
   output_addr_const which is not target code.  */
static const_rtx tocrel_base_oac, tocrel_offset_oac;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match non-split
   -mcmodel=large/medium toc pointer relative addresses.  If the pointers
   are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
   TOCREL_OFFSET_RET respectively.  */

static bool
toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
		     const_rtx *tocrel_offset_ret)
{
  if (!TARGET_TOC)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* When strict ensure we have everything tidy.  */
      if (strict
	  && !(GET_CODE (op) == LO_SUM
	       && REG_P (XEXP (op, 0))
	       && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
	return false;

      /* When not strict, allow non-split TOC addresses and also allow
	 (lo_sum (high ..)) TOC addresses created during reload.  */
      if (GET_CODE (op) == LO_SUM)
	op = XEXP (op, 1);
    }

  const_rtx tocrel_base = op;
  const_rtx tocrel_offset = const0_rtx;

  if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  if (tocrel_base_ret)
    *tocrel_base_ret = tocrel_base;
  if (tocrel_offset_ret)
    *tocrel_offset_ret = tocrel_offset;

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
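/* The accepted shapes are thus a bare (unspec [...] UNSPEC_TOCREL), the
   same unspec under (plus ... (const_int N)) for an offset addend, and,
   for -mcmodel=medium/large, those forms underneath (lo_sum (reg) ...).  */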
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
				    bool strict)
{
  const_rtx tocrel_base, tocrel_offset;
  return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}
static bool
legitimate_small_data_p (machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
bool
rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (mode_supports_vsx_dform_quad (mode))
    return quad_address_p (x, mode, strict);
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case E_V2SImode:
    case E_V2SFmode:
      /* Paired single modes: offset addressing isn't valid.  */
      return false;

    case E_DFmode:
    case E_DDmode:
    case E_DImode:
      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (VECTOR_MEM_VSX_P (mode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case E_TFmode:
    case E_IFmode:
    case E_KFmode:
    case E_TDmode:
    case E_TImode:
    case E_PTImode:
      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
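/* For example, a TFmode access on 32-bit in the worst case has
   extra = 12, so the largest accepted offset is 32755: the final word of
   the value then starts at 32755 + 12 = 32767, still within the signed
   16-bit displacement range.  */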
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}

bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* quad word addresses are restricted, and we can't use LO_SUM.  */
  if (mode_supports_vsx_dform_quad (mode))
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      bool large_toc_ok;

      if (DEFAULT_ABI == ABI_V4 && flag_pic)
	return false;
      /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
	 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
	 recognizes some LO_SUM addresses as valid although this
	 function says the opposite.  In most cases, LRA through different
	 transformations can generate correct code for address reloads.
	 It cannot manage only some LO_SUM cases.  So we need to add
	 code here, analogous to the LO_SUM handling in
	 rs6000_legitimize_reload_address, saying that some addresses
	 are still valid.  */
      large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
		      && small_toc_ref (x, VOIDmode));
      if (TARGET_TOC && ! large_toc_ok)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
	       && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x) || large_toc_ok;
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode)
      || mode_supports_vsx_dform_quad (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* For TImode with load/store quad, restrict addresses to just a single
	 pointer, so it works with both GPRs and VSX registers.  */
      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS
	       && (mode != TImode || !TARGET_VSX))
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case E_TFmode:
    case E_TDmode:
    case E_TImode:
    case E_PTImode:
    case E_IFmode:
    case E_KFmode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  PTImode is only GPRs.  */
      extra = 8;
      break;
    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra)
      && !PAIRED_VECTOR_MODE (mode))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if (PAIRED_VECTOR_MODE (mode))
    {
      if (mode == DImode)
	return x;
      /* We accept [reg + reg].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);
	  op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_WIDE_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
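/* For example, legitimizing (plus (reg) (const_int 74560)): the guard
   holds since 74560 + 0x8000 >= 0x10000, low_int works out to 9024 and
   high_int to 65536, so an addis of 1 is emitted and the resulting
   address is (plus (new reg) (const_int 9024)).  */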
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  rtx ret;
  rtx_insn *insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  if (TARGET_ELF)
    fputs ("@dtprel+0x8000", file);
  else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
    {
      switch (SYMBOL_REF_TLS_MODEL (x))
	{
	case 0:
	  break;
	case TLS_MODEL_LOCAL_EXEC:
	  fputs ("@le", file);
	  break;
	case TLS_MODEL_INITIAL_EXEC:
	  fputs ("@ie", file);
	  break;
	case TLS_MODEL_GLOBAL_DYNAMIC:
	case TLS_MODEL_LOCAL_DYNAMIC:
	  fputs ("@m", file);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
/* Return true if X is a symbol that refers to real (rather than emulated)
   TLS.  */

static bool
rs6000_real_tls_symbol_ref_p (rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
      y = XVECEXP (y, 0, 0);

#ifdef HAVE_AS_TLS
      /* Do not associate thread-local symbols with the original
	 constant pool symbol.  */
      if (TARGET_XCOFF
	  && GET_CODE (y) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (y)
	  && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
	return orig_x;
#endif

      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
rs6000_legitimate_combined_insn (rtx_insn *insn)
{
  int icode = INSN_CODE (insn);

  /* Reject creating doloop insns.  Combine should not be allowed
     to create these for a number of reasons:
     1) In a nested loop, if combine creates one of these in an
     outer loop and the register allocator happens to allocate ctr
     to the outer loop insn, then the inner loop can't use ctr.
     Inner loops ought to be more highly optimized.
     2) Combine often wants to create one of these from what was
     originally a three insn sequence, first combining the three
     insns to two, then to ctrsi/ctrdi.  When ctrsi/ctrdi is not
     allocated ctr, the splitter takes us back to the three insn
     sequence.  It's better to stop combine at the two insn
     sequence.
     3) Faced with not being able to allocate ctr for ctrsi/ctrdi
     insns, the register allocator sometimes uses floating point
     or vector registers for the pseudo.  Since ctrsi/ctrdi is a
     jump insn and output reloads are not implemented for jumps,
     the ctrsi/ctrdi splitters need to handle all possible cases.
     That's a pain, and it gets to be seriously difficult when a
     splitter that runs after reload needs memory to transfer from
     a gpr to fpr.  See PR70098 and PR71763 which are not fixed
     for the difficult case.  It's better to not create problems
     in the first place.  */
  if (icode != CODE_FOR_nothing
      && (icode == CODE_FOR_bdz_si
	  || icode == CODE_FOR_bdz_di
	  || icode == CODE_FOR_bdnz_si
	  || icode == CODE_FOR_bdnz_di
	  || icode == CODE_FOR_bdztf_si
	  || icode == CODE_FOR_bdztf_di
	  || icode == CODE_FOR_bdnztf_si
	  || icode == CODE_FOR_bdnztf_di))
    return false;

  return true;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* AIX Thread-Local Address support.  */

static rtx
rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
{
  rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
  const char *name;
  char *tlsname;

  name = XSTR (addr, 0);
  /* Append TLS CSECT qualifier, unless the symbol already is qualified
     or the symbol will be in TLS private data section.  */
  if (name[strlen (name) - 1] != ']'
      && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
	  || bss_initializer_p (SYMBOL_REF_DECL (addr))))
    {
      /* Allow for the 4-character suffix plus the terminating NUL.  */
      tlsname = XALLOCAVEC (char, strlen (name) + 5);
      strcpy (tlsname, name);
      strcat (tlsname,
	      bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
      tlsaddr = copy_rtx (addr);
      XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
    }
  else
    tlsaddr = addr;

  /* Place addr into TOC constant pool.  */
  sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);

  /* Output the TOC entry and create the MEM referencing the value.  */
  if (constant_pool_expr_p (XEXP (sym, 0))
      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)),
					  Pmode))
    {
      tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
      mem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (mem, get_TOC_alias_set ());
    }
  else
    return sym;

  /* Use global-dynamic for local-dynamic.  */
  if (model == TLS_MODEL_GLOBAL_DYNAMIC
      || model == TLS_MODEL_LOCAL_DYNAMIC)
    {
      /* Create new TOC reference for @m symbol.  */
      name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
      /* "*LCM" replaces the first three characters of NAME, adding one
	 character net; allow for it plus the terminating NUL.  */
      tlsname = XALLOCAVEC (char, strlen (name) + 2);
      strcpy (tlsname, "*LCM");
      strcat (tlsname, name + 3);
      rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
      SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
      tocref = create_TOC_reference (modaddr, NULL_RTX);
      rtx modmem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (modmem, get_TOC_alias_set ());

      rtx modreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (modreg, modmem));

      tmpreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (tmpreg, mem));

      dest = gen_reg_rtx (Pmode);
      if (TARGET_32BIT)
	emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
      else
	emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
      return dest;
    }
  /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13.  */
  else if (TARGET_32BIT)
    {
      tlsreg = gen_reg_rtx (SImode);
      emit_insn (gen_tls_get_tpointer (tlsreg));
    }
  else
    tlsreg = gen_rtx_REG (DImode, 13);

  /* Load the TOC value into temporary register.  */
  tmpreg = gen_reg_rtx (Pmode);
  emit_insn (gen_rtx_SET (tmpreg, mem));
  set_unique_reg_note (get_last_insn (), REG_EQUAL,
		       gen_rtx_MINUS (Pmode, addr, tlsreg));

  /* Add TOC symbol value to TLS pointer.  */
  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));

  return dest;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  if (TARGET_XCOFF)
    return rs6000_legitimize_tls_address_aix (addr, model);

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode,
				   const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	      else
		insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	    }
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
				   const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	      else
		insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	    }
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);

	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
/* Only create the global variable for the stack protect guard if we are using
   the global flavor of that guard.  */
static tree
rs6000_init_stack_protect_guard (void)
{
  if (rs6000_stack_protector_guard == SSP_GLOBAL)
    return default_stack_protect_guard ();

  return NULL_TREE;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
    return true;

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && tls_referenced_p (x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym, machine_mode mode)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && SYMBOL_REF_LOCAL_P (sym)
	      && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addi/addis and the mem insn.
   This cuts the number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
static rtx
rs6000_legitimize_reload_address (rtx x, machine_mode mode,
				  int opnum, int type,
				  int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);
  bool quad_offset_p = mode_supports_vsx_dform_quad (mode);

  /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
     DFmode/DImode MEM.  Ditto for ISA 3.0 vsx_splat_v4sf/v4si.  */
  if (reg_offset_p
      && opnum == 1
      && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
	  || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
	  || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
	      && TARGET_P9_VECTOR)
	  || (mode == SImode && recog_data.operand_mode[0] == V4SImode
	      && TARGET_P9_VECTOR)))
    reg_offset_p = false;

  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Likewise for (lo_sum (high ...) ...) output we have generated.  */
  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
    {
      /* Result of previous invocation of this function on Darwin
	 floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
#endif

  if (TARGET_CMODEL != CMODEL_SMALL
      && reg_offset_p
      && !quad_offset_p
      && small_toc_ref (x, VOIDmode))
    {
      rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
      x = gen_rtx_LO_SUM (Pmode, hi, x);
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && CONST_INT_P (XEXP (x, 1))
      && reg_offset_p
      && !PAIRED_VECTOR_MODE (mode)
      && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow or quad addresses with one of the
	 four least significant bits set.  */
      if (high + low != val
	  || (quad_offset_p && (low & 0xf)))
	{
	  *win = 0;
	  return x;
	}

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */

      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && !quad_offset_p
      && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
      && !PAIRED_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't
	 offsettable.  The same goes for DImode without 64-bit gprs and
	 DFmode and DDmode without fprs.
	 ??? Assume floating point reg based on mode?  This assumption is
	 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
	 where reload ends up doing a DFmode load of a constant from
	 mem using two gprs.  Unfortunately, at this point reload
	 hasn't yet selected regs so poking around in reload data
	 won't help and even if we could figure out the regs reliably,
	 we'd still want to allow this transformation when the mem is
	 naturally aligned.  Since we say the address is good here, we
	 can't disable offsets from LO_SUMs in mem_operand_gpr.
	 FIXME: Allow offset from lo_sum for other modes too, when
	 mem is sufficiently aligned.

	 Also disallow this if the type can go in VMX/Altivec registers,
	 since those registers do not have d-form (reg+offset) address
	 modes.  */
      && !reg_addr[mode].scalar_in_vmx_p
      && mode != TFmode
      && mode != TDmode
      && mode != IFmode
      && mode != KFmode
      && (mode != TImode || !TARGET_VSX)
      && mode != PTImode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = machopic_gen_offset (x);
	  x = gen_rtx_LO_SUM (GET_MODE (x),
			      gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					    gen_rtx_HIGH (Pmode, offset)),
			      offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
			    gen_rtx_HIGH (Pmode, x), x);

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && !quad_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x, mode))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
	      debug_rtx (x);
	    }
	  push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	}
      *win = 1;
      return x;
    }
  *win = 0;
  return x;
}
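/* For example, with quad_offset_p and val = 0x10008: low works out to 8,
   and since (low & 0xf) != 0 the split is refused (*win = 0), because the
   resulting d-form quad address would not have its four least significant
   bits clear.  */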
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
					int opnum, int type,
					int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
					      ind_levels, win);
  fprintf (stderr,
	   "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
	   "type = %d, ind_levels = %d, win = %d, original addr:\n",
	   GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
static bool
rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);
  bool quad_offset_p = mode_supports_vsx_dform_quad (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if (TARGET_UPDATE
      && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && mode_supports_pre_incdec_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  /* Handle restricted vector d-form offsets in ISA 3.0.  */
  if (quad_offset_p)
    {
      if (quad_address_p (x, mode, reg_ok_strict))
	return 1;
    }
  else if (virtual_stack_registers_memory_p (x))
    return 1;

  else if (reg_offset_p)
    {
      if (legitimate_small_data_p (mode, x))
	return 1;
      if (legitimate_constant_pool_address_p (x, mode,
					      reg_ok_strict || lra_in_progress))
	return 1;
      if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
	  && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
	return 1;
    }

  /* For TImode, if we have TImode in VSX registers, only allow register
     indirect addresses.  This will allow the values to go in either GPRs
     or VSX registers without reloading.  The vector types would tend to
     go into VSX registers, so we allow REG+REG, while TImode seems
     somewhat split, in that some uses are GPR based, and some VSX based.  */
  /* FIXME: We could loosen this by changing the following to
       if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
     but currently we cannot allow REG+REG addressing for TImode.  See
     PR72827 for complete details on how this ends up hoodwinking DSE.  */
  if (mode == TImode && TARGET_VSX)
    return 0;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (!FLOAT128_2REG_P (mode)
      && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
      && mode != PTImode
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
      && mode_supports_pre_modify_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
					      reg_ok_strict, false)
	  || (!avoiding_indexed_address_p (mode)
	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && !quad_offset_p
      && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
				   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
	   "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
	   "strict = %d, reload = %s, code = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (mode),
	   reg_ok_strict,
	   (reload_completed ? "after" : "before"),
	   GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
	 is considered a legitimate address before reload, so there
	 are no offset restrictions in that case.  Note that this
	 condition is safe in strict mode because any address involving
	 virtual_stack_vars_rtx or arg_pointer_rtx would already have
	 been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
	  && XEXP (addr, 0) != arg_pointer_rtx
	  && GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
	}
      break;

    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
	 all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}
/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
	   ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
		|| GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
					     true, worst_case);
}
/* Determine the reassociation width to be used in reassociate_bb.
   This takes into account how many parallel operations we
   can actually do of a given type, and also the latency.
   P8:
     int add/sub 6/cycle
	 mul 2/cycle
     vect add/sub/mul 2/cycle
     fp   add/sub/mul 2/cycle
     dfp  1/cycle
*/

static int
rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			    machine_mode mode)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_POWER8:
    case PROCESSOR_POWER9:
      if (DECIMAL_FLOAT_MODE_P (mode))
	return 1;
      if (VECTOR_MODE_P (mode))
	return 4;
      if (INTEGRAL_MODE_P (mode))
	return opc == MULT_EXPR ? 4 : 6;
      if (FLOAT_MODE_P (mode))
	return 4;
      break;
    default:
      break;
    }
  return 1;
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */
  fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Output insns to set DEST equal to the constant SOURCE as a series of
   lis, ori and shl instructions and return TRUE.  */

bool
rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case E_SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
			      GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
			      gen_rtx_IOR (SImode, copy_rtx (temp),
					   GEN_INT (c & 0xffff))));
      break;

    case E_DImode:
      if (!TARGET_POWERPC64)
	{
	  rtx hi, lo;

	  hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
				      DImode);
	  lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
				      DImode);
	  emit_move_insn (hi, GEN_INT (c >> 32));
	  c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
	  emit_move_insn (lo, GEN_INT (c));
	}
      else
	rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
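
/* For illustration: the SImode path above corresponds to the usual
   two-instruction idiom, e.g. for the constant 0x12345678
	lis  rD,0x1234		# rD = 0x12340000
	ori  rD,rD,0x5678	# rD |= 0x00005678
   which is what the gen_rtx_SET/gen_rtx_IOR pair emits.  */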
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	   || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
      emit_move_insn (dest,
		      gen_rtx_ZERO_EXTEND (DImode,
					   gen_lowpart (SImode,
							copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
	   || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud3)));

      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (32)));
      if (ud2 != 0)
	emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
}
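
/* For illustration: the worst-case branch above, where all four
   halfwords ud1..ud4 are significant, materializes for instance
   0x123456789abcdef0 as
	lis   rD,0x1234		# rD = 0x0000000012340000
	ori   rD,rD,0x5678	# rD = 0x0000000012345678
	sldi  rD,rD,32		# rD = 0x1234567800000000
	oris  rD,rD,0x9abc	# rD = 0x123456789abc0000
	ori   rD,rD,0xdef0	# rD = 0x123456789abcdef0
   The earlier branches peel off the cheaper cases where the upper
   halfwords are all zeros or all ones.  */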
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
static rtvec
rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  switch (mode)
    {
    case E_V1TImode:
      subparts = 1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      subparts = 2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      subparts = 4;
      break;
    case E_V8HImode:
      subparts = 8;
      break;
    case E_V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
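
/* For illustration: for V4SImode the two loops above produce the
   selector { 2, 3, 0, 1 }, i.e. the two doubleword halves of the
   vector exchange places, which is exactly the swap that lxvd2x and
   stxvd2x perform on a little-endian target.  */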
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */
static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     scalar.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
						  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insn's that represent an lxvd2x and xxpermdi.  */
static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insn's that represent an xxpermdi and an stxvd2x.  */
static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !TARGET_P9_VECTOR
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
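
/* For illustration: an expand-time V4SImode load on a little-endian
   pre-POWER9 VSX target corresponds to
	lxvd2x   vD,0,rA	# load with the two doublewords swapped
	xxpermdi vD,vD,vD,2	# swap the doublewords back
   and a store is the mirror image.  Because both halves of the pair
   are explicit permutes at expand time, back-to-back load/store pairs
   can later have their redundant permutes optimized away.  */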
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

	(SUBREG:SF (REG:SI ...))
	(SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

	(SUBREG:SF (REG:SI)) and
	(SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to convert
   the bits before we can use a direct move or operate on the bits in the
   vector register as an integer type.

   Skip things like (set (SUBREG:SI (...) (SUBREG:SI (...)).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
	{
	  emit_insn (gen_movsi_from_sf (dest, inner_source));
	  return true;
	}

      if (mode == SFmode && inner_mode == SImode)
	{
	  emit_insn (gen_movsf_from_si (dest, inner_source));
	  return true;
	}
    }

  return false;
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
	       "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
	       GET_MODE_NAME (mode),
	       lra_in_progress,
	       reload_completed,
	       can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (CONST_WIDE_INT_P (operands[1])
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* This should be fixed with the introduction of CONST_WIDE_INT.  */
      gcc_unreachable ();
    }

  /* See if we need to special case SImode/SFmode SUBREG moves.  */
  if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
      && rs6000_emit_move_si_sf_subreg (dest, source, mode))
    return;

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
	  || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
      && ! (rs6000_slow_unaligned_access (SImode,
					  (MEM_ALIGN (operands[0]) > 32
					   ? 32 : MEM_ALIGN (operands[0])))
	    || rs6000_slow_unaligned_access (SImode,
					     (MEM_ALIGN (operands[1]) > 32
					      ? 32 : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* 128-bit constant floating-point values on Darwin should really be loaded
     as two parts.  However, this premature splitting is a problem when DFmode
     values can go into Altivec registers.  */
  if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
      && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
			simplify_gen_subreg (DFmode, operands[1], mode, 0),
			DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
					     GET_MODE_SIZE (DFmode)),
			simplify_gen_subreg (DFmode, operands[1], mode,
					     GET_MODE_SIZE (DFmode)),
			DFmode);
      return;
    }

  /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
     p1:SD) if p1 is not of floating point class and p0 is spilled as
     we can have no analogous movsd_store for this.  */
  if (lra_in_progress && mode == DDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
      && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[1]));

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
	}
      if (regno >= 0 && ! FP_REGNO_P (regno))
	{
	  mode = SDmode;
	  operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
	  operands[1] = SUBREG_REG (operands[1]);
	}
    }
  if (lra_in_progress
      && mode == SDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && (REG_P (operands[1])
	  || (GET_CODE (operands[1]) == SUBREG
	      && REG_P (SUBREG_REG (operands[1])))))
    {
      int regno = REGNO (GET_CODE (operands[1]) == SUBREG
			 ? SUBREG_REG (operands[1]) : operands[1]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[0]) != DDmode)
	    operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
	  emit_insn (gen_movsd_store (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }
  /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
     p:DD)) if p0 is not of floating point class and p1 is spilled as
     we can have no analogous movsd_load for this.  */
  if (lra_in_progress && mode == DDmode
      && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
      && GET_MODE (SUBREG_REG (operands[0])) == SDmode
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[0]));

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
	}
      if (regno >= 0 && ! FP_REGNO_P (regno))
	{
	  mode = SDmode;
	  operands[0] = SUBREG_REG (operands[0]);
	  operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
	}
    }
  if (lra_in_progress
      && mode == SDmode
      && (REG_P (operands[0])
	  || (GET_CODE (operands[0]) == SUBREG
	      && REG_P (SUBREG_REG (operands[0]))))
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      int regno = REGNO (GET_CODE (operands[0]) == SUBREG
			 ? SUBREG_REG (operands[0]) : operands[0]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[1]) != DDmode)
	    operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
	  emit_insn (gen_movsd_load (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }

  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case E_HImode:
    case E_QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_TFmode:
    case E_TDmode:
    case E_IFmode:
    case E_KFmode:
      if (FLOAT128_2REG_P (mode))
	rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case E_DFmode:
    case E_DDmode:
    case E_SFmode:
    case E_SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2SFmode:
    case E_V2SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_SImode:
    case E_DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && use_toc_relative_ref (operands[1], mode))
	operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && GET_CODE (operands[1]) != HIGH
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && (num_insns_constant (operands[1], mode)
			   > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && !toc_relative_expr_p (operands[1], false, NULL, NULL)
	       && (TARGET_CMODEL == CMODEL_SMALL
		   || can_create_pseudo_p ()
		   || (REG_P (operands[0])
		       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
	{

#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (operands[0], operands[1]));
	      break;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      emit_insn (gen_add3_insn (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && use_toc_relative_ref (XEXP (operands[1], 0), mode))
	    {
	      rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
						 operands[0]);
	      operands[1] = gen_const_mem (mode, tocref);
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case E_TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
	rs6000_eliminate_indexed_memrefs (operands);
      break;

    case E_PTImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = validize_mem (operands[1]);

  emit_insn (gen_rtx_SET (operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)	\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   or vector type.  If a non-floating point or vector type is found, or
   if a floating point or vector type that doesn't match a non-VOIDmode
   *MODEP is found, then return -1, otherwise return the count in the
   sub-tree.  */

static int
rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
{
  machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 2;

      break;

    case VECTOR_TYPE:
      if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
	return -1;

      /* Use V4SImode as representative of all 128-bit vector types.  */
      size = int_size_in_bytes (type);
      switch (size)
	{
	case 16:
	  mode = V4SImode;
	  break;
	default:
	  return -1;
	}

      if (*modep == VOIDmode)
	*modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
	 equivalent for the purposes of being homogeneous aggregates
	 if they are the same size.  */
      if (*modep == mode)
	return 1;

      break;

    case ARRAY_TYPE:
      {
	int count;
	tree index = TYPE_DOMAIN (type);

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
	if (count == -1
	    || !index
	    || !TYPE_MAX_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
	    || !TYPE_MIN_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
	    || count < 0)
	  return -1;

	count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
		  - tree_to_uhwi (TYPE_MIN_VALUE (index)));

	/* There must be no padding.  */
	if (wi::to_wide (TYPE_SIZE (type))
	    != count * GET_MODE_BITSIZE (*modep))
	  return -1;

	return count;
      }

    case RECORD_TYPE:
      {
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count += sub_count;
	  }

	/* There must be no padding.  */
	if (wi::to_wide (TYPE_SIZE (type))
	    != count * GET_MODE_BITSIZE (*modep))
	  return -1;

	return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	/* These aren't very interesting except in a degenerate case.  */
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count = count > sub_count ? count : sub_count;
	  }

	/* There must be no padding.  */
	if (wi::to_wide (TYPE_SIZE (type))
	    != count * GET_MODE_BITSIZE (*modep))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
				       machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
      && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      if (elt_mode)
		*elt_mode = field_mode;
	      if (n_elts)
		*n_elts = field_count;
	      return true;
	    }
	}
    }

  *elt_mode = mode;
  *n_elts = 1;
  return false;
}
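
/* For illustration: under the ELFv2 ABI, struct { double a, b; } is
   discovered as a homogeneous aggregate with *ELT_MODE = DFmode and
   *N_ELTS = 2, so it travels in two consecutive FPRs, whereas
   struct { double a; int b; } fails rs6000_aggregate_candidate and is
   passed like any other aggregate in GPRs/memory.  */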
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
					     NULL, NULL))
    return false;

  /* The ELFv2 ABI returns aggregates up to 16B in registers */
  if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
    return false;

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (OPT_Wpsabi, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return true;

  return false;
}
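
/* For illustration of the rules above under the ELFv2 ABI:
   struct { double a, b; } comes back in FPRs (homogeneous aggregate);
   struct { char c[16]; } comes back in r3/r4 (<= 16 bytes); and
   struct { char c[24]; } is returned in memory.  */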
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
	      == PAD_UPWARD));
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args,
		      tree fndecl ATTRIBUTE_UNUSED,
		      machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);
  cum->libcall = libcall;

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   get_tree_code_name (TREE_CODE (ret_type)));
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
	{
	  tree return_type;

	  if (fntype)
	    {
	      return_type = TREE_TYPE (fntype);
	      return_mode = TYPE_MODE (return_type);
	    }
	  else
	    return_type = lang_hooks.types.type_for_mode (return_mode, 0);

	  if (return_type != NULL)
	    {
	      if (TREE_CODE (return_type) == RECORD_TYPE
		  && TYPE_TRANSPARENT_AGGR (return_type))
		{
		  return_type = TREE_TYPE (first_field (return_type));
		  return_mode = TYPE_MODE (return_type);
		}
	      if (AGGREGATE_TYPE_P (return_type)
		  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
		      <= 8))
		rs6000_returns_struct = true;
	    }
	  if (SCALAR_FLOAT_MODE_P (return_mode))
	    {
	      rs6000_passes_float = true;
	      if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
		  && (FLOAT128_IBM_P (return_mode)
		      || FLOAT128_IEEE_P (return_mode)
		      || (return_type != NULL
			  && (TYPE_MAIN_VARIANT (return_type)
			      == long_double_type_node))))
		rs6000_passes_long_double = true;
	    }
	  if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
	      || PAIRED_VECTOR_MODE (return_mode))
	    rs6000_passes_vector = true;
	}
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use %qs"
	     " to enable them", "-maltivec");
    }
}
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}

/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}
/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			      machine_mode mode,
			      int *punsignedp ATTRIBUTE_UNUSED,
			      const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}

static inline bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
}
/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
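
/* For illustration: with the SVR4 (V.4) ABI and hard float, a DFmode
   argument is a candidate for f1..f8 (up to FP_ARG_V4_MAX_REG), as is
   SFmode when TARGET_SINGLE_FLOAT, while a complex IBM long double
   (ICmode) is deliberately excluded above and travels in GPRs.  */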
/* Implement TARGET_FUNCTION_ARG_PADDING.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

static pad_direction
rs6000_function_arg_padding (machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return PAD_DOWNWARD;
	}
      return PAD_UPWARD;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return PAD_UPWARD;
    }

  /* Fall back to the default.  */
  return default_function_arg_padding (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (machine_mode mode, const_tree type)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && !is_complex_IBM_long_double (mode)
	      && FLOAT128_2REG_P (mode))))
    return 64;
  else if (FLOAT128_VECTOR_P (mode))
    return 128;
  else if (PAIRED_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;

  /* Aggregate types that need > 8 byte alignment are quadword-aligned
     in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
     -mcompat-align-parm is used.  */
  if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
       || DEFAULT_ABI == ABI_ELFv2)
      && type && TYPE_ALIGN (type) > 64)
    {
      /* "Aggregate" means any AGGREGATE_TYPE except for single-element
	 or homogeneous float/vector aggregates here.  We already handled
	 vector aggregates above, but still need to check for float here. */
      bool aggregate_p = (AGGREGATE_TYPE_P (type)
			  && !SCALAR_FLOAT_MODE_P (elt_mode));

      /* We used to check for BLKmode instead of the above aggregate type
	 check.  Warn when this results in any difference to the ABI.  */
      if (aggregate_p != (mode == BLKmode))
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the ABI of passing aggregates with %d-byte alignment"
		      " has changed in GCC 5",
		      (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
	    }
	}

      if (aggregate_p)
	return 128;
    }

  /* Similar for the Darwin64 ABI.  Note that for historical reasons we
     implement the "aggregate type" check as a BLKmode check here; this
     means certain aggregate types are in fact not aligned.  */
  if (TARGET_MACHO && rs6000_darwin64_abi
      && mode == BLKmode
      && type && TYPE_ALIGN (type) > 64)
    return 128;

  return PARM_BOUNDARY;
}
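
/* For illustration of the cases above: under the V.4 ABI a DImode or
   DFmode argument answers 64; an Altivec/VSX vector such as V4SImode
   answers 128; and under ELFv2 an aggregate whose declared alignment
   exceeds 64 bits is likewise quadword-aligned, with the -Wpsabi note
   flagging where this differs from the pre-GCC 5 BLKmode heuristic.  */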
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}

/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
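
/* Worked example: on ELFv2 the save area starts 4 words past the
   SP-relative base (rs6000_parm_offset).  For a quadword-aligned
   argument on a 64-bit target, ALIGN above is 128 / 64 - 1 = 1; with
   NWORDS = 1 already used, the result is 1 + (-(4 + 1) & 1) = 2, i.e.
   one padding word is skipped so the argument starts on an even word
   and hence on a 16-byte boundary.  */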
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
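
/* For illustration: a 12-byte BLKmode struct occupies (12 + 3) >> 2 = 3
   words on a 32-bit target and (12 + 7) >> 3 = 2 doublewords on a
   64-bit one; a DFmode scalar is 2 words and 1 word respectively.  */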
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
	  || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists ())
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	}
    }

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->fregno += n_fpregs;
	    /* Single-precision floats present a special problem for
	       us, because they are smaller than an 8-byte GPR, and so
	       the structure-packing rules combined with the standard
	       varargs behavior mean that we want to pack float/float
	       and float/int combinations into a single register's
	       space.  This is complicated by the arg advance flushing,
	       which works on arbitrarily large groups of int-type
	       fields.  */
	    if (mode == SFmode)
	      {
		if (cum->floats_in_gpr == 1)
		  {
		    /* Two floats in a word; count the word and reset
		       the float count.  */
		    cum->words++;
		    cum->floats_in_gpr = 0;
		  }
		else if (bitpos % 64 == 0)
		  {
		    /* A float at the beginning of an 8-byte word;
		       count it and put off adjusting cum->words until
		       we see if a arg advance flush is going to do it
		       for us.  */
		    cum->floats_in_gpr++;
		  }
		else
		  {
		    /* The float is at the end of a word, preceded
		       by integer fields, so the arg advance flush
		       just above has already set cum->words and
		       everything is taken care of.  */
		  }
	      }
	    else
	      cum->words += n_fpregs;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
			       const_tree type, bool named, int depth)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  rs6000_passes_float = true;
	  if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
	      && (FLOAT128_IBM_P (mode)
		  || FLOAT128_IEEE_P (mode)
		  || (type != NULL
		      && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
	    rs6000_passes_long_double = true;
	}
      if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	  || (PAIRED_VECTOR_MODE (mode)
	      && !cum->stdarg
	      && cum->sysv_gregno <= GP_ARG_MAX_REG))
	rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
	{
	  cum->vregno += n_elts;

	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use %qs"
		   " to enable them", "-maltivec");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	       && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  cum->floats_in_gpr = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT, 1);
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
		   cum->words, TYPE_ALIGN (type), size);
	  fprintf (stderr,
		   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
		   cum->nargs_prototype, cum->prototype,
		   GET_MODE_NAME (mode));
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || FLOAT128_IBM_P (mode)
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
	     As does any other 2 word item such as complex int due to a
	     historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long is aligned on the stack.  So are other 2 word
		 items such as complex int due to a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past when we've started
	     spilling to the stack indicates the fact that we've started
	     spilling to the stack to expand_builtin_saveregs.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}

static void
rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists (&mode))
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
	    switch (mode)
	      {
	      case E_SCmode: mode = SFmode; break;
	      case E_DCmode: mode = DFmode; break;
	      case E_TCmode: mode = TFmode; break;
	      default: break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	      {
		gcc_assert (cum->fregno == FP_ARG_MAX_REG
			    && (mode == TFmode || mode == TDmode));
		/* Long double or _Decimal128 split over regs and memory.  */
		mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
		cum->use_stack = 1;
	      }
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (FLOAT128_2REG_P (mode))
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
				      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
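
/* Illustrative sketch only (not generated output; register names assume
   nothing has been allocated yet in CUM): for a Darwin64 argument such as

     struct { double d; int i; }   // 12 bytes

   the recursion above would build roughly

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
		    (expr_list (reg:SI r4) (const_int 8))])

   i.e. the FP field rides in an FPR while the trailing integer chunk is
   flushed into the next integer register by _record_arg_flush.  */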
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
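
/* Example (illustrative only): with -m32 -mpowerpc64, an 8-byte DImode
   argument whose first word lands in the last GPR (align_words == 7)
   yields roughly

     (parallel:DI [(expr_list (nil) (const_int 0))
		   (expr_list (reg:SI r10) (const_int 0))])

   where the NULL_RTX element says "also in memory" and the single SImode
   piece covers the half that still fits in r10.  */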
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
			   int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
	  || mode == BLKmode
	  || (TARGET_32BIT && TARGET_POWERPC64))
	{
	  /* If this is partially on the stack, then we only
	     include the portion actually in registers here.  */
	  machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	  int i = 0;

	  if (align_words + n_words > GP_ARG_NUM_REG)
	    {
	      /* Not all of the arg fits in gprs.  Say that it goes in memory
		 too, using a magic NULL_RTX component.  Also see comment in
		 rs6000_mixed_function_arg for why the normal
		 function_arg_partial_nregs scheme doesn't work in this case. */
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  do
	    {
	      rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
	      rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }
	  while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
	}
      else
	{
	  /* The whole arg fits in gprs.  */
	  rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
	}
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
	return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
	return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE)))
	  && TARGET_HARD_FLOAT)
	return GEN_INT (cum->call_cookie
			| ((cum->fregno == FP_ARG_MIN_REG)
			   ? CALL_V4_SET_FP_ARGS
			   : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
					     /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
	 Library support functions for IEEE 128-bit are assumed to not need
	 the value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
	  && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
	{
	  int align_words = ROUND_UP (cum->words, 2);
	  k = rs6000_psave_function_arg (mode, type, align_words, rvec);
	}

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
	{
	  r = gen_rtx_REG (elt_mode, cum->vregno + i);
	  off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	}

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
	     As does any other 2 word item such as complex int due to a
	     historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
	{
	  rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	  int fpr_words;

	  /* Do we also need to pass this argument in the parameter
	     save area?  */
	  if (type && (cum->nargs_prototype <= 0
		       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
			   && TARGET_XL_COMPAT
			   && align_words >= GP_ARG_NUM_REG)))
	    k = rs6000_psave_function_arg (mode, type, align_words, rvec);

	  /* Describe where this argument goes in the fprs.  */
	  for (i = 0; i < n_elts
		      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
	    {
	      /* Check if the argument is split over registers and memory.
		 This can only ever happen for long double or _Decimal128;
		 complex types are handled via split_complex_arg.  */
	      machine_mode fmode = elt_mode;
	      if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
		{
		  gcc_assert (FLOAT128_2REG_P (fmode));
		  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
		}

	      r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
	      off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }

	  /* If there were not enough FPRs to hold the argument, the rest
	     usually goes into memory.  However, if the current position
	     is still within the register parameter area, a portion may
	     actually have to go into GPRs.

	     Note that it may happen that the portion of the argument
	     passed in the first "half" of the first GPR was already
	     passed in the last FPR as well.

	     For unnamed arguments, we already set up GPRs to cover the
	     whole argument in rs6000_psave_function_arg, so there is
	     nothing further to do at this point.  */
	  fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
	  if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
	      && cum->nargs_prototype > 0)
	    {
	      static bool warned;

	      machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	      int n_words = rs6000_arg_size (mode, type);

	      align_words += fpr_words;
	      n_words -= fpr_words;

	      do
		{
		  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
		  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		}
	      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing homogeneous float aggregates"
			  " has changed in GCC 5");
		}
	    }

	  return rs6000_finish_function_arg (mode, rvec, k);
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
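
/* Worked example (a sketch, assuming 64-bit ELFv2 with f1..f13 free):
   a named argument of type

     struct { double a, b; }   // homogeneous aggregate, elt_mode DFmode,
			       // n_elts == 2

   is described roughly as

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
		    (expr_list (reg:DF f2) (const_int 8))])

   while an unprototyped call would additionally get GPR/memory entries
   from rs6000_psave_function_arg so the callee can also find the value
   in the parameter save area.  */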
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
	 memory) as well as VRs, we do not use the partial bytes mechanism;
	 instead, rs6000_function_arg will return a PARALLEL including a memory
	 element as necessary.  Library support functions for IEEE 128-bit are
	 assumed to not need the value passed both in GPRs and in vector
	 registers.  */
      if (TARGET_64BIT && !cum->prototype
	  && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
	return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
	ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as FPRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
	{
	  /* Compute number of bytes / words passed in FPRs.  If there
	     is still space available in the register parameter area
	     *after* that amount, a part of the argument will be passed
	     in GPRs.  In that case, the total amount passed in any
	     registers is equal to the amount that would have been passed
	     in GPRs if everything were passed there, so we fall back to
	     the GPR code below to compute the appropriate value.  */
	  int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
		     * MIN (8, GET_MODE_SIZE (elt_mode)));
	  int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

	  if (align_words + fpr_words < GP_ARG_NUM_REG)
	    passed_in_gprs = true;
	  else
	    ret = fpr;
	}
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
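
/* Numeric sketch (illustrative): under 64-bit AIX/ELFv2 with
   cum->fregno already at f12 and a homogeneous aggregate of four
   doubles, only f12 and f13 remain, so fpr = 2 * 8 = 16 bytes; if
   align_words + 2 words still lie inside the 8 GPR slots, the GPR
   fallback above computes the result instead, otherwise the function
   returns 16.  */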
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (OPT_Wpsabi, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
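
/* Quick reference for the checks above (illustrative, V4 32-bit):

     struct S { int x[4]; } s;	     aggregate	    -> by reference
     long double ld;  (IEEE 128-bit) TFmode/KFmode  -> by reference
     int v[n];	      (variable size, size < 0)	    -> by reference
     32-byte synthetic GCC vector		    -> by reference,
						       with one -Wpsabi note

   In every case the caller makes a memory copy and passes its address,
   so the callee effectively receives a pointer.  */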
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
	    || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
			 TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
	result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
	   parm && parm != void_list_node;
	   parm = TREE_CHAIN (parm))
	if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
	  return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
	if (rs6000_parm_needs_stack (args_so_far, arg_type))
	  return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

static int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
	 a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
	reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
	reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
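
/* Example: for a 64-bit ELFv2 call to a fully prototyped

     double f (double, double);

   every parameter lives in FPRs, so the function above reports 0 and
   the caller may omit the 64-byte register parameter save area; an
   unprototyped or varargs callee forces the full 64 bytes.  */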
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	/* V4 va_list_gpr_size counts number of registers needed.  */
	n_gpr = cfun->va_list_gpr_size;
      else
	/* char * va_list instead counts number of bytes needed.  */
	n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
	nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			 ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			  ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
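
/* Layout sketch of the V4 varargs save area built above (illustrative,
   for -m32 with all 8 GPRs and 8 FPRs dumped):

     reg_save_area:  r3 r4 r5 r6 r7 r8 r9 r10	(8 * 4 = 32 bytes)
		     f1 f2 ... f8		(8 * 8 = 64 bytes)

   va_arg then indexes this block via the gpr/fpr counters, falling back
   to overflow_arg_area once a counter passes 8.  */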
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
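
/* In C source terms, the V4 record built above corresponds roughly to
   (a reference sketch, field types as created here):

     typedef struct __va_list_tag
     {
       unsigned char gpr;	  // index of next saved GPR (0..8)
       unsigned char fpr;	  // index of next saved FPR (0..8)
       unsigned short reserved;	  // the named padding
       void *overflow_arg_area;	  // next stack-passed argument
       void *reg_save_area;	  // block spilled by the prologue
     } __va_list_tag;

   with va_list being a one-element array of this record.  */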
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
	rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
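
/* Example (illustrative, V4 32-bit): for

     void f (int fmt, ...);

   the one named argument consumes r3, so va_start stores gpr = 1 and
   fpr = 0, points overflow_arg_area at the caller's first stack word
   for unnamed arguments (words == 0 here), and points reg_save_area at
   the block into which setup_incoming_varargs spilled r4..r10 and
   f1..f8.  */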
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of relevance.
     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (((TARGET_MACHO
	&& rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself. */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
	 item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
	t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for the special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
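
/* Shape of the code gimplified above for a V4 'va_arg (ap, double)'
   (illustrative pseudo-gimple only; offsets assume hard double float,
   so n_reg == 1, sav_ofs == 32, sav_scale == 8, align == 8):

     if (ap.fpr >= 8) goto lab_false;
     addr = ap.reg_save_area + 32 + ap.fpr++ * 8;
     goto lab_over;
   lab_false:
     addr = (ap.overflow_arg_area + 7) & -8;
     ap.overflow_arg_area = addr + 8;
   lab_over:
     result = *(double *) addr;  */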
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
		 "internal error: builtin function %qs already processed",
		 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 global memory.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions. */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};
13632 #undef RS6000_BUILTIN_0
13633 #undef RS6000_BUILTIN_1
13634 #undef RS6000_BUILTIN_2
13635 #undef RS6000_BUILTIN_3
13636 #undef RS6000_BUILTIN_A
13637 #undef RS6000_BUILTIN_D
13638 #undef RS6000_BUILTIN_H
13639 #undef RS6000_BUILTIN_P
13640 #undef RS6000_BUILTIN_Q
13641 #undef RS6000_BUILTIN_X
13643 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13644 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13645 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13646 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13647 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13648 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13649 { MASK, ICODE, NAME, ENUM },
13651 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13652 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13653 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13654 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13656 static const struct builtin_description bdesc_dst
[] =
13658 #include "rs6000-builtin.def"
13661 /* Simple binary operations: VECc = foo (VECa, VECb). */
13663 #undef RS6000_BUILTIN_0
13664 #undef RS6000_BUILTIN_1
13665 #undef RS6000_BUILTIN_2
13666 #undef RS6000_BUILTIN_3
13667 #undef RS6000_BUILTIN_A
13668 #undef RS6000_BUILTIN_D
13669 #undef RS6000_BUILTIN_H
13670 #undef RS6000_BUILTIN_P
13671 #undef RS6000_BUILTIN_Q
13672 #undef RS6000_BUILTIN_X
13674 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13675 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13676 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13677 { MASK, ICODE, NAME, ENUM },
13679 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13680 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13681 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13682 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13683 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13684 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13685 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13687 static const struct builtin_description bdesc_2arg
[] =
13689 #include "rs6000-builtin.def"
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};
/* PAIRED predicates.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};
/* ABS* operations.  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};
/* Simple no-argument operations: result = __builtin_darn_32 ()  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};
/* HTM builtins.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
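/* Illustrative sketch of the X-macro scheme above (the real entries live
   in rs6000-builtin.def).  Assuming a def entry of the form

     RS6000_BUILTIN_A (ALTIVEC_BUILTIN_ABS_V4SI,
                       "__builtin_altivec_abs_v4si",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_ABS,
                       CODE_FOR_absv4si2)

   the RS6000_BUILTIN_A definition used for bdesc_abs expands it into the
   single initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_absv4si2,
       "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },

   while every other RS6000_BUILTIN_* macro expands to nothing, so each
   bdesc_* table picks out exactly one class of builtins from the same
   include file.  */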
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}
const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
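/* Usage note (illustrative): a call such as __builtin_altivec_vspltisb (5)
   takes the 5-bit signed literal path above, while a non-constant argument
   or a value outside [-16, 15] is diagnosed and expands to a zero vector.  */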
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return NULL_RTX;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfptstsfi_eq_dd
	   || icode == CODE_FOR_dfptstsfi_lt_dd
	   || icode == CODE_FOR_dfptstsfi_gt_dd
	   || icode == CODE_FOR_dfptstsfi_unordered_dd
	   || icode == CODE_FOR_dfptstsfi_eq_td
	   || icode == CODE_FOR_dfptstsfi_lt_td
	   || icode == CODE_FOR_dfptstsfi_gt_td
	   || icode == CODE_FOR_dfptstsfi_unordered_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
	{
	  error ("argument 1 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_xststdcqp_kf
	   || icode == CODE_FOR_xststdcqp_tf
	   || icode == CODE_FOR_xststdcdp
	   || icode == CODE_FOR_xststdcsp
	   || icode == CODE_FOR_xvtstdcdp
	   || icode == CODE_FOR_xvtstdcsp)
    {
      /* Only allow 7-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
	{
	  error ("argument 2 must be a 7-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_unpackv1ti
	   || icode == CODE_FOR_unpackkf
	   || icode == CODE_FOR_unpacktf
	   || icode == CODE_FOR_unpackif
	   || icode == CODE_FOR_unpacktd)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
	{
	  error ("argument 2 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
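/* Usage note (illustrative): the vsplt* cases above back vec_splat, so the
   element selector must be a literal; e.g. __builtin_altivec_vspltw (v, 3)
   is accepted, while a variable selector is rejected with the 5-bit
   unsigned literal error and expands to a zero vector.  */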
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = SImode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of %qs must be a constant",
	     "__builtin_altivec_predicate");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  /* Note that for many of the relevant operations (e.g. cmpne or
     cmpeq) with float or double operands, it makes more sense for the
     mode of the allocated scratch register to select a vector of
     integer.  But the choice to copy the mode of operand 0 was made
     long ago and there are no plans to change it.  */
  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_altivec_predicate");
      break;
    }

  return target;
}
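/* Illustrative mapping (a sketch, not normative): the vec_all_* intrinsics
   pass cr6_form 2, testing the CR6 "all elements true" (LT) bit, while the
   vec_any_* forms pass cr6_form 1, the reversed "not all elements false"
   (EQ) test, both over the same record-form vector compare.  */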
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    case E_V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode,
		    gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
}
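/* Worked example (a sketch): for V4SImode the swap4 table above, read back
   as a little-endian V16QI constant, yields the vperm control vector that
   exchanges word 0 with word 3 and word 1 with word 2, i.e. it reverses
   the four 32-bit elements of the source register.  */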
/* Return a constant vector (used as a permute control vector) that
   implements an endianness swap of the elements for the given vector
   mode.  */
rtx
swap_endian_selector_for_mode (machine_mode mode)
{
  unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};

  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V1TImode:
      swaparray = swap1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						     gen_rtvec_v (16, perm)));
}
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-
   reversing permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (op0, vperm));
}
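/* Design note (an interpretation): the PARALLEL pairs the ordinary vector
   load with an UNSPEC marker so later RTL passes preserve the special
   lvxl/lve*x semantics, and the trailing vperm puts the register into the
   big-endian element order that -maltivec=be promises.  */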
/* Generate code for a "stvxl" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
   specified.  Issue the store preceded by an element-reversing permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (gen_rtx_SET (op0, stvx));
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  /* For LVX, express the RTL accurately by ANDing the address with -16.
     LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_lvx_v2df_2op
      || icode == CODE_FOR_altivec_lvx_v2di_2op
      || icode == CODE_FOR_altivec_lvx_v4sf_2op
      || icode == CODE_FOR_altivec_lvx_v4si_2op
      || icode == CODE_FOR_altivec_lvx_v8hi_2op
      || icode == CODE_FOR_altivec_lvx_v16qi_2op)
    {
      rtx rawaddr;
      if (op0 == const0_rtx)
	rawaddr = op1;
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
	}
      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);

      /* For -maltivec=be, emit the load and follow it up with a
	 permute to swap the elements.  */
      if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
	{
	  rtx temp = gen_reg_rtx (tmode);
	  emit_insn (gen_rtx_SET (temp, addr));

	  rtx sel = swap_selector_for_mode (tmode);
	  rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
				      UNSPEC_VPERM);
	  emit_insn (gen_rtx_SET (target, vperm));
	}
      else
	emit_insn (gen_rtx_SET (target, addr));
    }
  else
    {
      if (op0 == const0_rtx)
	addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			      gen_rtx_PLUS (Pmode, op1, op0));
	}

      pat = GEN_FCN (icode) (target, addr);
      if (! pat)
	return 0;
      emit_insn (pat);
    }

  return target;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;
  machine_mode mode2 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return NULL_RTX;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return NULL_RTX;

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (op0, op1, op2);
  if (pat)
    emit_insn (pat);

  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr, rawaddr;
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode smode = insn_data[icode].operand[1].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  op2 = copy_to_mode_reg (mode2, op2);

  /* For STVX, express the RTL accurately by ANDing the address with -16.
     STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_stvx_v2df_2op
      || icode == CODE_FOR_altivec_stvx_v2di_2op
      || icode == CODE_FOR_altivec_stvx_v4sf_2op
      || icode == CODE_FOR_altivec_stvx_v4si_2op
      || icode == CODE_FOR_altivec_stvx_v8hi_2op
      || icode == CODE_FOR_altivec_stvx_v16qi_2op)
    {
      if (op1 == const0_rtx)
	rawaddr = op2;
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
	}

      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (tmode, addr);

      op0 = copy_to_mode_reg (tmode, op0);

      /* For -maltivec=be, emit a permute to swap the elements, followed
	 by the store.  */
      if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
	{
	  rtx temp = gen_reg_rtx (tmode);
	  rtx sel = swap_selector_for_mode (tmode);
	  rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
				      UNSPEC_VPERM);
	  emit_insn (gen_rtx_SET (temp, vperm));
	  emit_insn (gen_rtx_SET (addr, temp));
	}
      else
	emit_insn (gen_rtx_SET (addr, op0));
    }
  else
    {
      if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
	op0 = copy_to_mode_reg (smode, op0);

      if (op1 == const0_rtx)
	addr = gen_rtx_MEM (tmode, op2);
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
	}

      pat = GEN_FCN (icode) (addr, op0);
      if (pat)
	emit_insn (pat);
    }

  return NULL_RTX;
}
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  if (!TARGET_POWERPC64
      && (fcode == HTM_BUILTIN_TABORTDC
	  || fcode == HTM_BUILTIN_TABORTDCI))
    {
      size_t uns_fcode = (size_t)fcode;
      const char *name = rs6000_builtin_info[uns_fcode].name;
      error ("builtin %qs is only valid in 64-bit mode", name);
      return const0_rtx;
    }

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;
	const struct insn_operand_data *insn_op;
	bool uses_spr = (attr & RS6000_BTC_SPR);
	rtx cr = NULL_RTX;

	if (uses_spr)
	  icode = rs6000_htm_spr_icode (nonvoid);
	insn_op = &insn_data[icode].operand[0];

	if (nonvoid)
	  {
	    machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| (uses_spr && !(*insn_op->predicate) (target, tmode)))
	      target = gen_reg_rtx (tmode);
	    if (uses_spr)
	      op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	  {
	    if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	      return const0_rtx;

	    insn_op = &insn_data[icode].operand[nopnds];

	    op[nopnds] = expand_normal (arg);

	    if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	      {
		if (!strcmp (insn_op->constraint, "n"))
		  {
		    int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		    if (!CONST_INT_P (op[nopnds]))
		      error ("argument %d must be an unsigned literal", arg_num);
		    else
		      error ("argument %d is an unsigned literal that is "
			     "out of range", arg_num);
		    return const0_rtx;
		  }
		op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	      }

	    nopnds++;
	  }

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (uses_spr)
	  {
	    machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
	    op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
	  }
	/* If this builtin accesses a CR, then pass in a scratch
	   CR as the last operand.  */
	else if (attr & RS6000_BTC_CR)
	  { cr = gen_reg_rtx (CCmode);
	    op[nopnds++] = cr;
	  }

	if (flag_checking)
	  {
	    int expected_nopnds = 0;
	    if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	      expected_nopnds = 1;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	      expected_nopnds = 2;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	      expected_nopnds = 3;
	    if (!(attr & RS6000_BTC_VOID))
	      expected_nopnds += 1;
	    if (uses_spr)
	      expected_nopnds += 2;

	    gcc_assert (nopnds == expected_nopnds
			&& nopnds <= MAX_HTM_OPERANDS);
	  }

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	if (attr & RS6000_BTC_CR)
	  {
	    if (fcode == HTM_BUILTIN_TBEGIN)
	      {
		/* Emit code to set TARGET to true or false depending on
		   whether the tbegin. instruction successfully or failed
		   to start a transaction.  We do this by placing the 1's
		   complement of CR's EQ bit into TARGET.  */
		rtx scratch = gen_reg_rtx (SImode);
		emit_insn (gen_rtx_SET (scratch,
					gen_rtx_EQ (SImode, cr,
						    const0_rtx)));
		emit_insn (gen_rtx_SET (target,
					gen_rtx_XOR (SImode, scratch,
						     GEN_INT (1))));
	      }
	    else
	      {
		/* Emit code to copy the 4-bit condition register field
		   CR into the least significant end of register TARGET.  */
		rtx scratch1 = gen_reg_rtx (SImode);
		rtx scratch2 = gen_reg_rtx (SImode);
		rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
		emit_insn (gen_movcc (subreg, cr));
		emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
		emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
	      }
	  }

	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  *expandedp = false;
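  return NULL_RTX;
}

/* Illustrative use (a sketch): for __builtin_tbegin (0), the RS6000_BTC_CR
   path above captures CR0 from the tbegin. instruction and stores the
   complement of its EQ bit into TARGET, so the builtin evaluates to 1 when
   a transaction started and 0 when it failed immediately.  */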
/* Expand the CPU builtin in FCODE and store the result in TARGET.  */
static rtx
cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
		    rtx target)
{
  /* __builtin_cpu_init () is a nop, so expand to nothing.  */
  if (fcode == RS6000_BUILTIN_CPU_INIT)
    return const0_rtx;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
  /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
     to a STRING_CST.  */
  if (TREE_CODE (arg) == ARRAY_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
      && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
      && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
    arg = TREE_OPERAND (arg, 0);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("builtin %qs only accepts a string argument",
	     rs6000_builtin_info[(size_t) fcode].name);
      return const0_rtx;
    }

  if (fcode == RS6000_BUILTIN_CPU_IS)
    {
      const char *cpu = TREE_STRING_POINTER (arg);
      rtx cpuid = NULL_RTX;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
	if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
	  {
	    /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM.  */
	    cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
	    break;
	  }
      if (cpuid == NULL_RTX)
	{
	  /* Invalid CPU argument.  */
	  error ("cpu %qs is an invalid argument to builtin %qs",
		 cpu, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx platform = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (TCB_PLATFORM_OFFSET)));
      emit_move_insn (platform, tcbmem);
      emit_insn (gen_eqsi3 (target, platform, cpuid));
    }
  else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
    {
      const char *hwcap = TREE_STRING_POINTER (arg);
      rtx mask = NULL_RTX;
      int hwcap_offset;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
	if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
	  {
	    mask = GEN_INT (cpu_supports_info[i].mask);
	    hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
	    break;
	  }
      if (mask == NULL_RTX)
	{
	  /* Invalid HWCAP argument.  */
	  error ("%s %qs is an invalid argument to builtin %qs",
		 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx tcb_hwcap = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (hwcap_offset)));
      emit_move_insn (tcb_hwcap, tcbmem);
      rtx scratch1 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
      rtx scratch2 = gen_reg_rtx (SImode);
      emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
      emit_insn (gen_rtx_SET (target,
			      gen_rtx_XOR (SImode, scratch2, const1_rtx)));
    }
  else
    gcc_unreachable ();

  /* Record that we have expanded a CPU builtin, so that we can later
     emit a reference to the special symbol exported by LIBC to ensure we
     do not link against an old LIBC that doesn't support this feature.  */
  cpu_builtin_p = true;

#else
  warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
	   "capability bits", rs6000_builtin_info[(size_t) fcode].name);

  /* For old LIBCs, always return FALSE.  */
  emit_move_insn (target, GEN_INT (0));
#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */

  return target;
}
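/* Usage sketch (illustrative): with a glibc that exports the TCB fields,

     if (__builtin_cpu_is ("power9")) ...
     if (__builtin_cpu_supports ("vsx")) ...

   each expands to a single thread-pointer-relative load plus the short
   compare or mask-and-compare sequence generated above; without
   TARGET_LIBC_PROVIDES_HWCAP_IN_TCB the builtins warn and fold to 0.  */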
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v2df
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxpermdi_v2df_be
	   || icode == CODE_FOR_vsx_xxpermdi_v2di_be
	   || icode == CODE_FOR_vsx_xxpermdi_v1ti
	   || icode == CODE_FOR_vsx_xxpermdi_v4sf
	   || icode == CODE_FOR_vsx_xxpermdi_v4si
	   || icode == CODE_FOR_vsx_xxpermdi_v8hi
	   || icode == CODE_FOR_vsx_xxpermdi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di
	   || icode == CODE_FOR_bcdadd
	   || icode == CODE_FOR_bcdadd_lt
	   || icode == CODE_FOR_bcdadd_eq
	   || icode == CODE_FOR_bcdadd_gt
	   || icode == CODE_FOR_bcdsub
	   || icode == CODE_FOR_bcdsub_lt
	   || icode == CODE_FOR_bcdsub_eq
	   || icode == CODE_FOR_bcdsub_gt)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_denbcd_dd
	   || icode == CODE_FOR_dfp_denbcd_td)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x1)
	{
	  error ("argument 1 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_dscli_dd
	   || icode == CODE_FOR_dfp_dscli_td
	   || icode == CODE_FOR_dfp_dscri_dd
	   || icode == CODE_FOR_dfp_dscri_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x3f)
	{
	  error ("argument 2 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || wi::geu_p (wi::to_wide (arg1), 2))
	{
	  error ("argument 2 must be 0 or 1");
	  return CONST0_RTX (tmode);
	}

      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || wi::geu_p (wi::to_wide (arg2), 16))
	{
	  error ("argument 3 must be in the range 0..15");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_load_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_store_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;

	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal",
		   d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
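/* Usage sketch (illustrative): a vector constructor such as
   (vector int){a, b, c, d}, routed here through the VEC_INIT_V4SI builtin,
   supplies four call arguments; each is narrowed to the element mode and
   handed to rs6000_expand_vector_init wrapped in a PARALLEL.  */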
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */
static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
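/* Example (illustrative): for a vector type with 8 subparts (e.g. V8HI),
   max is 7, so a selector tree holding 9 triggers "selector must be an
   integer constant in the range 0..7" and 0 is returned instead.  */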
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  machine_mode tmode, mode0;
  tree arg0, arg1;
  rtx op0;
  rtx op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  /* Call get_element_number to validate arg1 if it is a constant.  */
  if (TREE_CODE (arg1) == INTEGER_CST)
    (void) get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, op1);

  return target;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, pat;
  machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
    case ALTIVEC_BUILTIN_STVX:
    case ALTIVEC_BUILTIN_STVX_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
    case ALTIVEC_BUILTIN_STVX_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
    case ALTIVEC_BUILTIN_STVXL_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
    case ALTIVEC_BUILTIN_STVXL:
    case ALTIVEC_BUILTIN_STVXL_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
    case ALTIVEC_BUILTIN_STVXL_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
    case ALTIVEC_BUILTIN_STVXL_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case P9V_BUILTIN_STXVL:
      return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);

    case P9V_BUILTIN_XST_LEN_R:
      return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);

    case VSX_BUILTIN_STXVD2X_V1TI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting store, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_ST_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
			       : CODE_FOR_vsx_st_elemrev_v2df);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
			       : CODE_FOR_vsx_st_elemrev_v2di);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
			       : CODE_FOR_vsx_st_elemrev_v4sf);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
			       : CODE_FOR_vsx_st_elemrev_v4si);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
			       : CODE_FOR_vsx_st_elemrev_v8hi);
	return altivec_expand_stv_builtin (code, exp);
      }
    case VSX_BUILTIN_ST_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
			       : CODE_FOR_vsx_st_elemrev_v16qi);
	return altivec_expand_stv_builtin (code, exp);
      }
    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to %qs must be a 2-bit unsigned literal", "dss");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;
    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
    case VSX_BUILTIN_VEC_INIT_V1TI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
    case VSX_BUILTIN_VEC_SET_V1TI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
    case VSX_BUILTIN_VEC_EXT_V1TI:
      return altivec_expand_vec_ext_builtin (exp, target);

    case P9V_BUILTIN_VEXTRACT4B:
    case P9V_BUILTIN_VEC_VEXTRACT4B:
      arg1 = CALL_EXPR_ARG (exp, 1);
      STRIP_NOPS (arg1);

      /* Generate a normal call if it is invalid.  */
      if (arg1 == error_mark_node)
	return expand_call (exp, target, false);

      if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
	{
	  error ("second argument to %qs must be 0..12", "vec_vextract4b");
	  return expand_call (exp, target, false);
	}
      break;

    case P9V_BUILTIN_VINSERT4B:
    case P9V_BUILTIN_VINSERT4B_DI:
    case P9V_BUILTIN_VEC_VINSERT4B:
      arg2 = CALL_EXPR_ARG (exp, 2);
      STRIP_NOPS (arg2);

      /* Generate a normal call if it is invalid.  */
      if (arg2 == error_mark_node)
	return expand_call (exp, target, false);

      if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
	{
	  error ("third argument to %qs must be 0..12", "vec_vinsert4b");
	  return expand_call (exp, target, false);
	}
      break;

    default:
      break;
      /* Fall through.  */
    }
  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);
  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
    case ALTIVEC_BUILTIN_LVXL_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
    case ALTIVEC_BUILTIN_LVX_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
                                        exp, target, true);
    case VSX_BUILTIN_LXVD2X_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
                                        exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
                                        exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
                                        exp, target, false);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
                               : CODE_FOR_vsx_ld_elemrev_v2df);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
                               : CODE_FOR_vsx_ld_elemrev_v2di);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
                               : CODE_FOR_vsx_ld_elemrev_v4sf);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
                               : CODE_FOR_vsx_ld_elemrev_v4si);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
                               : CODE_FOR_vsx_ld_elemrev_v8hi);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
        enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
                               : CODE_FOR_vsx_ld_elemrev_v16qi);
        return altivec_expand_lv_builtin (code, exp, target, false);
      }
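      /* Illustrative note (derived from the cases above, not original
         source text): on a little-endian target, a built-in such as
         __builtin_vsx_ld_elemrev_v4si must expand through
         CODE_FOR_vsx_ld_elemrev_v4si so the elements are reversed into
         the expected order during the load, while on big-endian the
         ordinary CODE_FOR_vsx_load_v4si pattern already produces the
         required element order.  */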
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */

static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of %s must be a constant",
             "__builtin_paired_predicate");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of %qs is out of range",
             "__builtin_paired_predicate");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
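/* Usage sketch for the expander above (assuming the form_int encoding
   reconstructed in the switch):
     r = __builtin_paired_predicate (2, a, b);
   emits the floating-point compare into a CCFPmode scratch register
   and, with form 2 selecting EQ, materializes the EQ bit of that
   compare as an SImode store-flag value in TARGET.  */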
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mpaired");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
           == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
           == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
           == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
           "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
           == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("builtin function %qs requires the %qs and %qs options",
           name, "-mhard-float", "-mlong-double-128");
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
           "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
    error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
           name);
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else
    error ("builtin function %qs is not supported with the current options",
           name);
}
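/* For example, calling __builtin_altivec_vaddubm in a function compiled
   without -maltivec lands in the RS6000_BTM_ALTIVEC arm above and
   reports that the builtin requires the -maltivec option.  */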
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
                     int n_args ATTRIBUTE_UNUSED,
                     tree *args ATTRIBUTE_UNUSED,
                     bool ignore ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
  return NULL_TREE;
#endif
}
/* Helper function to sort out which built-ins may be valid without having
   a LHS.  */
static bool
rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
{
  switch (fn_code)
    {
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return true;
    default:
      return false;
    }
}
/* Helper function to handle the gimple folding of a vector compare
   operation.  This sets up true/false vectors, and uses the
   VEC_COND_EXPR operation.
   CODE indicates which comparison is to be made. (EQ, GT, ...).
   TYPE indicates the type of the result.  */
static tree
fold_build_vec_cmp (tree_code code, tree type,
                    tree arg0, tree arg1)
{
  tree cmp_type = build_same_sized_truth_vector_type (type);
  tree zero_vec = build_zero_cst (type);
  tree minus_one_vec = build_minus_one_cst (type);
  tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
  return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
}
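/* A worked example of the helper above: for a V4SI result type,
   fold_build_vec_cmp (GT_EXPR, type, a, b) builds
     VEC_COND_EXPR <a > b, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>
   so lanes where the comparison holds become all-ones and the other
   lanes become all-zeros, matching the AltiVec convention for vector
   compare results.  */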
/* Helper function to handle the in-between steps for the
   vector compare built-ins.  */
static void
fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
  gimple *g = gimple_build_assign (lhs, cmp);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
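/* E.g. the vec_cmpeq folding below calls
   fold_compare_helper (gsi, EQ_EXPR, stmt), which replaces the builtin
   call statement with a single assignment of the VEC_COND_EXPR built
   by fold_build_vec_cmp.  */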
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs, temp;
  gimple *g;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
                          ? get_insn_name ((int) icode)
                          : "nothing";

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
             fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Prevent gimple folding for code that does not have a LHS, unless it is
     allowed per the rs6000_builtin_valid_without_lhs helper function.  */
  if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
    return false;

  /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it.  */
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
  bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
  if (!func_valid_p)
    return false;

  switch (fn_code)
    {
    /* Flavors of vec_add.  We deliberately don't expand
       P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
       TImode, resulting in much poorer code generation.  */
    case ALTIVEC_BUILTIN_VADDUBM:
    case ALTIVEC_BUILTIN_VADDUHM:
    case ALTIVEC_BUILTIN_VADDUWM:
    case P8V_BUILTIN_VADDUDM:
    case ALTIVEC_BUILTIN_VADDFP:
    case VSX_BUILTIN_XVADDDP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
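      /* E.g. lhs = vec_add (a, b) on vector float becomes the single
         GIMPLE statement "lhs = a + b" (PLUS_EXPR), letting the middle
         end optimize it like any other vector addition.  */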
    /* Flavors of vec_sub.  We deliberately don't expand
       P8V_BUILTIN_VSUBUQM.  */
    case ALTIVEC_BUILTIN_VSUBUBM:
    case ALTIVEC_BUILTIN_VSUBUHM:
    case ALTIVEC_BUILTIN_VSUBUWM:
    case P8V_BUILTIN_VSUBUDM:
    case ALTIVEC_BUILTIN_VSUBFP:
    case VSX_BUILTIN_XVSUBDP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    case VSX_BUILTIN_XVMULSP:
    case VSX_BUILTIN_XVMULDP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Even element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULESB:
    case ALTIVEC_BUILTIN_VMULESH:
    case ALTIVEC_BUILTIN_VMULESW:
    /* Even element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Odd element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULOSB:
    case ALTIVEC_BUILTIN_VMULOSH:
    case ALTIVEC_BUILTIN_VMULOSW:
    /* Odd element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_div (Integer).  */
    case VSX_BUILTIN_DIV_V2DI:
    case VSX_BUILTIN_UDIV_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_div (Float).  */
    case VSX_BUILTIN_XVDIVSP:
    case VSX_BUILTIN_XVDIVDP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
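      /* E.g. vec_andc (a, b) is lowered to the two statements
           temp = ~b;
           lhs = a & temp;
         the first is inserted before the call and the second replaces
         the call itself.  */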
    /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      arg0 = gimple_call_arg (stmt, 0);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
          && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
        return false;
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, ABS_EXPR, arg0);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;

    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      arg0 = gimple_call_arg (stmt, 0);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
          && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
        return false;
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple_seq stmts = NULL;
        /* Convert arg0 to unsigned.  */
        tree arg0_unsigned
          = gimple_build (&stmts, VIEW_CONVERT_EXPR,
                          unsigned_type_for (TREE_TYPE (arg0)), arg0);
        tree res
          = gimple_build (&stmts, RSHIFT_EXPR,
                          TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
        /* Convert result back to the lhs type.  */
        res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        update_call_from_tree (gsi, res);
        return true;
      }
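      /* E.g. for a signed vector, vec_sr (a, b) is folded as
           a' = VIEW_CONVERT_EXPR<unsigned>(a);
           r' = a' >> b;
           lhs = VIEW_CONVERT_EXPR<original type>(r');
         performing the shift on the unsigned view guarantees a logical
         (zero-extending) shift, which is what the vsr* instructions
         implement.  */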
    /* Vector loads.  */
    case ALTIVEC_BUILTIN_LVX_V16QI:
    case ALTIVEC_BUILTIN_LVX_V8HI:
    case ALTIVEC_BUILTIN_LVX_V4SI:
    case ALTIVEC_BUILTIN_LVX_V4SF:
    case ALTIVEC_BUILTIN_LVX_V2DI:
    case ALTIVEC_BUILTIN_LVX_V2DF:
      {
        arg0 = gimple_call_arg (stmt, 0);  // offset
        arg1 = gimple_call_arg (stmt, 1);  // address
        /* Do not fold for -maltivec=be on LE targets.  */
        if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
          return false;
        lhs = gimple_call_lhs (stmt);
        location_t loc = gimple_location (stmt);
        /* Since arg1 may be cast to a different type, just use ptr_type_node
           here instead of trying to enforce TBAA on pointer types.  */
        tree arg1_type = ptr_type_node;
        tree lhs_type = TREE_TYPE (lhs);
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg0.  The resulting type will match
           the type of arg1.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg1_type, arg1, temp_offset);
        /* Mask off any lower bits from the address.  */
        tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
                                          arg1_type, temp_addr,
                                          build_int_cst (arg1_type, -16));
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
           take an offset, but since we've already incorporated the offset
           above, here we just pass in a zero.  */
        gimple *g
          = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
                                              build_int_cst (arg1_type, 0)));
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
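      /* The net effect of the folding above is (sketch):
           lhs = MEM[(arg1 + (sizetype) arg0) & -16];
         i.e. the offset is added to the base and the sum is truncated
         to a 16-byte boundary, mirroring the lvx hardware behavior of
         ignoring the low four address bits.  */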
    /* Vector stores.  */
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      {
        /* Do not fold for -maltivec=be on LE targets.  */
        if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
          return false;
        arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
        arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
        tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
        location_t loc = gimple_location (stmt);
        tree arg0_type = TREE_TYPE (arg0);
        /* Use ptr_type_node (no TBAA) for the arg2_type.
           FIXME: (Richard)  "A proper fix would be to transition this type as
           seen from the frontend to GIMPLE, for example in a similar way we
           do for MEM_REFs by piggy-backing that on an extra argument, a
           constant zero pointer of the alias pointer type to use (which would
           also serve as a type indicator of the store itself).  I'd use a
           target specific internal function for this (not sure if we can have
           those target specific, but I guess if it's folded away then that's
           fine) and get away with the overload set."  */
        tree arg2_type = ptr_type_node;
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg1.  The resulting type will match
           the type of arg2.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg2_type, arg2, temp_offset);
        /* Mask off any lower bits from the address.  */
        tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
                                          arg2_type, temp_addr,
                                          build_int_cst (arg2_type, -16));
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* The desired gimple result should be similar to:
           MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
        gimple *g
          = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
                                         build_int_cst (arg2_type, 0)), arg0);
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Vector Fused multiply-add (fma).  */
    case ALTIVEC_BUILTIN_VMADDFP:
    case VSX_BUILTIN_XVMADDDP:
    case ALTIVEC_BUILTIN_VMLADDUHM:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        tree arg2 = gimple_call_arg (stmt, 2);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
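      /* E.g. vec_madd (a, b, c) becomes the single statement
         "lhs = FMA_EXPR <a, b, c>", the GIMPLE form of a fused
         multiply-add.  */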
    /* Vector compares; EQ, NE, GE, GT, LE.  */
    case ALTIVEC_BUILTIN_VCMPEQUB:
    case ALTIVEC_BUILTIN_VCMPEQUH:
    case ALTIVEC_BUILTIN_VCMPEQUW:
    case P8V_BUILTIN_VCMPEQUD:
      fold_compare_helper (gsi, EQ_EXPR, stmt);
      return true;

    case P9V_BUILTIN_CMPNEB:
    case P9V_BUILTIN_CMPNEH:
    case P9V_BUILTIN_CMPNEW:
      fold_compare_helper (gsi, NE_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPGE_16QI:
    case VSX_BUILTIN_CMPGE_U16QI:
    case VSX_BUILTIN_CMPGE_8HI:
    case VSX_BUILTIN_CMPGE_U8HI:
    case VSX_BUILTIN_CMPGE_4SI:
    case VSX_BUILTIN_CMPGE_U4SI:
    case VSX_BUILTIN_CMPGE_2DI:
    case VSX_BUILTIN_CMPGE_U2DI:
      fold_compare_helper (gsi, GE_EXPR, stmt);
      return true;

    case ALTIVEC_BUILTIN_VCMPGTSB:
    case ALTIVEC_BUILTIN_VCMPGTUB:
    case ALTIVEC_BUILTIN_VCMPGTSH:
    case ALTIVEC_BUILTIN_VCMPGTUH:
    case ALTIVEC_BUILTIN_VCMPGTSW:
    case ALTIVEC_BUILTIN_VCMPGTUW:
    case P8V_BUILTIN_VCMPGTUD:
    case P8V_BUILTIN_VCMPGTSD:
      fold_compare_helper (gsi, GT_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPLE_16QI:
    case VSX_BUILTIN_CMPLE_U16QI:
    case VSX_BUILTIN_CMPLE_8HI:
    case VSX_BUILTIN_CMPLE_U8HI:
    case VSX_BUILTIN_CMPLE_4SI:
    case VSX_BUILTIN_CMPLE_U4SI:
    case VSX_BUILTIN_CMPLE_2DI:
    case VSX_BUILTIN_CMPLE_U2DI:
      fold_compare_helper (gsi, LE_EXPR, stmt);
      return true;
    /* flavors of vec_splat_[us]{8,16,32}.  */
    case ALTIVEC_BUILTIN_VSPLTISB:
    case ALTIVEC_BUILTIN_VSPLTISH:
    case ALTIVEC_BUILTIN_VSPLTISW:
      {
        arg0 = gimple_call_arg (stmt, 0);
        lhs = gimple_call_lhs (stmt);
        /* Only fold the vec_splat_*() if arg0 is constant.  */
        if (TREE_CODE (arg0) != INTEGER_CST)
          return false;
        gimple_seq stmts = NULL;
        location_t loc = gimple_location (stmt);
        tree splat_value = gimple_convert (&stmts, loc,
                                           TREE_TYPE (TREE_TYPE (lhs)), arg0);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
        g = gimple_build_assign (lhs, splat_tree);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }

    default:
      if (TARGET_DEBUG_BUILTIN)
        fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
                 fn_code, fn_name1, fn_name2);
      break;
    }

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                       machine_mode mode ATTRIBUTE_UNUSED,
                       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
  enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;

  /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
     floating point type, depending on whether long double is the IBM extended
     double (KFmode) or long double is IEEE 128-bit (TFmode).  It is simpler if
     we only define one variant of the built-in function, and switch the code
     when defining it, rather than defining two built-ins and using the
     overload table in rs6000-c.c to switch between the two.  If we don't have
     the proper assembler, don't do this switch because CODE_FOR_*kf* and
     CODE_FOR_*tf* will be CODE_FOR_nothing.  */
#ifdef HAVE_AS_POWER9
  if (FLOAT128_IEEE_P (TFmode))
    switch (icode)
      {
      default:
        break;

      case CODE_FOR_sqrtkf2_odd:      icode = CODE_FOR_sqrttf2_odd;      break;
      case CODE_FOR_trunckfdf2_odd:   icode = CODE_FOR_trunctfdf2_odd;   break;
      case CODE_FOR_addkf3_odd:       icode = CODE_FOR_addtf3_odd;       break;
      case CODE_FOR_subkf3_odd:       icode = CODE_FOR_subtf3_odd;       break;
      case CODE_FOR_mulkf3_odd:       icode = CODE_FOR_multf3_odd;       break;
      case CODE_FOR_divkf3_odd:       icode = CODE_FOR_divtf3_odd;       break;
      case CODE_FOR_fmakf4_odd:       icode = CODE_FOR_fmatf4_odd;       break;
      case CODE_FOR_xsxexpqp_kf:      icode = CODE_FOR_xsxexpqp_tf;      break;
      case CODE_FOR_xsxsigqp_kf:      icode = CODE_FOR_xsxsigqp_tf;      break;
      case CODE_FOR_xststdcnegqp_kf:  icode = CODE_FOR_xststdcnegqp_tf;  break;
      case CODE_FOR_xsiexpqp_kf:      icode = CODE_FOR_xsiexpqp_tf;      break;
      case CODE_FOR_xsiexpqpf_kf:     icode = CODE_FOR_xsiexpqpf_tf;     break;
      case CODE_FOR_xststdcqp_kf:     icode = CODE_FOR_xststdcqp_tf;     break;
      }
#endif
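  /* E.g. when long double is IEEE 128-bit (TFmode), a built-in
     registered with CODE_FOR_addkf3_odd is remapped above to
     CODE_FOR_addtf3_odd, so a single built-in definition serves both
     choices of IEEE 128-bit mode.  */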
  if (TARGET_DEBUG_BUILTIN)
    {
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
                           ? get_insn_name ((int) icode)
                           : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
        {
        default:                   name3 = "unknown";   break;
        case RS6000_BTC_SPECIAL:   name3 = "special";   break;
        case RS6000_BTC_UNARY:     name3 = "unary";     break;
        case RS6000_BTC_BINARY:    name3 = "binary";    break;
        case RS6000_BTC_TERNARY:   name3 = "ternary";   break;
        case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
        case RS6000_BTC_ABS:       name3 = "abs";       break;
        case RS6000_BTC_DST:       name3 = "dst";       break;
        }

      fprintf (stderr,
               "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
               (name1) ? name1 : "---", fcode,
               (name2) ? name2 : "---", (int) icode,
               name3,
               func_valid_p ? "" : ", not valid");
    }
  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
                                           ? CODE_FOR_bpermd_di
                                           : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
                                           target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
                                            ? CODE_FOR_rs6000_mftb_di
                                            : CODE_FOR_rs6000_mftb_si),
                                           target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
        int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
                      : (int) CODE_FOR_altivec_lvsl_direct);
        machine_mode tmode = insn_data[icode2].operand[0].mode;
        machine_mode mode = insn_data[icode2].operand[1].mode;
        tree arg;
        rtx op, addr, pat;

        gcc_assert (TARGET_ALTIVEC);

        arg = CALL_EXPR_ARG (exp, 0);
        gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
        op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
        addr = memory_address (mode, op);
        if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
          op = addr;
        else
          {
            /* For the load case need to negate the address.  */
            op = gen_reg_rtx (GET_MODE (addr));
            emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
          }
        op = gen_rtx_MEM (mode, op);

        if (target == 0
            || GET_MODE (target) != tmode
            || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
          target = gen_reg_rtx (tmode);

        pat = GEN_FCN (icode2) (target, op);
        if (!pat)
          return 0;
        emit_insn (pat);

        return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
         constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
        {
          exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
                                 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
        }
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
              || attr == RS6000_BTC_BINARY
              || attr == RS6000_BTC_TERNARY
              || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (icode, target);

  gcc_unreachable ();
}
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
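/* Typical use, as in rs6000_init_builtins below:
     V4SF_type_node = rs6000_vector_type ("__vector float",
                                          float_type_node, 4);
   registers the variant copy under the AltiVec keyword-style name
   while the canonical V4SF vector type itself stays anonymous.  */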
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
             (TARGET_PAIRED_FLOAT) ? ", paired"  : "",
             (TARGET_ALTIVEC)      ? ", altivec" : "",
             (TARGET_VSX)          ? ", vsx"     : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
                                       : "__vector long long",
                                       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
                                       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
                                       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
                                        intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
                                                 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
                                                unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
                                                unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                                ? "__vector unsigned long"
                                                : "__vector unsigned long long",
                                                unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
                                                TYPE_QUAL_CONST));
  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
                                           intTI_type_node, 1);
      unsigned_V1TI_type_node
        = rs6000_vector_type ("__vector unsigned __int128",
                              unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;
  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM
     double-double format that uses a pair of doubles, depending on the
     switches and defaults.

     If we don't have support for either 128-bit IBM double double or IEEE
     128-bit floating point, we need to make sure the type is non-zero or
     else self-test fails during bootstrap.

     We don't register a built-in type for __ibm128 if the type is the same as
     long double.  Instead we add a #define for __ibm128 in
     rs6000_cpu_cpp_builtins to long double.

     For IEEE 128-bit floating point, always create the type __ieee128.  If
     the user used -mfloat128, rs6000-c.c will create a define from __float128
     to __ieee128.  */
  if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
    {
      ibm128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ibm128_float_type_node, IFmode);
      layout_type (ibm128_float_type_node);

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
                                              "__ibm128");
    }
  else
    ibm128_float_type_node = long_double_type_node;

  if (TARGET_FLOAT128_TYPE)
    {
      ieee128_float_type_node = float128_type_node;
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
                                              "__ieee128");
    }
  else
    ieee128_float_type_node = long_double_type_node;
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
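  /* With the table above, builtin_function_type can map e.g.
     (V4SImode, unsigned) to "__vector unsigned int" when constructing
     the function types of the machine-specific built-ins.  */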
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
                                             bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
                                            bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
                                            bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                            ? "__vector __bool long"
                                            : "__vector __bool long long",
                                            bool_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
                                             pixel_type_node, 8);
  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
                                 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
                                      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
                                      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node, double_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
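  /* These enable source-level tests such as
       if (__builtin_cpu_is ("power9")) ...
       if (__builtin_cpu_supports ("vsx")) ...
     each takes a constant string and returns a boolean.  */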
  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF
      && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
                                integer_type_node,
                                V2SF_type_node,
                                V2SF_type_node,
                                NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
                        (float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
                                                           long_integer_type_node,
                                                           pcfloat_type_node,
                                                           NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
                              V2SF_type_node,
                              long_integer_type_node,
                              pcfloat_type_node,
                              NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
               PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
               PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
                     d->name);
          continue;
        }

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);

      if (TARGET_DEBUG_BUILTIN)
        fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
                 (int) i, get_insn_name (d->icode), (int) d->icode,
                 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
        {
        case E_V2SFmode:
          type = int_ftype_int_v2sf_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
17276 altivec_init_builtins (void)
17278 const struct builtin_description
*d
;
17282 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17284 tree pvoid_type_node
= build_pointer_type (void_type_node
);
17286 tree pcvoid_type_node
17287 = build_pointer_type (build_qualified_type (void_type_node
,
17290 tree int_ftype_opaque
17291 = build_function_type_list (integer_type_node
,
17292 opaque_V4SI_type_node
, NULL_TREE
);
17293 tree opaque_ftype_opaque
17294 = build_function_type_list (integer_type_node
, NULL_TREE
);
17295 tree opaque_ftype_opaque_int
17296 = build_function_type_list (opaque_V4SI_type_node
,
17297 opaque_V4SI_type_node
, integer_type_node
, NULL_TREE
);
17298 tree opaque_ftype_opaque_opaque_int
17299 = build_function_type_list (opaque_V4SI_type_node
,
17300 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17301 integer_type_node
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DI_type_node,
				V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);

  tree void_ftype_v16qi_pvoid_long
    = build_function_type_list (void_type_node,
				V16QI_type_node, pvoid_type_node,
				long_integer_type_node, NULL_TREE);

  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
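
  /* Naming convention for the types above: <ret>_ftype_<arg1>_<arg2>...
     is the function type "<arg1>, <arg2>, ... -> <ret>", built from a
     NULL_TREE-terminated list.  For example (an illustrative sketch, not
     additional source), the type of a builtin taking a long offset and a
     const pointer and returning a V4SI vector is built as

	build_function_type_list (V4SI_type_node, long_integer_type_node,
				  pcvoid_type_node, NULL_TREE);  */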
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
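
  /* What a registration buys, as a hedged user-level sketch: def_builtin
     binds the public name to its signature and rs6000_builtins code, and
     the expander later maps that code to an insn pattern.  So after the
     "__builtin_altivec_lvx" entry above, AltiVec code can write

	vector int v = __builtin_altivec_lvx (0, ptr);

     where ptr is assumed to point to suitably aligned storage.  */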
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V16QI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V16QI);

  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_XST);
  def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_XST_BE);
  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_STXVL);
      def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_XST_LEN_R);
    }
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    {
      HOST_WIDE_INT mask = d->mask;

      /* It is expected that these dst built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */
      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
		     d->name);
	  continue;
	}

      def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
    }
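
  /* The skip test above is the feature-mask idiom used by all of these
     loops: d->mask holds the RS6000_BTM_* bits the builtin requires and
     builtin_mask the bits the current target provides, so
     (mask & builtin_mask) != mask is true exactly when some required bit
     is missing.  E.g. (illustrative) mask = 0b011 against
     builtin_mask = 0b001 gives 0b001 != 0b011, so the builtin is
     skipped.  */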
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      machine_mode mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	{
	  /* Cannot define builtin if the instruction is disabled.  */
	  gcc_assert (d->icode != CODE_FOR_nothing);
	  mode1 = insn_data[d->icode].operand[1].mode;
	}

      switch (mode1)
	{
	case E_VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case E_V2DImode:
	  type = int_ftype_int_v2di_v2di;
	  break;
	case E_V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case E_V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case E_V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case E_V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case E_V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);
      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case E_V2DImode:
	  type = v2di_ftype_v2di;
	  break;
	case E_V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case E_V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case E_V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case E_V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case E_V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */
  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
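
  /* A hedged usage sketch for the vec_init builtins just defined: each
     builds a vector from scalars through the vec_init pattern, e.g.

	vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);  */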
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
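
  /* Likewise for vec_set: operands are the vector, the new scalar value,
     and the element index, e.g. (a sketch)

	v = __builtin_vec_set_v4si (v, 42, 3);

     which replaces element 3 of v.  */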
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
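
  /* And vec_extract (the vec_ext builtins) is the inverse accessor,
     e.g. (a sketch)

	int x = __builtin_vec_ext_v4si (v, 2);

     which reads element 2 of the V4SI vector v.  */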
  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
	= build_function_type_list (V1TI_type_node,
				    long_integer_type_node, pcvoid_type_node,
				    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
	= build_function_type_list (void_type_node,
				    V1TI_type_node, long_integer_type_node,
				    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
		   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
		   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
					NULL_TREE, NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
					intTI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree gpr_type_node;
      tree rettype;
      tree argtype;

      /* It is expected that these htm built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */

      if (TARGET_32BIT && TARGET_POWERPC64)
	gpr_type_node = long_long_unsigned_type_node;
      else
	gpr_type_node = long_unsigned_type_node;

      if (attr & RS6000_BTC_SPR)
	{
	  rettype = gpr_type_node;
	  argtype = gpr_type_node;
	}
      else if (d->code == HTM_BUILTIN_TABORTDC
	       || d->code == HTM_BUILTIN_TABORTDCI)
	{
	  rettype = unsigned_type_node;
	  argtype = gpr_type_node;
	}
      else
	{
	  rettype = unsigned_type_node;
	  argtype = unsigned_type_node;
	}

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (d->name == 0)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
		     (long unsigned) i);
	  continue;
	}

      op[nopnds++] = (void_func) ? void_type_node : rettype;

      if (attr_args == RS6000_BTC_UNARY)
	op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}
      else if (attr_args == RS6000_BTC_TERNARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}

      switch (nopnds)
	{
	case 1:
	  type = build_function_type_list (op[0], NULL_TREE);
	  break;
	case 2:
	  type = build_function_type_list (op[0], op[1], NULL_TREE);
	  break;
	case 3:
	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
	  break;
	case 4:
	  type = build_function_type_list (op[0], op[1], op[2], op[3],
					   NULL_TREE);
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
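
/* The loop above folds the four (mode, unsignedness) pairs into one value
   as a mixed-radix number: each iteration multiplies the running hash by
   the radix of the next digit (MAX_MACHINE_MODE for a mode, 2 for a uns_p
   flag) and adds the digit in.  Equal signatures therefore always hash
   alike; builtin_hasher::equal below does the exact comparison.  */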
/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
static tree
builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
		       machine_mode mode_arg1, machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
    case MISC_BUILTIN_CDTBCD:
    case MISC_BUILTIN_CBCDTD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
    case MISC_BUILTIN_ADDG6S:
    case MISC_BUILTIN_DIVWEU:
    case MISC_BUILTIN_DIVWEUO:
    case MISC_BUILTIN_DIVDEU:
    case MISC_BUILTIN_DIVDEUO:
    case VSX_BUILTIN_UDIV_V2DI:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMINUW:
    case P8V_BUILTIN_VMAXUD:
    case P8V_BUILTIN_VMINUD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDSP:
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
    case MISC_BUILTIN_UNPACK_TD:
    case MISC_BUILTIN_UNPACK_V1TI:
      h.uns_p[0] = 1;
      break;

      /* unsigned arguments, bool return (compares).  */
    case ALTIVEC_BUILTIN_VCMPEQUB:
    case ALTIVEC_BUILTIN_VCMPEQUH:
    case ALTIVEC_BUILTIN_VCMPEQUW:
    case P8V_BUILTIN_VCMPEQUD:
    case VSX_BUILTIN_CMPGE_U16QI:
    case VSX_BUILTIN_CMPGE_U8HI:
    case VSX_BUILTIN_CMPGE_U4SI:
    case VSX_BUILTIN_CMPGE_U2DI:
    case ALTIVEC_BUILTIN_VCMPGTUB:
    case ALTIVEC_BUILTIN_VCMPGTUH:
    case ALTIVEC_BUILTIN_VCMPGTUW:
    case P8V_BUILTIN_VCMPGTUD:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned arguments for 128-bit pack instructions.  */
    case MISC_BUILTIN_PACK_TD:
    case MISC_BUILTIN_PACK_V1TI:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned second arguments (vector shift right).  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      h.uns_p[2] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error (input_location,
		 "internal error: builtin function %qs had an unexpected "
		 "return type %qs", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error (input_location,
		     "internal error: builtin function %qs, argument %d "
		     "had unexpected argument type %qs", name, i,
		     GET_MODE_NAME (m));
    }

  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *h2 = h;
      *found = h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return (*found)->type;
}
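
/* A hedged sketch of how this function behaves for one of the "unsigned
   2 argument" cases above, e.g. ALTIVEC_BUILTIN_VMULEUB (multiply even
   unsigned bytes, plausibly a V8HImode result from two V16QImode args):
   the switch sets h.uns_p[0..2] = 1, so ret_type and both argument types
   are looked up as the unsigned entries of builtin_mode_to_type, and the
   find_slot cache above guarantees that every builtin with the same four
   modes and signedness flags shares one function type tree.  */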
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node, NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple no-argument operators.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
	  continue;
	}
      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (!opaque_ftype_opaque)
	    opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
	  type = opaque_ftype_opaque;
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
			 (long unsigned) i);
	      continue;
	    }
	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr,
			 "rs6000_builtin, skip no-argument %s (no code)\n",
			 d->name);
	      continue;
	    }
	  mode0 = insn_data[icode].operand[0].mode;
	  if (mode0 == V2SImode)
	    {
	      /* code for paired single */
	      if (! (type = v2si_ftype))
		{
		  v2si_ftype
		    = build_function_type_list (opaque_V2SI_type_node,
						NULL_TREE);
		  type = v2si_ftype;
		}
	    }
	  else
	    type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
					  d->code, d->name);
	}
      def_builtin (d->name, type, d->code);
    }
}
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
	{
	  set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
	  set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
	  set_optab_libfunc (ne_optab, mode, "__gcc_qne");
	  set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
	  set_optab_libfunc (ge_optab, mode, "__gcc_qge");
	  set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
	  set_optab_libfunc (le_optab, mode, "__gcc_qle");
	  set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

	  set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
	  set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
	  set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
	  set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
	  set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
	  set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
	  set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
	  set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
	}
    }
}
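
/* What these registrations buy, as a hedged sketch: optab libfuncs are
   the names the expander emits calls to when no insn pattern handles the
   operation.  After init_float128_ibm (IFmode) on a non-XL-compat target,
   an IBM-extended addition that goes through the libfunc path becomes a
   call to __gcc_qadd; with TARGET_XL_COMPAT the same operation calls
   _xlqadd instead.  */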
/* Set up IEEE 128-bit floating point routines.  Use different names if the
   arguments can be passed in a vector register.  The historical PowerPC
   implementation of IEEE 128-bit floating point used _q_<op> for the names,
   so continue to use that if we aren't using vector registers to pass IEEE
   128-bit floating point.  */

static void
init_float128_ieee (machine_mode mode)
{
  if (FLOAT128_VECTOR_P (mode))
    {
      set_optab_libfunc (add_optab, mode, "__addkf3");
      set_optab_libfunc (sub_optab, mode, "__subkf3");
      set_optab_libfunc (neg_optab, mode, "__negkf2");
      set_optab_libfunc (smul_optab, mode, "__mulkf3");
      set_optab_libfunc (sdiv_optab, mode, "__divkf3");
      set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
      set_optab_libfunc (abs_optab, mode, "__abstkf2");

      set_optab_libfunc (eq_optab, mode, "__eqkf2");
      set_optab_libfunc (ne_optab, mode, "__nekf2");
      set_optab_libfunc (gt_optab, mode, "__gtkf2");
      set_optab_libfunc (ge_optab, mode, "__gekf2");
      set_optab_libfunc (lt_optab, mode, "__ltkf2");
      set_optab_libfunc (le_optab, mode, "__lekf2");
      set_optab_libfunc (unord_optab, mode, "__unordkf2");

      set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
      set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
      set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
      set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");

      set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");

      set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");

      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");

      set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
      set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
      set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
      set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");

      set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
      set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
      set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
      set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_q_add");
      set_optab_libfunc (sub_optab, mode, "_q_sub");
      set_optab_libfunc (neg_optab, mode, "_q_neg");
      set_optab_libfunc (smul_optab, mode, "_q_mul");
      set_optab_libfunc (sdiv_optab, mode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");

      set_optab_libfunc (eq_optab, mode, "_q_feq");
      set_optab_libfunc (ne_optab, mode, "_q_fne");
      set_optab_libfunc (gt_optab, mode, "_q_fgt");
      set_optab_libfunc (ge_optab, mode, "_q_fge");
      set_optab_libfunc (lt_optab, mode, "_q_flt");
      set_optab_libfunc (le_optab, mode, "_q_fle");

      set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
    }
}
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  /* AIX/Darwin/64-bit Linux quad floating point routines.  */
  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
	init_float128_ibm (TFmode);

      /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
      else
	init_float128_ieee (TFmode);
    }
}
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */

static void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
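
/* An illustrative sketch of the two shapes built above: with DOT nonzero
   and CCREG being CR0, the PARALLEL models a record-form ("dot") insn such
   as "and. rD,rA,rB", which computes the result and sets CR0 in a single
   instruction; with any other CC register the comparison has to be a
   separate compare following the plain SET.  */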
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   not zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */

bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
	nb = n;
      else
	nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
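
/* A worked example of this encoding (illustrative only): in SImode the
   mask 0x00ffff00 is a single run of ones, so the function returns true
   with *e == 8 (offset of the lowest 1 bit) and *b == 23 (offset of the
   highest); the wrap-around SImode mask 0xff0000ff is also accepted, via
   the complement branch, with *e == 24 and *b == 7 (b < e marks the
   wrap).  */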
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
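
/* Illustrative example: for an SImode AND with mask 0x00ffff00 we get
   nb == 23 and ne == 8, so operands[3] becomes 31 - 23 == 8 and
   operands[4] becomes 31 - 8 == 23, and the returned template prints as
   "rlwinm rD,rS,0,8,23" (or "rlwinm." for the record form).  */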
/* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
   rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
   shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);
  int sh = -1;

  if (CONST_INT_P (XEXP (shift, 1)))
    {
      sh = INTVAL (XEXP (shift, 1));
      if (sh < 0 || sh >= n)
	return false;
    }

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rld*.  */
  if (mode == DImode && code == ROTATE)
    return (nb == 63 || ne == 0 || ne == sh);

  /* SImode rotates need rlw*.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Variable shifts are only okay for rotates.  */
  if (sh < 0)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlw*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rld*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (nb == 63 || ne == 0 || ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for a shift with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (64 - INTVAL (operands[2]));
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2cl. %0,%1,%2,%3";
      return "rld%I2cl %0,%1,%2,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rld%I2cr. %0,%1,%2,%3";
      return "rld%I2cr %0,%1,%2,%3";
    }

  if (mode == DImode
      && GET_CODE (operands[4]) != LSHIFTRT
      && CONST_INT_P (operands[2])
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2c. %0,%1,%2,%3";
      return "rld%I2c %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      /* This insn can also be a 64-bit rotate with mask that really makes
	 it just a shift right (with mask); the %h below are to adjust for
	 that situation (shift count is >= 32 in that case).  */
      if (dot)
	return "rlw%I2nm. %0,%1,%h2,%3,%4";
      return "rlw%I2nm %0,%1,%h2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
   rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
   ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);

  int sh = INTVAL (XEXP (shift, 1));
  if (sh < 0 || sh >= n)
    return false;

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rldimi.  */
  if (mode == DImode && code == ROTATE)
    return (ne == sh);

  /* SImode rotates need rlwimi.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlwimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rldimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for an insert with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  /* Prefer rldimi because rlwimi is cracked.  */
  if (TARGET_POWERPC64
      && (!dot || mode == DImode)
      && GET_CODE (operands[4]) != LSHIFTRT
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldimi. %0,%1,%2,%3";
      return "rldimi %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwimi. %0,%1,%2,%3,%4";
      return "rlwimi %0,%1,%2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
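
/* A worked sketch of the bit tricks above: take c = 0xe7 (0b11100111),
   which is not a single run of ones.  Then bit1 = 0x01 (lowest set bit),
   bit2 = 0x08 (lowest bit of the lowest hole), val1 = 0xe0 (c with its
   lowest run cleared), bit3 = 0x20 (first set bit above the hole), and
   val + bit3 - bit2 = 0xff fills that hole in; 0xff is a valid single
   mask, so this AND can be done as two rl* instructions.  */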
/* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
   If EXPAND is true, split rotate-and-mask instructions we generate to
   their constituent parts as well (this is used during expand); if DOT
   is 1, make the last insn a record-form instruction clobbering the
   destination GPR and setting the CC reg (from operands[3]); if 2, set
   that GPR as well as the CC reg.  */

void
rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
{
  gcc_assert (!(expand && dot));

  unsigned HOST_WIDE_INT val = INTVAL (operands[2]);

  /* If it is one stretch of ones, it is DImode; shift left, mask, then
     shift right.  This generates better code than doing the masks without
     shifts, or shifting first right and then left.  */
  int nb, ne;
  if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
    {
      gcc_assert (mode == DImode);

      int shift = 63 - nb;
      if (expand)
        {
          rtx tmp1 = gen_reg_rtx (DImode);
          rtx tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
          emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
          emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
        }
      else
        {
          rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
          tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
          emit_move_insn (operands[0], tmp);
          tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
          rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
        }
      return;
    }

  /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
     that does the rest.  */
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;

  unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
  unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;

  gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));

  /* Two "no-rotate"-and-mask instructions, for SImode.  */
  if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
    {
      gcc_assert (mode == SImode);

      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
      emit_move_insn (reg, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  gcc_assert (mode == DImode);

  /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
     insns; we have to do the first in SImode, because it wraps.  */
  if (mask2 <= 0xffffffff
      && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
    {
      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
                             GEN_INT (mask1));
      rtx reg_low = gen_lowpart (SImode, reg);
      emit_move_insn (reg_low, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  /* Two rld* insns: rotate, clear the hole in the middle (which now is
     at the top end), rotate back and clear the other hole.  */
  int right = exact_log2 (bit3);
  int left = 64 - right;

  /* Rotate the mask too.  */
  mask1 = (mask1 >> right) | ((bit2 - 1) << left);

  if (expand)
    {
      rtx tmp1 = gen_reg_rtx (DImode);
      rtx tmp2 = gen_reg_rtx (DImode);
      rtx tmp3 = gen_reg_rtx (DImode);
      emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
      emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
      emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
      emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
    }
  else
    {
      rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
      emit_move_insn (operands[0], tmp);
      tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
    }
}
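
/* Worked example for the final rld* case (illustration only): with
   val = 0xff0ff0 we have bit2 = 0x1000 and bit3 = 0x10000, so
   right = 16 and left = 48.  mask1 = -bit3 + bit2 - 1 is
   0xffffffffffff0fff and mask2 = 0xfffff0.  Rotating mask1 right by
   16 and OR-ing in (bit2 - 1) << 48 gives 0x0fffffffffffffff: after
   the rotate left by 48 the hole sits in the top four bits, where a
   single contiguous mask can clear it; rotating back, mask2 clears
   the other hole.  */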
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
        return 0;
      else
        {
          reg1 = REGNO (XEXP (addr1, 0));
          /* The offset must be constant!  */
          if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
            return 0;
          offset1 = INTVAL (XEXP (addr1, 1));
        }
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
        return 0;
      else
        {
          reg2 = REGNO (XEXP (addr2, 0));
          /* The offset must be constant.  */
          if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
            return 0;
          offset2 = INTVAL (XEXP (addr2, 1));
        }
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
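
/* Example (illustration only): (mem (reg 9)) and
   (mem (plus (reg 9) (const_int 8))) share base register r9 and
   differ by exactly 8, so that pair of mems is a candidate for a
   combined lfq/stfq access.  */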
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
static machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;

  return mode;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
        return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
        return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;

  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
/* Helper function for rs6000_secondary_reload to determine whether the memory
   address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the memory
   helper functions and to try a different reload method, 0 if no additional
   instructions are needed, and positive to give the extra cost for the
   memory.  */

static int
rs6000_secondary_reload_memory (rtx addr,
                                enum reg_class rclass,
                                machine_mode mode)
{
  int extra_cost = 0;
  rtx reg, and_arg, plus_arg0, plus_arg1;
  addr_mask_type addr_mask;
  const char *type = NULL;
  const char *fail_msg = NULL;

  if (GPR_REG_CLASS_P (rclass))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (rclass == FLOAT_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (rclass == ALTIVEC_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  /* For the combined VSX_REGS, turn off Altivec AND -16.  */
  else if (rclass == VSX_REGS)
    addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
                 & ~RELOAD_REG_AND_M16);

  /* If the register allocator hasn't made up its mind yet on the register
     class to use, settle on defaults to use.  */
  else if (rclass == NO_REGS)
    {
      addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
                   & ~RELOAD_REG_AND_M16);

      if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
        addr_mask &= ~(RELOAD_REG_INDEXED
                       | RELOAD_REG_PRE_INCDEC
                       | RELOAD_REG_PRE_MODIFY);
    }

  else
    addr_mask = 0;

  /* If the register isn't valid in this register class, just return now.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    {
      if (TARGET_DEBUG_ADDR)
        {
          fprintf (stderr,
                   "rs6000_secondary_reload_memory: mode = %s, class = %s, "
                   "not valid in class\n",
                   GET_MODE_NAME (mode), reg_class_names[rclass]);
          debug_rtx (addr);
        }

      return -1;
    }

  switch (GET_CODE (addr))
    {
      /* Does the register class supports auto update forms for this mode?  We
         don't need a scratch register, since the powerpc only supports
         PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      reg = XEXP (addr, 0);
      if (!base_reg_operand (addr, GET_MODE (reg)))
        {
          fail_msg = "no base register #1";
          extra_cost = -1;
        }

      else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
        {
          extra_cost = 1;
          type = "update";
        }
      break;

    case PRE_MODIFY:
      reg = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);
      if (!base_reg_operand (reg, GET_MODE (reg))
          || GET_CODE (plus_arg1) != PLUS
          || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
        {
          fail_msg = "bad PRE_MODIFY";
          extra_cost = -1;
        }

      else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
        {
          extra_cost = 1;
          type = "update";
        }
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
         in VMX load/stores?  Only allow the AND for vector sizes.  */
    case AND:
      and_arg = XEXP (addr, 0);
      if (GET_MODE_SIZE (mode) != 16
          || GET_CODE (XEXP (addr, 1)) != CONST_INT
          || INTVAL (XEXP (addr, 1)) != -16)
        {
          fail_msg = "bad Altivec AND #1";
          extra_cost = -1;
        }

      if (rclass != ALTIVEC_REGS)
        {
          if (legitimate_indirect_address_p (and_arg, false))
            extra_cost = 1;

          else if (legitimate_indexed_address_p (and_arg, false))
            extra_cost = 2;

          else
            {
              fail_msg = "bad Altivec AND #2";
              extra_cost = -1;
            }

          type = "and";
        }
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!legitimate_indirect_address_p (addr, false))
        {
          extra_cost = 1;
          type = "move";
        }
      break;

      /* If this is an indexed address, make sure the register class can handle
         indexed addresses for this mode.  */
    case PLUS:
      plus_arg0 = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);

      /* (plus (plus (reg) (constant)) (constant)) is generated during
         push_reload processing, so handle it now.  */
      if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
        {
          if ((addr_mask & RELOAD_REG_OFFSET) == 0)
            {
              extra_cost = 1;
              type = "offset";
            }
        }

      /* (plus (plus (reg) (constant)) (reg)) is also generated during
         push_reload processing, so handle it now.  */
      else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
        {
          if ((addr_mask & RELOAD_REG_INDEXED) == 0)
            {
              extra_cost = 1;
              type = "indexed #2";
            }
        }

      else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
        {
          fail_msg = "no base register #2";
          extra_cost = -1;
        }

      else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
        {
          if ((addr_mask & RELOAD_REG_INDEXED) == 0
              || !legitimate_indexed_address_p (addr, false))
            {
              extra_cost = 1;
              type = "indexed";
            }
        }

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
               && CONST_INT_P (plus_arg1))
        {
          if (!quad_address_offset_p (INTVAL (plus_arg1)))
            {
              extra_cost = 1;
              type = "vector d-form offset";
            }
        }

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
        {
          if ((addr_mask & RELOAD_REG_OFFSET) == 0)
            {
              extra_cost = 1;
              type = "offset #2";
            }
        }

      else
        {
          fail_msg = "bad PLUS";
          extra_cost = -1;
        }

      break;

    case LO_SUM:
      /* Quad offsets are restricted and can't handle normal addresses.  */
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
        {
          extra_cost = -1;
          type = "vector d-form lo_sum";
        }

      else if (!legitimate_lo_sum_address_p (mode, addr, false))
        {
          fail_msg = "bad LO_SUM";
          extra_cost = -1;
        }

      if ((addr_mask & RELOAD_REG_OFFSET) == 0)
        {
          extra_cost = 1;
          type = "lo_sum";
        }
      break;

      /* Static addresses need to create a TOC entry.  */
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
        {
          extra_cost = -1;
          type = "vector d-form lo_sum #2";
        }

      else
        {
          type = "address";
          extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
        }
      break;

      /* TOC references look like offsetable memory.  */
    case UNSPEC:
      if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
        {
          fail_msg = "bad UNSPEC";
          extra_cost = -1;
        }

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
        {
          extra_cost = -1;
          type = "vector d-form lo_sum #3";
        }

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
        {
          extra_cost = 1;
          type = "toc reference";
        }
      break;

    default:
      {
        fail_msg = "bad address";
        extra_cost = -1;
      }
    }

  if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
    {
      if (extra_cost < 0)
        fprintf (stderr,
                 "rs6000_secondary_reload_memory error: mode = %s, "
                 "class = %s, addr_mask = '%s', %s\n",
                 GET_MODE_NAME (mode),
                 reg_class_names[rclass],
                 rs6000_debug_addr_mask (addr_mask, false),
                 (fail_msg != NULL) ? fail_msg : "<bad address>");

      else
        fprintf (stderr,
                 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
                 "addr_mask = '%s', extra cost = %d, %s\n",
                 GET_MODE_NAME (mode),
                 reg_class_names[rclass],
                 rs6000_debug_addr_mask (addr_mask, false),
                 extra_cost,
                 (type) ? type : "<none>");

      debug_rtx (addr);
    }

  return extra_cost;
}
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
                                     enum rs6000_reg_type from_type,
                                     machine_mode mode)
{
  int size = GET_MODE_SIZE (mode);

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Originally small integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  TDmode values are disallowed since they
     need special direct move handling, which we do not support yet.  */
  if (TARGET_DIRECT_MOVE
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
          || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    {
      if (TARGET_POWERPC64)
        {
          /* ISA 2.07: MTVSRD or MFVSRD.  */
          if (size == 8)
            return true;

          /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
          if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
            return true;
        }

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (TARGET_P8_VECTOR)
        {
          if (mode == SImode)
            return true;

          if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
            return true;
        }

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (mode == SDmode)
        return true;
    }

  /* Power6+: MFTGPR or MFFGPR.  */
  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
           && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
               || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  /* Move to/from SPR.  */
  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
           && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
               || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
/* Direct move helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
                                     enum rs6000_reg_type from_type,
                                     machine_mode mode,
                                     secondary_reload_info *sri,
                                     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX registers on
         ISA 2.07 (power8, power9) when running in 64-bit mode using
         XXPERMDI to glue the two 64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
        {
          cost = 3;                     /* 2 mtvsrd's, 1 xxpermdi.  */
          icode = reg_addr[mode].reload_vsx_gpr;
        }

      /* Handle moving 128-bit values from VSX registers to GPRs on
         ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to
         the bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
        {
          cost = 3;                     /* 2 mfvsrd's, 1 xxpermdi.  */
          icode = reg_addr[mode].reload_gpr_vsx;
        }
    }

  else if (TARGET_POWERPC64 && mode == SFmode)
    {
      if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
        {
          cost = 3;                     /* xscvdpspn, mfvsrd, and.  */
          icode = reg_addr[mode].reload_gpr_vsx;
        }

      else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
        {
          cost = 2;                     /* mtvsrz, xscvspdpn.  */
          icode = reg_addr[mode].reload_vsx_gpr;
        }
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
         ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
         32-bit values back together.  Altivec register classes must be handled
         specially since a different instruction is used, and the secondary
         reload support requires a single instruction class in the scratch
         register constraint.  However, right now TFmode is not allowed in
         Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
        {
          cost = 3;                     /* 2 mtvsrwz's, 1 fmrgow.  */
          icode = reg_addr[mode].reload_fpr_gpr;
        }
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      if (sri)
        {
          sri->icode = icode;
          sri->extra_cost = cost;
        }
    }

  return ret;
}
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
                              enum rs6000_reg_type from_type,
                              machine_mode mode,
                              secondary_reload_info *sri,
                              bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers is a simple move for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
        {
          sri->icode = CODE_FOR_nothing;
          sri->extra_cost = 0;
        }
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
                                              altivec_p);
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
                         rtx x,
                         reg_class_t rclass_i,
                         machine_mode mode,
                         secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;
  bool done_p = false;

  /* Allow subreg of memory before/during reload.  */
  bool memory_p = (MEM_P (x)
                   || (!reload_completed && GET_CODE (x) == SUBREG
                       && MEM_P (SUBREG_REG (x))));

  sri->icode = CODE_FOR_nothing;
  sri->t_icode = CODE_FOR_nothing;
  sri->extra_cost = 0;
  icode = ((in_p)
           ? reg_addr[mode].reload_load
           : reg_addr[mode].reload_store);

  if (REG_P (x) || register_operand (x, mode))
    {
      enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
      bool altivec_p = (rclass == ALTIVEC_REGS);
      enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);

      if (!in_p)
        std::swap (to_type, from_type);

      /* Can we do a direct move of some sort?  */
      if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
                                        altivec_p))
        {
          icode = (enum insn_code)sri->icode;
          default_p = false;
          done_p = true;
          ret = NO_REGS;
        }
    }

  /* Make sure 0.0 is not reloaded or forced into memory.  */
  if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    {
      ret = NO_REGS;
      default_p = false;
      done_p = true;
    }

  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a move via a traditional floating
     point register, unless we have D-form addressing.  Also make sure that
     non-zero constants use a FPR.  */
  if (!done_p && reg_addr[mode].scalar_in_vmx_p
      && !mode_supports_vmx_dform (mode)
      && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
      && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
    {
      ret = FLOAT_REGS;
      default_p = false;
      done_p = true;
    }

  /* Handle reload of load/stores if we have reload helper functions.  */
  if (!done_p && icode != CODE_FOR_nothing && memory_p)
    {
      int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
                                                       mode);

      if (extra_cost >= 0)
        {
          done_p = true;
          ret = NO_REGS;
          if (extra_cost > 0)
            {
              sri->extra_cost = extra_cost;
              sri->icode = icode;
            }
        }
    }

  /* Handle unaligned loads and stores of integer registers.  */
  if (!done_p && TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
        {
          unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
          unsigned HOST_WIDE_INT offset = INTVAL (off);

          /* We need a secondary reload when our legitimate_address_p
             says the address is good (as otherwise the entire address
             will be reloaded), and the offset is not a multiple of
             four or we have an address wrap.  Address wrap will only
             occur for LO_SUMs since legitimate_offset_address_p
             rejects addresses for 16-byte mems that will wrap.  */
          if (GET_CODE (addr) == LO_SUM
              ? (1 /* legitimate_address_p allows any offset for lo_sum */
                 && ((offset & 3) != 0
                     || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
              : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
                 && (offset & 3) != 0))
            {
              /* -m32 -mpowerpc64 needs to use a 32-bit scratch register.  */
              if (in_p)
                sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
                              : CODE_FOR_reload_di_load);
              else
                sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
                              : CODE_FOR_reload_di_store);
              sri->extra_cost = 2;
              ret = NO_REGS;
              done_p = true;
              default_p = false;
            }
          else
            default_p = true;
        }
      else
        default_p = true;
    }

  if (!done_p && !TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
        {
          unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
          unsigned HOST_WIDE_INT offset = INTVAL (off);

          /* We need a secondary reload when our legitimate_address_p
             says the address is good (as otherwise the entire address
             will be reloaded), and we have a wrap.

             legitimate_lo_sum_address_p allows LO_SUM addresses to
             have any offset so test for wrap in the low 16 bits.

             legitimate_offset_address_p checks for the range
             [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
             for mode size of 16.  We wrap at [0x7ffc,0x7fff] and
             [0x7ff4,0x7fff] respectively, so test for the
             intersection of these ranges, [0x7ffc,0x7fff] and
             [0x7ff4,0x7ff7] respectively.

             Note that the address we see here may have been
             manipulated by legitimize_reload_address.  */
          if (GET_CODE (addr) == LO_SUM
              ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
              : offset - (0x8000 - extra) < UNITS_PER_WORD)
            {
              if (in_p)
                sri->icode = CODE_FOR_reload_si_load;
              else
                sri->icode = CODE_FOR_reload_si_store;
              sri->extra_cost = 2;
              ret = NO_REGS;
              done_p = true;
              default_p = false;
            }
          else
            default_p = true;
        }
      else
        default_p = true;
    }

  if (!done_p)
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
               "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
               "mode = %s",
               reg_class_names[ret],
               in_p ? "true" : "false",
               reg_class_names[rclass],
               GET_MODE_NAME (mode));

      if (reload_completed)
        fputs (", after reload", stderr);

      if (!done_p)
        fputs (", done_p not set", stderr);

      if (default_p)
        fputs (", default secondary reload", stderr);

      if (sri->icode != CODE_FOR_nothing)
        fprintf (stderr, ", reload func = %s, extra cost = %d",
                 insn_data[sri->icode].name, sri->extra_cost);

      else if (sri->extra_cost > 0)
        fprintf (stderr, ", extra cost = %d", sri->extra_cost);

      fputs ("\n", stderr);
      debug_rtx (x);
    }

  return ret;
}
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
                               bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
           store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (mem, reg);
  else
    set = gen_rtx_SET (reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
  ATTRIBUTE_NORETURN;

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
                              bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
/* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
   reload helper functions.  These were identified in
   rs6000_secondary_reload_memory, and if reload decided to use the secondary
   reload, it calls the insns:
        reload_<RELOAD:mode>_<P:mptrsize>_store
        reload_<RELOAD:mode>_<P:mptrsize>_load

   which in turn calls this function, to do whatever is necessary to create
   valid addresses.  */

void
rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  machine_mode mode = GET_MODE (reg);
  addr_mask_type addr_mask;
  rtx addr;
  rtx new_addr;
  rtx op_reg, op0, op1;
  rtx and_op;
  rtx cc_clobber;
  rtvec rv;

  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
      || !base_reg_operand (scratch, GET_MODE (scratch)))
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  else
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  /* Make sure the mode is valid in this register class.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (TARGET_DEBUG_ADDR)
    rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);

  new_addr = addr = XEXP (mem, 0);
  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  If
         not, do the update now.  We don't need a scratch register, since the
         powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      op_reg = XEXP (addr, 0);
      if (!base_reg_operand (op_reg, Pmode))
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
        {
          emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
          new_addr = op_reg;
        }
      break;

    case PRE_MODIFY:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode)
          || GET_CODE (op1) != PLUS
          || !rtx_equal_p (op0, XEXP (op1, 0)))
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
        {
          emit_insn (gen_rtx_SET (op0, op1));
          new_addr = op0;
        }
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
         in VMX load/stores?  */
    case AND:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if ((addr_mask & RELOAD_REG_AND_M16) == 0)
        {
          if (REG_P (op0) || GET_CODE (op0) == SUBREG)
            op_reg = op0;

          else if (GET_CODE (op1) == PLUS)
            {
              emit_insn (gen_rtx_SET (scratch, op1));
              op_reg = scratch;
            }

          else
            rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

          and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
          cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
          rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
          emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
          new_addr = scratch;
        }
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!base_reg_operand (addr, GET_MODE (addr)))
        {
          emit_insn (gen_rtx_SET (scratch, addr));
          new_addr = scratch;
        }
      break;

      /* If this is an indexed address, make sure the register class can handle
         indexed addresses for this mode.  */
    case PLUS:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
        {
          if ((addr_mask & RELOAD_REG_INDEXED) == 0)
            {
              emit_insn (gen_rtx_SET (scratch, addr));
              new_addr = scratch;
            }
        }

      else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
        {
          if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
              || !quad_address_p (addr, mode, false))
            {
              emit_insn (gen_rtx_SET (scratch, addr));
              new_addr = scratch;
            }
        }

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
        {
          if ((addr_mask & RELOAD_REG_OFFSET) == 0)
            {
              emit_insn (gen_rtx_SET (scratch, addr));
              new_addr = scratch;
            }
        }

      else
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case LO_SUM:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
        {
          if ((addr_mask & RELOAD_REG_INDEXED) == 0)
            {
              emit_insn (gen_rtx_SET (scratch, addr));
              new_addr = scratch;
            }
        }

      /* Quad offsets are restricted and can't handle normal addresses.  */
      else if (mode_supports_vsx_dform_quad (mode))
        {
          emit_insn (gen_rtx_SET (scratch, addr));
          new_addr = scratch;
        }

      /* Make sure the register class can handle offset addresses.  */
      else if (legitimate_lo_sum_address_p (mode, addr, false))
        {
          if ((addr_mask & RELOAD_REG_OFFSET) == 0)
            {
              emit_insn (gen_rtx_SET (scratch, addr));
              new_addr = scratch;
            }
        }

      else
        rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      rs6000_emit_move (scratch, addr, Pmode);
      new_addr = scratch;
      break;

    default:
      rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
    }

  /* Adjust the address if it changed.  */
  if (addr != new_addr)
    {
      mem = replace_equiv_address_nv (mem, new_addr);
      if (TARGET_DEBUG_ADDR)
        fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
               store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      gcc_assert (REG_P (XEXP (addr, 0))
                  && GET_CODE (XEXP (addr, 1)) == PLUS
                  && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
      scratch_or_premodify = XEXP (addr, 0);
      if (!HARD_REGISTER_P (scratch_or_premodify))
        /* If we have a pseudo here then reload will have arranged
           to have it replaced, but only in the original insn.
           Use the replacement here too.  */
        scratch_or_premodify = find_replacement (&XEXP (addr, 0));

      /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
         expressions from the original insn, without unsharing them.
         Any RTL that points into the original insn will of course
         have register replacements applied.  That is why we don't
         need to look for replacements under the PLUS.  */
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  machine_mode mode = GET_MODE (x);
  bool is_constant = CONSTANT_P (x);

  /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
     reload class for it.  */
  if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS.  Do not allow
     the reloading of address expressions using PLUS into floating point
     registers.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
    {
      if (is_constant)
        {
          /* Zero is always allowed in all VSX registers.  */
          if (x == CONST0_RTX (mode))
            return rclass;

          /* If this is a vector constant that can be formed with a few Altivec
             instructions, we want altivec registers.  */
          if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
            return ALTIVEC_REGS;

          /* If this is an integer constant that can easily be loaded into
             vector registers, allow it.  */
          if (CONST_INT_P (x))
            {
              HOST_WIDE_INT value = INTVAL (x);

              /* ISA 2.07 can generate -1 in all registers with XXLORC.  ISA
                 2.06 can generate it in the Altivec registers with
                 VSPLTI<x>.  */
              if (value == -1)
                {
                  if (TARGET_P8_VECTOR)
                    return rclass;
                  else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
                    return ALTIVEC_REGS;
                  else
                    return NO_REGS;
                }

              /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
                 a sign extend in the Altivec registers.  */
              if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
                  && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
                return ALTIVEC_REGS;
            }

          /* Force constant to memory.  */
          return NO_REGS;
        }

      /* D-form addressing can easily reload the value.  */
      if (mode_supports_vmx_dform (mode)
          || mode_supports_vsx_dform_quad (mode))
        return rclass;

      /* If this is a scalar floating point value and we don't have D-form
         addressing, prefer the traditional floating point registers so that we
         can use D-form (register+offset) addressing.  */
      if (rclass == VSX_REGS
          && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
        return FLOAT_REGS;

      /* Prefer the Altivec registers if Altivec is handling the vector
         operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
         loads.  */
      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
          || mode == V1TImode)
        return ALTIVEC_REGS;

      return rclass;
    }

  if (is_constant || GET_CODE (x) == PLUS)
    {
      if (reg_class_subset_p (GENERAL_REGS, rclass))
        return GENERAL_REGS;
      if (reg_class_subset_p (BASE_REGS, rclass))
        return BASE_REGS;
      return NO_REGS;
    }

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  return rclass;
}
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
           "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
           "mode = %s, x:\n",
           reg_class_names[ret], reg_class_names[rclass],
           GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (machine_mode mode,
                                reg_class_t from_class,
                                reg_class_t to_class)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
                    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory.  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
                                    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}
/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (machine_mode mode,
                                      reg_class_t from_class,
                                      reg_class_t to_class)
{
  bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);

  fprintf (stderr,
           "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
           "to_class = %s, mode = %s\n",
           ret ? "true" : "false",
           reg_class_names[from_class],
           reg_class_names[to_class],
           GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
                               rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
                     && MACHOPIC_INDIRECT
#endif
                     ))
    {
      /* We cannot copy a symbolic operand directly into anything
         other than BASE_REGS for TARGET_ELF.  So indicate that a
         register from BASE_REGS is needed as an intermediate
         register.

         On Darwin, pic addresses require a load from memory, which
         needs a base register.  */
      if (rclass != BASE_REGS
          && (GET_CODE (in) == SYMBOL_REF
              || GET_CODE (in) == HIGH
              || GET_CODE (in) == LABEL_REF
              || GET_CODE (in) == CONST))
        return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          regno = true_regnum (in);
          if (regno >= FIRST_PSEUDO_REGISTER)
            regno = -1;
        }
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
        regno = -1;
    }
  else
    regno = -1;

  /* If we have VSX register moves, prefer moving scalar values between
     Altivec registers and GPR by going via an FPR (and then via memory)
     instead of reloading the secondary memory address for Altivec moves.  */
  if (TARGET_VSX
      && GET_MODE_SIZE (mode) < 16
      && !mode_supports_vmx_dform (mode)
      && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
           && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
          || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
              && (regno >= 0 && INT_REGNO_P (regno)))))
    return FLOAT_REGS;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and VSX registers can go into VSX registers (both the
     traditional floating point and the altivec registers).  */
  if (rclass == VSX_REGS
      && (regno == -1 || VSX_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
                                     machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
           "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
           "mode = %s, input rtx:\n",
           reg_class_names[ret], reg_class_names[rclass],
           GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
rs6000_can_change_mode_class (machine_mode from,
                              machine_mode to,
                              reg_class_t rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
        {
          unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
          unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
          bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
          bool from_float128_vector_p = FLOAT128_VECTOR_P (from);

          /* Don't allow 64-bit types to overlap with 128-bit types that take a
             single register under VSX because the scalar part of the register
             is in the upper 64-bits, and not the lower 64-bits.  Types like
             TFmode/TDmode that take 2 scalar register can overlap.  128-bit
             IEEE floating point can't overlap, and neither can small
             values.  */

          if (to_float128_vector_p && from_float128_vector_p)
            return true;

          else if (to_float128_vector_p || from_float128_vector_p)
            return false;

          /* TDmode in floating-mode registers must always go into a register
             pair with the most significant word in the even-numbered register
             to match ISA requirements.  In little-endian mode, this does not
             match subreg numbering, so we cannot allow subregs.  */
          if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
            return false;

          if (from_size < 8 || to_size < 8)
            return false;

          if (from_size == 8 && (8 * to_nregs) != to_size)
            return false;

          if (to_size == 8 && (8 * from_nregs) != from_size)
            return false;

          return true;
        }
      else
        return true;
    }

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
          || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
        return false;

      return (from_size == 8 || from_size == 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return false;

  return true;
}
/* Debug version of rs6000_can_change_mode_class.  */
static bool
rs6000_debug_can_change_mode_class (machine_mode from,
                                    machine_mode to,
                                    reg_class_t rclass)
{
  bool ret = rs6000_can_change_mode_class (from, to, rclass);

  fprintf (stderr,
           "rs6000_can_change_mode_class, return %s, from = %s, "
           "to = %s, rclass = %s\n",
           ret ? "true" : "false",
           GET_MODE_NAME (from), GET_MODE_NAME (to),
           reg_class_names[rclass]);

  return ret;
}
/* Return a string to do a move operation of 128 bits of data.  */

const char *
rs6000_output_move_128bit (rtx operands[])
{
  rtx dest = operands[0];
  rtx src = operands[1];
  machine_mode mode = GET_MODE (dest);
  int dest_regno;
  int src_regno;
  bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
  bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;

  if (REG_P (dest))
    {
      dest_regno = REGNO (dest);
      dest_gpr_p = INT_REGNO_P (dest_regno);
      dest_fp_p = FP_REGNO_P (dest_regno);
      dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
      dest_vsx_p = dest_fp_p | dest_vmx_p;
    }
  else
    {
      dest_regno = -1;
      dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
    }

  if (REG_P (src))
    {
      src_regno = REGNO (src);
      src_gpr_p = INT_REGNO_P (src_regno);
      src_fp_p = FP_REGNO_P (src_regno);
      src_vmx_p = ALTIVEC_REGNO_P (src_regno);
      src_vsx_p = src_fp_p | src_vmx_p;
    }
  else
    {
      src_regno = -1;
      src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
    }

  /* Register moves.  */
  if (dest_regno >= 0 && src_regno >= 0)
    {
      if (dest_gpr_p)
        {
          if (src_gpr_p)
            return "#";

          if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
            return (WORDS_BIG_ENDIAN
                    ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
                    : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");

          else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
            return "#";
        }

      else if (TARGET_VSX && dest_vsx_p)
        {
          if (src_vsx_p)
            return "xxlor %x0,%x1,%x1";

          else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
            return (WORDS_BIG_ENDIAN
                    ? "mtvsrdd %x0,%1,%L1"
                    : "mtvsrdd %x0,%L1,%1");

          else if (TARGET_DIRECT_MOVE && src_gpr_p)
            return "#";
        }

      else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
        return "vor %0,%1,%1";

      else if (dest_fp_p && src_fp_p)
        return "#";
    }

  /* Loads.  */
  else if (dest_regno >= 0 && MEM_P (src))
    {
      if (dest_gpr_p)
        {
          if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
            return "lq %0,%1";
          else
            return "#";
        }

      else if (TARGET_ALTIVEC && dest_vmx_p
               && altivec_indexed_or_indirect_operand (src, mode))
        return "lvx %0,%y1";

      else if (TARGET_VSX && dest_vsx_p)
        {
          if (mode_supports_vsx_dform_quad (mode)
              && quad_address_p (XEXP (src, 0), mode, true))
            return "lxv %x0,%1";

          else if (TARGET_P9_VECTOR)
            return "lxvx %x0,%y1";

          else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
            return "lxvw4x %x0,%y1";

          else
            return "lxvd2x %x0,%y1";
        }

      else if (TARGET_ALTIVEC && dest_vmx_p)
        return "lvx %0,%y1";

      else if (dest_fp_p)
        return "#";
    }

  /* Stores.  */
  else if (src_regno >= 0 && MEM_P (dest))
    {
      if (src_gpr_p)
        {
          if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
            return "stq %1,%0";
          else
            return "#";
        }

      else if (TARGET_ALTIVEC && src_vmx_p
               && altivec_indexed_or_indirect_operand (src, mode))
        return "stvx %1,%y0";

      else if (TARGET_VSX && src_vsx_p)
        {
          if (mode_supports_vsx_dform_quad (mode)
              && quad_address_p (XEXP (dest, 0), mode, true))
            return "stxv %x1,%0";

          else if (TARGET_P9_VECTOR)
            return "stxvx %x1,%y0";

          else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
            return "stxvw4x %x1,%y0";

          else
            return "stxvd2x %x1,%y0";
        }

      else if (TARGET_ALTIVEC && src_vmx_p)
        return "stvx %1,%y0";

      else if (src_fp_p)
        return "#";
    }

  /* Constants.  */
  else if (dest_regno >= 0
           && (GET_CODE (src) == CONST_INT
               || GET_CODE (src) == CONST_WIDE_INT
               || GET_CODE (src) == CONST_DOUBLE
               || GET_CODE (src) == CONST_VECTOR))
    {
      if (dest_gpr_p)
        return "#";

      else if ((dest_vmx_p && TARGET_ALTIVEC)
               || (dest_vsx_p && TARGET_VSX))
        return output_vec_const_move (operands);
    }

  fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
}
/* Validate a 128-bit move.  */
bool
rs6000_move_128bit_ok_p (rtx operands[])
{
  machine_mode mode = GET_MODE (operands[0]);
  return (gpc_reg_operand (operands[0], mode)
          || gpc_reg_operand (operands[1], mode));
}

/* Return true if a 128-bit move needs to be split.  */
bool
rs6000_split_128bit_ok_p (rtx operands[])
{
  if (!reload_completed)
    return false;

  if (!gpr_or_gpr_p (operands[0], operands[1]))
    return false;

  if (quad_load_store_p (operands[0], operands[1]))
    return false;

  return true;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
              || code == EQ || code == GT || code == LT || code == UNORDERED
              || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
         unordered position.  So test that bit.  For integer, this is ! LT
         unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
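
/* Background note (not from the original source): each CR field i
   occupies CCR bits 4*i .. 4*i+3, ordered LT, GT, EQ, SO/UNORDERED.
   That is why EQ tests base_bit + 2 and ORDERED/UNORDERED test
   base_bit + 3 above; e.g. the EQ bit of cr2 is bit 10.  */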
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}
#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)

/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
        {
        default:
          gcc_unreachable ();

        case ABI_AIX:
          if (DOT_SYMBOLS)
            putc ('.', file);
          else
            ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
          break;

        case ABI_ELFv2:
        case ABI_V4:
        case ABI_DARWIN:
          break;
        }
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
21183 /* Print an operand. Recognize special options, documented below. */
21186 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21187 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21189 #define SMALL_DATA_RELOC "sda21"
21190 #define SMALL_DATA_REG 0
21194 print_operand (FILE *file
, rtx x
, int code
)
21197 unsigned HOST_WIDE_INT uval
;
21201 /* %a is output_address. */
21203 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21207 /* Like 'J' but get to the GT bit only. */
21208 gcc_assert (REG_P (x
));
21210 /* Bit 1 is GT bit. */
21211 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
21213 /* Add one for shift count in rlinm for scc. */
21214 fprintf (file
, "%d", i
+ 1);
21218 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21221 output_operand_lossage ("invalid %%e value");
21226 if ((uval
& 0xffff) == 0 && uval
!= 0)
21231 /* X is a CR register. Print the number of the EQ bit of the CR */
21232 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21233 output_operand_lossage ("invalid %%E value");
21235 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
21239 /* X is a CR register. Print the shift count needed to move it
21240 to the high-order four bits. */
21241 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21242 output_operand_lossage ("invalid %%f value");
21244 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
21248 /* Similar, but print the count for the rotate in the opposite
21250 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21251 output_operand_lossage ("invalid %%F value");
21253 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
21257 /* X is a constant integer. If it is negative, print "m",
21258 otherwise print "z". This is to make an aze or ame insn. */
21259 if (GET_CODE (x
) != CONST_INT
)
21260 output_operand_lossage ("invalid %%G value");
21261 else if (INTVAL (x
) >= 0)
21268 /* If constant, output low-order five bits. Otherwise, write
21271 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
21273 print_operand (file
, x
, 0);
21277 /* If constant, output low-order six bits. Otherwise, write
21280 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
21282 print_operand (file
, x
, 0);
21286 /* Print `i' if this is a constant, else nothing. */
21292 /* Write the bit number in CCR for jump. */
21293 i
= ccr_bit (x
, 0);
21295 output_operand_lossage ("invalid %%j code");
21297 fprintf (file
, "%d", i
);
21301 /* Similar, but add one for shift count in rlinm for scc and pass
21302 scc flag to `ccr_bit'. */
21303 i
= ccr_bit (x
, 1);
21305 output_operand_lossage ("invalid %%J code");
21307 /* If we want bit 31, write a shift count of zero, not 32. */
21308 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21312 /* X must be a constant. Write the 1's complement of the
21315 output_operand_lossage ("invalid %%k value");
21317 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
21321 /* X must be a symbolic constant on ELF. Write an
21322 expression suitable for an 'addi' that adds in the low 16
21323 bits of the MEM. */
21324 if (GET_CODE (x
) == CONST
)
21326 if (GET_CODE (XEXP (x
, 0)) != PLUS
21327 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
21328 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
21329 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
21330 output_operand_lossage ("invalid %%K value");
21332 print_operand_address (file
, x
);
21333 fputs ("@l", file
);
21336 /* %l is output_asm_label. */
    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
						 UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
						 UNITS_PER_WORD));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode,
							   UNITS_PER_WORD),
					0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;
    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;
    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;
    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || (i = exact_log2 (INTVAL (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;
    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;
    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;
    case 'Q':
      if (! TARGET_MFCRF)
	return;
      break;
    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;
    case 's':
      /* Low 5 bits of 32 - value.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
      return;
    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;
    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs ("lr", file);
      else
	fputs ("ctr", file);
      return;
    case 'u':
      /* High-order or low-order 16 bits of constant, whichever is non-zero,
	 for use in unsigned operand.  */
      if (! INT_P (x))
	{
	  output_operand_lossage ("invalid %%u value");
	  return;
	}

      uval = INTVAL (x);
      if ((uval & 0xffff) == 0)
	uval >>= 16;

      fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
      return;
    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INTVAL (x) >> 16) & 0xffff);
      return;
    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;
    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;
    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;
    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;
    case 'Y':
      /* Like 'L', for third word of TImode/PTImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;
    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;
    case 'Z':
      /* Like 'L', for last word of TImode/PTImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 12),
					0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;
      /* Print AltiVec memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }
    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
	  else
	    output_address (GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				   &tocrel_offset_oac))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
	  else
	    output_addr_const (file, x);
	}
      return;
    case '&':
      if (const char *name = get_some_local_dynamic_name ())
	assemble_name (file, name);
      else
	output_operand_lossage ("'%%&' used without any "
				"local dynamic TLS references");
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
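
/* Editor's worked example (not part of GCC): the CR-field bit arithmetic
   used by several cases above.  CR fields CR0..CR7 occupy bits
   4*(REGNO - CR0_REGNO) + {0,1,2,3} of the 32-bit CR (LT, GT, EQ, OV/UN),
   and the `mtcrf' mask for field N is 128 >> N.  The standalone function
   below just demonstrates that arithmetic.  */
#if 0
#include <stdio.h>

static void
demo_cr_bits (int cr_field)	/* 0..7, i.e. REGNO (x) - CR0_REGNO */
{
  printf ("eq bit: %d\n", 4 * cr_field + 2);	/* what %E prints */
  printf ("shift:  %d\n", 4 * cr_field);	/* what %f prints */
  printf ("mask:   %d\n", 128 >> cr_field);	/* what %R prints, for mtcrf */
}
#endif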
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				&tocrel_offset_oac))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
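
/* Editor's illustration (hypothetical operands, not part of GCC): the
   concrete syntaxes the function above produces:
     (reg 9)                        -> "0(9)"
     (plus (reg 9) (reg 10))        -> "9,10"
     (plus (reg 31) (const_int 8))  -> "8(31)"
     (lo_sum (reg 9) (symbol "x"))  -> "x@l(9)" on ELF, "lo16(x)(r9)" on
                                       Darwin, per the branches above.  */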
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset_oac) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	    need_toc_init = 1;
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the text
	 section.  */
      if (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && in_section != toc_section
	  && !recurse
	  && !CONST_SCALAR_INT_P (x)
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
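
/* Editor's sketch of the assembly the -mrelocatable path above emits for
   one address constant X (the label number is hypothetical):

	LCP42:
		.long	(X)@fixup
		.section	".fixup","aw"
		.align	2
		.long	LCP42
		.previous

   i.e. the word itself plus a .fixup entry pointing back at it, exactly
   mirroring the fprintf sequence in the function.  */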
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "protected", "hidden", "internal"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
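
/* Editor's sketch (plain C, not part of GCC): why CCFPmode reversal must
   use the "maybe unordered" variant.  With NaNs, !(a < b) is not (a >= b)
   but UNGE -- "greater, equal or unordered".  */
#if 0
#include <math.h>
#include <stdio.h>

static void
demo_fp_reversal (void)
{
  double a = NAN, b = 1.0;
  printf ("a <  b : %d\n", a < b);	/* 0 */
  printf ("a >= b : %d\n", a >= b);	/* also 0 -- not the negation!  */
  printf ("!(a<b) : %d\n", !(a < b));	/* 1, i.e. UNGE */
}
#endif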
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, machine_mode mode)
{
  machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    comp_mode = CCmode;
  else if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }
  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* IEEE 128-bit support in VSX registers when we do not have hardware
     support.  */
  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    {
      rtx libfunc = NULL_RTX;
      bool check_nan = false;
      rtx dest;

      switch (code)
	{
	case EQ:
	case NE:
	  libfunc = optab_libfunc (eq_optab, mode);
	  break;

	case GT:
	case GE:
	  libfunc = optab_libfunc (ge_optab, mode);
	  break;

	case LT:
	case LE:
	  libfunc = optab_libfunc (le_optab, mode);
	  break;

	case UNORDERED:
	case ORDERED:
	  libfunc = optab_libfunc (unord_optab, mode);
	  code = (code == UNORDERED) ? NE : EQ;
	  break;

	case UNGE:
	case UNGT:
	  check_nan = true;
	  libfunc = optab_libfunc (ge_optab, mode);
	  code = (code == UNGE) ? GE : GT;
	  break;

	case UNLE:
	case UNLT:
	  check_nan = true;
	  libfunc = optab_libfunc (le_optab, mode);
	  code = (code == UNLE) ? LE : LT;
	  break;

	case UNEQ:
	case LTGT:
	  check_nan = true;
	  libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;
	  break;

	default:
	  gcc_unreachable ();
	}
      gcc_assert (libfunc);

      if (!check_nan)
	dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					SImode, op0, mode, op1, mode);

      /* The library signals an exception for signalling NaNs, so we need to
	 handle isgreater, etc. by first checking isordered.  */
      else
	{
	  rtx ne_rtx, normal_dest, unord_dest;
	  rtx unord_func = optab_libfunc (unord_optab, mode);
	  rtx join_label = gen_label_rtx ();
	  rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
	  rtx unord_cmp = gen_reg_rtx (comp_mode);

	  /* Test for either value being a NaN.  */
	  gcc_assert (unord_func);
	  unord_dest = emit_library_call_value (unord_func, NULL_RTX,
						LCT_CONST, SImode,
						op0, mode, op1, mode);

	  /* Set value (0) if either value is a NaN, and jump to the join
	     label.  */
	  dest = gen_reg_rtx (SImode);
	  emit_move_insn (dest, const1_rtx);
	  emit_insn (gen_rtx_SET (unord_cmp,
				  gen_rtx_COMPARE (comp_mode, unord_dest,
						   const0_rtx)));

	  ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
	  emit_jump_insn (gen_rtx_SET (pc_rtx,
				       gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
							     join_ref,
							     pc_rtx)));

	  /* Do the normal comparison, knowing that the values are not
	     NaNs.  */
	  normal_dest = emit_library_call_value (libfunc, NULL_RTX,
						 LCT_CONST, SImode,
						 op0, mode, op1, mode);

	  emit_insn (gen_cstoresi4 (dest,
				    gen_rtx_fmt_ee (code, SImode, normal_dest,
						    const0_rtx),
				    normal_dest, const0_rtx));

	  /* Join NaN and non-NaN paths.  Compare dest against 0.  */
	  emit_label (join_label);
	  code = NE;
	}

      emit_insn (gen_rtx_SET (compare_result,
			      gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && FLOAT128_IBM_P (GET_MODE (op0))
	  && TARGET_HARD_FLOAT)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }
  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
      && !flag_finite_math_only
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT; or2 = EQ; break;
	case GE: or1 = GT; or2 = EQ; break;
	case UNEQ: or1 = UNORDERED; or2 = EQ; break;
	case LTGT: or1 = LT; or2 = GT; break;
	case UNGT: or1 = UNORDERED; or2 = GT; break;
	case UNLT: or1 = UNORDERED; or2 = LT; break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
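
/* Editor's sketch (hypothetical enum names, not GCC's) of the libcall
   selection the soft-float128 path above performs: EQ/NE share the eq
   libfunc, GT/GE the ge libfunc, LT/LE the le libfunc, and the UN*
   codes reuse ge/le/eq after an explicit isordered pre-check.  */
#if 0
typedef enum { CMP_EQ, CMP_NE, CMP_GT, CMP_GE, CMP_LT, CMP_LE,
	       CMP_UNORDERED, CMP_ORDERED } demo_cmp_t;
typedef enum { LIB_EQ, LIB_GE, LIB_LE, LIB_UNORD } demo_libfn_t;

static demo_libfn_t
demo_libfunc_for (demo_cmp_t c)
{
  switch (c)
    {
    case CMP_EQ: case CMP_NE:	return LIB_EQ;
    case CMP_GT: case CMP_GE:	return LIB_GE;
    case CMP_LT: case CMP_LE:	return LIB_LE;
    default:			return LIB_UNORD;
    }
}
#endif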
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
			  const_tree type1,
			  const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
    }

  return NULL;
}
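
/* Editor's example (hypothetical user code) of what the checks above
   reject when -mfloat128-convert is not given:

	__float128 a;
	__ibm128   b;
	... a + b ...	// error: __float128 and __ibm128 cannot be
			// used in the same expression
*/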
/* Expand floating point conversion to/from __float128 and __ibm128.  */

void
rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode src_mode = GET_MODE (src);
  convert_optab cvt = unknown_optab;
  bool do_move = false;
  rtx libfunc = NULL_RTX;
  rtx dest2;
  typedef rtx (*rtx_2func_t) (rtx, rtx);
  rtx_2func_t hw_convert = (rtx_2func_t)0;
  size_t kf_or_tf;

  struct hw_conversions
  {
    rtx_2func_t from_df;	/* Convert from DFmode.  */
    rtx_2func_t from_sf;	/* Convert from SFmode.  */
    rtx_2func_t from_si_sign;	/* Convert from SImode, signed.  */
    rtx_2func_t from_si_uns;	/* Convert from SImode, unsigned.  */
    rtx_2func_t from_di_sign;	/* Convert from DImode, signed.  */
    rtx_2func_t from_di_uns;	/* Convert from DImode, unsigned.  */
    rtx_2func_t to_df;		/* Convert to DFmode.  */
    rtx_2func_t to_sf;		/* Convert to SFmode.  */
    rtx_2func_t to_si_sign;	/* Convert to SImode, signed.  */
    rtx_2func_t to_si_uns;	/* Convert to SImode, unsigned.  */
    rtx_2func_t to_di_sign;	/* Convert to DImode, signed.  */
    rtx_2func_t to_di_uns;	/* Convert to DImode, unsigned.  */
  } hw_conversions[2] = {
    /* Conversions to/from KFmode.  */
    {
      gen_extenddfkf2_hw,	/* KFmode <- DFmode.  */
      gen_extendsfkf2_hw,	/* KFmode <- SFmode.  */
      gen_float_kfsi2_hw,	/* KFmode <- SImode (signed).  */
      gen_floatuns_kfsi2_hw,	/* KFmode <- SImode (unsigned).  */
      gen_float_kfdi2_hw,	/* KFmode <- DImode (signed).  */
      gen_floatuns_kfdi2_hw,	/* KFmode <- DImode (unsigned).  */
      gen_trunckfdf2_hw,	/* DFmode <- KFmode.  */
      gen_trunckfsf2_hw,	/* SFmode <- KFmode.  */
      gen_fix_kfsi2_hw,		/* SImode <- KFmode (signed).  */
      gen_fixuns_kfsi2_hw,	/* SImode <- KFmode (unsigned).  */
      gen_fix_kfdi2_hw,		/* DImode <- KFmode (signed).  */
      gen_fixuns_kfdi2_hw,	/* DImode <- KFmode (unsigned).  */
    },

    /* Conversions to/from TFmode.  */
    {
      gen_extenddftf2_hw,	/* TFmode <- DFmode.  */
      gen_extendsftf2_hw,	/* TFmode <- SFmode.  */
      gen_float_tfsi2_hw,	/* TFmode <- SImode (signed).  */
      gen_floatuns_tfsi2_hw,	/* TFmode <- SImode (unsigned).  */
      gen_float_tfdi2_hw,	/* TFmode <- DImode (signed).  */
      gen_floatuns_tfdi2_hw,	/* TFmode <- DImode (unsigned).  */
      gen_trunctfdf2_hw,	/* DFmode <- TFmode.  */
      gen_trunctfsf2_hw,	/* SFmode <- TFmode.  */
      gen_fix_tfsi2_hw,		/* SImode <- TFmode (signed).  */
      gen_fixuns_tfsi2_hw,	/* SImode <- TFmode (unsigned).  */
      gen_fix_tfdi2_hw,		/* DImode <- TFmode (signed).  */
      gen_fixuns_tfdi2_hw,	/* DImode <- TFmode (unsigned).  */
    },
  };
  if (dest_mode == src_mode)
    gcc_unreachable ();

  /* Eliminate memory operations.  */
  if (MEM_P (src))
    src = force_reg (src_mode, src);

  if (MEM_P (dest))
    {
      rtx tmp = gen_reg_rtx (dest_mode);
      rs6000_expand_float128_convert (tmp, src, unsigned_p);
      rs6000_emit_move (dest, tmp, dest_mode);
      return;
    }

  /* Convert to IEEE 128-bit floating point.  */
  if (FLOAT128_IEEE_P (dest_mode))
    {
      if (dest_mode == KFmode)
	kf_or_tf = 0;
      else if (dest_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (src_mode)
	{
	case E_DFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_df;
	  break;

	case E_SFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (src_mode))
	    cvt = sext_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Convert from IEEE 128-bit floating point.  */
  else if (FLOAT128_IEEE_P (src_mode))
    {
      if (src_mode == KFmode)
	kf_or_tf = 0;
      else if (src_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (dest_mode)
	{
	case E_DFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_df;
	  break;

	case E_SFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (dest_mode))
	    cvt = trunc_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Both IBM format.  */
  else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
    do_move = true;

  else
    gcc_unreachable ();

  /* Handle conversion between TFmode/KFmode.  */
  if (do_move)
    emit_move_insn (dest, gen_lowpart (dest_mode, src));

  /* Handle conversion if we have hardware support.  */
  else if (TARGET_FLOAT128_HW && hw_convert)
    emit_insn ((hw_convert) (dest, src));

  /* Call an external function to do the conversion.  */
  else if (cvt != unknown_optab)
    {
      libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
      gcc_assert (libfunc != NULL_RTX);

      dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
				       src, src_mode);

      gcc_assert (dest2 != NULL_RTX);
      if (!rtx_equal_p (dest, dest2))
	emit_move_insn (dest, dest2);
    }

  else
    gcc_unreachable ();

  return;
}
/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

static rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    return op1;

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
  else
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));

  return scratch;
}
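
/* Editor's sketch (plain C, not part of GCC) of the identity the helper
   above relies on: in wrap-around arithmetic, a == b iff (a ^ b) == 0 and
   also iff (a - b) == 0.  XOR is used when OP2 fits a logical immediate,
   addition of the negation otherwise.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
demo_eqne (uint32_t a, uint32_t b)
{
  assert (((a ^ b) == 0) == (a == b));			/* XOR form */
  assert (((uint32_t) (a + (uint32_t) -b) == 0) == (a == b)); /* PLUS form */
}
#endif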
void
rs6000_emit_sCOND (machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand template of the label, or NULL if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }
  /* Maybe we have a guess as to how likely the branch is.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
		   .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer cpus when
	 we have real profile data, as static prediction overrides processor
	 dynamic prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "b%slr%s ", ccode, pred);
  else
    s += sprintf (s, "b%s%s ", ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
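
/* Editor's arithmetic note (not part of GCC): with REG_BR_PROB_BASE of
   10000, the test above hints only when the probability is more than 48%
   away from 50% (outside roughly 2%..98%), and the "+"/"-" suffix needs
   at least 1/20 of the base.  A quick check of the constants:  */
#if 0
#include <stdio.h>

static void
demo_hint_thresholds (void)
{
  const int base = 10000;			/* REG_BR_PROB_BASE */
  printf ("hint when |prob| > %d\n", base / 100 * 48);	/* 4800 */
  printf ("+/-  when |prob| > %d\n", base / 20);	/* 500 */
}
#endif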
/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;
      /* FALLTHRU */

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;
	  case GEU:
	    new_code = GTU;
	    break;
	  case LE:
	    new_code = LT;
	    break;
	  case LEU:
	    new_code = LTU;
	    break;
	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	std::swap (op0, op1);

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

    case GE:
    case LE:
      if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
	{
	  /* Invert condition to avoid compound test.  */
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Mark unsigned tests with CCUNSmode.  */
      cc_mode = CCUNSmode;

      /* Invert condition to avoid compound test if necessary.  */
      if (rcode == GEU || rcode == LEU)
	{
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    std::swap (op_true, op_false);
  /* Optimize vec1 == vec2, to know the mask generates -1/0.  */
  if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
      && (GET_CODE (op_true) == CONST_VECTOR
	  || GET_CODE (op_false) == CONST_VECTOR))
    {
      rtx constant_0 = CONST0_RTX (dest_mode);
      rtx constant_m1 = CONSTM1_RTX (dest_mode);

      if (op_true == constant_m1 && op_false == constant_0)
	{
	  emit_move_insn (dest, mask);
	  return 1;
	}

      else if (op_true == constant_0 && op_false == constant_m1)
	{
	  emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
	  return 1;
	}

      /* If we can't use the vector comparison directly, perhaps we can use
	 the mask for the true or false fields, instead of loading up a
	 constant.  */
      if (op_true == constant_m1)
	op_true = mask;

      if (op_false == constant_0)
	op_false = mask;
    }

  if (!REG_P (op_true) && !SUBREG_P (op_true))
    op_true = force_reg (dest_mode, op_true);

  if (!REG_P (op_false) && !SUBREG_P (op_false))
    op_false = force_reg (dest_mode, op_false);

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP of the operands of the last
   comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0 if the
   hardware has no such operation.  */

static int
rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)
    return 0;

  if (code == GE || code == GT)
    max_p = true;
  else if (code == LE || code == LT)
    max_p = false;
  else
    return 0;

  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
    ;

  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
    max_p = !max_p;

  else
    return 0;

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
  return 1;
}
/* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
   XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it is
   zero/false.  Return 0 if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode result_mode = GET_MODE (dest);
  rtx compare_rtx;
  rtx cmove_rtx;
  rtx clobber_rtx;

  if (!can_create_pseudo_p ())
    return 0;

  switch (code)
    {
    case EQ:
    case GE:
    case GT:
      break;

    case NE:
    case LT:
    case LE:
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      return 0;
    }

  /* Generate:	[(parallel [(set (dest)
				 (if_then_else (op (cmp1) (cmp2))
					       (true)
					       (false)))
			    (clobber (scratch))])].  */

  compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
  cmove_rtx = gen_rtx_SET (dest,
			   gen_rtx_IF_THEN_ELSE (result_mode,
						 compare_rtx,
						 true_cond,
						 false_cond));

  clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2, cmove_rtx, clobber_rtx)));

  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* See if we can use the ISA 3.0 (power9) min/max/compare functions.  */
  if (TARGET_P9_MINMAX
      && (compare_mode == SFmode || compare_mode == DFmode)
      && (result_mode == SFmode || result_mode == DFmode))
    {
      if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
	return 1;

      if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
	return 1;
    }

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE
	  || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }
  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
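
/* Editor's sketch (plain C, not part of GCC) of the fsel-style reduction
   above: the only primitive is "op0 >= 0 ? t : f", so an LE-0 test is
   handled by negating op0 first, and EQ-0 by testing -|op0| >= 0.  */
#if 0
static double
demo_fsel (double op0, double t, double f)
{
  return op0 >= 0.0 ? t : f;	/* the one primitive available */
}

static double
demo_le_zero (double op0, double t, double f)
{
  return demo_fsel (-op0, t, f);	/* a <= 0  <->  -a >= 0 */
}
#endif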
/* Same as above, but for ints (isel).  */

int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	std::swap (false_cond, true_cond);
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* Split a signbit operation on 64-bit machines with direct move.  Also allow
   for the value to come from memory or if it is already loaded into a GPR.  */

void
rs6000_split_signbit (rtx dest, rtx src)
{
  machine_mode d_mode = GET_MODE (dest);
  machine_mode s_mode = GET_MODE (src);
  rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
  rtx shift_reg = dest_di;

  gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);

  if (MEM_P (src))
    {
      rtx mem = (WORDS_BIG_ENDIAN
		 ? adjust_address (src, DImode, 0)
		 : adjust_address (src, DImode, 8));
      emit_insn (gen_rtx_SET (dest_di, mem));
    }

  else
    {
      unsigned int r = reg_or_subregno (src);

      if (INT_REGNO_P (r))
	shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));

      else
	{
	  /* Generate the special mfvsrd instruction to get it in a GPR.  */
	  gcc_assert (VSX_REGNO_P (r));
	  if (s_mode == KFmode)
	    emit_insn (gen_signbitkf2_dm2 (dest_di, src));
	  else
	    emit_insn (gen_signbittf2_dm2 (dest_di, src));
	}
    }

  emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
  return;
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_load_lockedqi;
      break;
    case E_HImode:
      fn = gen_load_lockedhi;
      break;
    case E_SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case E_DImode:
      fn = gen_load_lockeddi;
      break;
    case E_TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_store_conditionalqi;
      break;
    case E_HImode:
      fn = gen_store_conditionalhi;
      break;
    case E_SImode:
      fn = gen_store_conditionalsi;
      break;
    case E_DImode:
      fn = gen_store_conditionaldi;
      break;
    case E_TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}
static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
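
/* Editor's worked example (plain C, not part of GCC) of the shift/mask
   computation above for a byte at address ADDR: shift = (addr * 8) masked
   by 0x18 on little-endian, XORed with 0x18 on big-endian, and the
   insertion mask is the mode mask shifted into place.  */
#if 0
#include <stdio.h>

static void
demo_subword_shift (unsigned addr, int big_endian)
{
  unsigned shift = (addr << 3) & 0x18;	/* QImode; 0x10 for HImode */
  if (big_endian)
    shift ^= 0x18;
  unsigned mask = 0xffu << shift;	/* GET_MODE_MASK (QImode) << shift */
  printf ("addr %u -> shift %u mask %#x\n", addr, shift, mask);
}
#endif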
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  machine_mode mode, orig_mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = memmodel_base (INTVAL (operands[6]));
  mod_f = memmodel_base (INTVAL (operands[7]));
  orig_mode = mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
	 lwarx and shift/mask operations.  With power8, we need to do the
	 comparison in SImode, but the store is still done in QI/HImode.  */
      oldval = convert_modes (SImode, mode, oldval, 1);

      if (!TARGET_SYNC_HI_QI)
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask OLDVAL into position with the word.  */
	  oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  /* Shift and mask NEWVAL into position within the word.  */
	  newval = convert_modes (SImode, mode, newval, 1);
	  newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);
	}

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  if (mode != TImode && !reg_or_short_operand (oldval, mode))
    oldval = copy_to_mode_reg (mode, oldval);

  if (reg_overlap_mentioned_p (retval, newval))
    newval = copy_to_reg (newval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);
23653 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23654 emit_label (XEXP (label1
, 0));
23656 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23658 emit_load_locked (mode
, retval
, mem
);
23662 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
23663 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23665 cond
= gen_reg_rtx (CCmode
);
23666 /* If we have TImode, synthesize a comparison. */
23667 if (mode
!= TImode
)
23668 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
23671 rtx xor1_result
= gen_reg_rtx (DImode
);
23672 rtx xor2_result
= gen_reg_rtx (DImode
);
23673 rtx or_result
= gen_reg_rtx (DImode
);
23674 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
23675 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
23676 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
23677 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
23679 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
23680 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
23681 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
23682 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
23685 emit_insn (gen_rtx_SET (cond
, x
));
23687 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23688 emit_unlikely_jump (x
, label2
);
23692 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
23694 emit_store_conditional (orig_mode
, cond
, mem
, x
);
23698 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23699 emit_unlikely_jump (x
, label1
);
23702 if (!is_mm_relaxed (mod_f
))
23703 emit_label (XEXP (label2
, 0));
23705 rs6000_post_atomic_barrier (mod_s
);
23707 if (is_mm_relaxed (mod_f
))
23708 emit_label (XEXP (label2
, 0));
23711 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
23712 else if (mode
!= GET_MODE (operands
[1]))
23713 convert_move (operands
[1], retval
, 1);
23715 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23716 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
23717 emit_insn (gen_rtx_SET (boolval
, x
));
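/* For reference, a sketch of the loop emitted above for a word-sized
   strong compare-and-swap (illustrative only; register numbers are
   arbitrary and the barriers implied by the memory model are omitted):

	.L1:	lwarx   r9,0,r3		# load word and reserve
		cmpw    cr0,r9,r4	# compare against OLDVAL
		bne-    cr0,.L2		# mismatch: fail
		stwcx.  r5,0,r3		# store NEWVAL if still reserved
		bne-    cr0,.L1		# reservation lost: retry
	.L2:				# CR0 is EQ on success, NE on failure
 */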
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = memmodel_base (INTVAL (operands[3]));
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = memmodel_base (INTVAL (model_rtx));
  machine_mode mode = GET_MODE (mem);
  machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
	{
	  val = convert_modes (SImode, mode, val, 1);

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  mode = SImode;
	}
      else
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask VAL into position within the word.  */
	  val = convert_modes (SImode, mode, val, 1);
	  val = expand_simple_binop (SImode, ASHIFT, val, shift,
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  switch (code)
	    {
	    case IOR:
	    case XOR:
	      /* We've already zero-extended VAL.  That is sufficient to
		 make certain that it does not affect other bits.  */
	      mask = NULL;
	      break;

	    case AND:
	      /* If we make certain that all of the other bits in VAL are
		 set, that will be sufficient to not affect other bits.  */
	      x = gen_rtx_NOT (SImode, mask);
	      x = gen_rtx_IOR (SImode, x, val);
	      emit_insn (gen_rtx_SET (val, x));
	      mask = NULL;
	      break;

	    case NOT:
	    case PLUS:
	    case MINUS:
	      /* These will all affect bits outside the field and need
		 adjustment via MASK within the loop.  */
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  store_mode = mode = SImode;
	}
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
				 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }
  else if (store_mode != mode)
    x = convert_modes (store_mode, mode, x, 1);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (store_mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
	 then do the calculations in a SImode register.  */
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (store_mode != mode)
    {
      /* QImode/HImode on machines with lbarx/lharx where we do the native
	 operation and then do the calculations in a SImode register.  */
      if (orig_before)
	convert_move (orig_before, before, 1);
      if (orig_after)
	convert_move (orig_after, after, 1);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
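/* For reference, the loop emitted above for a word-sized atomic
   fetch-and-add looks roughly like this (an illustrative sketch;
   barriers implied by MODEL are not shown):

	.L1:	lwarx   r9,0,r3		# BEFORE = *mem, with reservation
		add     r10,r9,r4	# AFTER = BEFORE + VAL
		stwcx.  r10,0,r3	# conditionally store AFTER
		bne-    cr0,.L1		# retry if the reservation was lost
 */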
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg,
						       delta_rtx, nsrc)
				: gen_movsi_update (breg, breg,
						    delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg,
						    delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg,
						   offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
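/* As a concrete example of the register-to-register path above (an
   illustration, not from the original source): splitting a TImode move
   from r10 to r11 on 64-bit covers GPR pairs r10/r11 and r11/r12.  A
   forward word-by-word copy would clobber r11 before it is read, so
   because REGNO (src) < REGNO (dst) the backwards loop runs instead,
   emitting r12 = r11 first and then r11 = r10.  */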
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Determine whether the REG is really used.  */

static bool
save_reg_p (int reg)
{
  /* We need to mark the PIC offset register live for the same conditions
     as it is set up, or otherwise it won't be saved before we clobber it.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
    {
      /* When calling eh_return, we must return true for all the cases
	 where conditional_register_usage marks the PIC offset reg
	 call used or fixed.  */
      if (TARGET_TOC && TARGET_MINIMAL_TOC
	  && (crtl->calls_eh_return
	      || df_regs_ever_live_p (reg)
	      || !constant_pool_empty_p ()))
	return true;

      if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
	  && flag_pic)
	return true;
    }

  return !call_used_regs[reg] && df_regs_ever_live_p (reg);
}
/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
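/* Example of the mask layout (an illustration only): VRSAVE uses IBM bit
   numbering, so ALTIVEC_REG_BIT gives V0 the most significant bit.  A
   function whose only live vector registers are the call-saved V20..V31
   therefore yields a mask of 0x00000FFF, which is the same value
   hard-coded for the Darwin eh_return case above.  */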
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info)
{
  info->world_save_p = 1;
  info->world_save_p
    = (WORLD_SAVE_P (info)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info))
    {
      rtx_insn *insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (CALL_P (insn) && SIBLING_CALL_P (insn))
	  {
	    info->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it. */
      info->vrsave_size  = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info->vrsave_mask == 0)
	info->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }

  return;
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
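/* The SAVE_* and REST_* bits combine; as one common example (a sketch of
   a typical outcome, not an exhaustive description), a 32-bit -Os
   function saving several GPRs can end up with
   SAVE_INLINE_GPRS | SAVE_MULTIPLE | REST_INLINE_GPRS | REST_MULTIPLE,
   i.e. an inline stmw in the prologue and lmw in the epilogue.  */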
static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;

  /* Select between in-line and out-of-line save and restore of regs.
     First, all the obvious cases where we don't use out-of-line.  */
  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_gp_reg_save == 32)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
	  || info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS);

  /* Don't ever restore fixed regs.  That means we can't use the
     out-of-line register restore functions if a fixed reg is in the
     range of regs restored.  */
  if (!(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* We can only use the out-of-line routines to restore fprs if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  Of course, if we have saved
     out-of-line then we know we haven't skipped any fprs.  */
  if ((strategy & SAVE_INLINE_FPRS)
      && !(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* Similarly, for altivec regs.  */
  if (!(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  if ((strategy & SAVE_INLINE_VRS)
      && !(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gprs).  */
  bool lr_save_p = (info->lr_save_p
		    || !(strategy & SAVE_INLINE_FPRS)
		    || !(strategy & SAVE_INLINE_VRS)
		    || !(strategy & REST_INLINE_FPRS)
		    || !(strategy & REST_INLINE_VRS));

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && info->first_gp_reg_save < 31
      && !(flag_shrink_wrap
	   && flag_shrink_wrap_separate
	   && optimize_function_for_speed_p (cfun)))
    {
      int count = 0;
      for (int i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i))
	  count++;

      if (count <= 1)
	/* Don't use store multiple if only one reg needs to be
	   saved.  This can occur for example when the ABI_V4 pic reg
	   (r30) needs to be saved to make calls, but r31 is not
	   used.  */
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      else
	{
	  /* Prefer store multiple for saves over out-of-line
	     routines, since the store-multiple instruction will
	     always be smaller.  */
	  strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;

	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; In those cases it is advantageous to use load
	     multiple when available.  */
	  if (info->first_fp_reg_save != 64 || !lr_save_p)
	    strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
	}
    }

  /* Using the "exit" out-of-line routine does not improve code size
     if using it would require lr to be saved and if only saving one
     or two gprs.  */
  else if (!lr_save_p && info->first_gp_reg_save > 29)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  /* Don't ever restore fixed regs.  */
  if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (fixed_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  /* We can only use load multiple or the out-of-line routines to
     restore gprs if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.
     Of course, if we have saved out-of-line or used stmw then we know
     we haven't skipped any gprs.  */
  if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
      && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   * If the alloca area is present, the parameter save area is
     padded so that the former starts 16-byte aligned.

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

	SP---->	+---------------------------------------+
		| Back chain to caller			|  0
		+---------------------------------------+
		| Save area for CR			|  8
		+---------------------------------------+
		| Saved LR				|  16
		+---------------------------------------+
		| Saved TOC pointer			|  24
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	|  32
		+---------------------------------------+
		| Alloca space (A)			|  32+P
		+---------------------------------------+
		| Local variable space (L)		|  32+P+A
		+---------------------------------------+
		| Save area for AltiVec registers (W)	|  32+P+A+L
		+---------------------------------------+
		| AltiVec alignment padding (Y)		|  32+P+A+L+W
		+---------------------------------------+
		| Save area for GP registers (G)	|  32+P+A+L+W+Y
		+---------------------------------------+
		| Save area for FP registers (F)	|  32+P+A+L+W+Y+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|  32+P+A+L+W+Y+G+F
		+---------------------------------------+

   * If the alloca area is present, the parameter save area is
     padded so that the former starts 16-byte aligned.

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   * If the alloca area is present and the required alignment is
     16 bytes, the parameter save area is padded so that the
     alloca area starts 16-byte aligned.

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
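/* A worked example of the offset calculations done below (an
   illustration, not part of the original source): a 64-bit ELFv2
   function that saves r30-r31 and f30-f31 gets fp_size = 16 and
   gp_size = 16, so fp_save_offset = -16 and gp_save_offset = -32
   relative to the incoming stack pointer, while cr_save_offset = 8 and
   lr_save_offset = 16 land in the fixed part of the caller's frame
   shown in the ELFv2 diagram above.  */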
#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t stack_info;

static rs6000_stack_t *
rs6000_stack_info (void)
{
  /* We should never be called for thunks, we are not set up for that.  */
  gcc_assert (!cfun->is_thunk);

  rs6000_stack_t *info = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info->reload_completed)
    return info;

  memset (info, 0, sizeof (*info));
  info->reload_completed = reload_completed;

  /* Select which calling sequence.  */
  info->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info->first_gp_reg_save;

  info->gp_size = reg_size * (32 - first_gp);

  info->first_fp_reg_save = first_fp_reg_to_save ();
  info->fp_size = 8 * (64 - info->first_fp_reg_save);

  info->first_altivec_reg_save = first_altivec_reg_to_save ();
  info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
			     - info->first_altivec_reg_save);

  /* Does this function call anything?  */
  info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (save_reg_p (CR2_REGNO)
      || save_reg_p (CR3_REGNO)
      || save_reg_p (CR4_REGNO))
    {
      info->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      ehrd_size = i * UNITS_PER_WORD;
    }
  else
    ehrd_size = 0;

  /* In the ELFv2 ABI, we also need to allocate space for separate
     CR field save areas if the function calls __builtin_eh_return.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      /* This hard-codes that we have three call-saved CR fields.  */
      ehcr_size = 3 * reg_size;
      /* We do *not* use the regular CR save mechanism.  */
      info->cr_save_p = 0;
    }
  else
    ehcr_size = 0;

  /* Determine various sizes.  */
  info->reg_size = reg_size;
  info->fixed_size = RS6000_SAVE_AREA;
  info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  if (cfun->calls_alloca)
    info->parm_size =
      RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
		    STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
  else
    info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
				    TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info->vars_size
      += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info->fixed_size + info->vars_size + info->parm_size);

  if (TARGET_ALTIVEC_ABI)
    info->vrsave_mask = compute_vrsave_mask ();

  if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
    info->vrsave_size = 4;

  compute_save_world_info (info);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = info->vrsave_save_offset & 0xF;

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;
	  gcc_assert (info->altivec_size == 0
		      || info->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset - ehrd_size;
	}
      else
	info->ehrd_offset = info->gp_save_offset - ehrd_size;

      info->ehcr_offset = info->ehrd_offset - ehcr_size;
      info->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;
      info->cr_save_offset = info->gp_save_offset - info->cr_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset;
	}
      else
	info->ehrd_offset = info->cr_save_offset;

      info->ehrd_offset -= ehrd_size;
      info->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info->save_size = RS6000_ALIGN (info->fp_size
				  + info->gp_size
				  + info->altivec_size
				  + info->altivec_padding_size
				  + ehrd_size
				  + ehcr_size
				  + info->cr_size
				  + info->vrsave_size,
				  save_align);

  non_fixed_size = info->vars_size + info->parm_size + info->save_size;

  info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
				   ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info->calls_p
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && !constant_pool_empty_p ())
#endif
      || rs6000_ra_ever_killed ())
    info->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);

  if (!(info->savres_strategy & SAVE_INLINE_GPRS)
      || !(info->savres_strategy & SAVE_INLINE_FPRS)
      || !(info->savres_strategy & SAVE_INLINE_VRS)
      || !(info->savres_strategy & REST_INLINE_GPRS)
      || !(info->savres_strategy & REST_INLINE_FPRS)
      || !(info->savres_strategy & REST_INLINE_VRS))
    info->lr_save_p = 1;

  if (info->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info->calls_p)
    info->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info->push_p = 1;

  else
    info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  return info;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";	break;
    case ABI_NONE:	 abi_string = "NONE";		break;
    case ABI_AIX:	 abi_string = "AIX";		break;
    case ABI_ELFv2:	 abi_string = "ELFv2";		break;
    case ABI_DARWIN:	 abi_string = "Darwin";		break;
    case ABI_V4:	 abi_string = "V.4";		break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_size)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_size)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n",
	     info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = " HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
     is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      if (count == 0)
	/* FRAME is set to frame_pointer_rtx by the generic code, but that
	   is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD.  */
	frame = stack_pointer_rtx;
      rtx prev_frame_addr = memory_address (Pmode, frame);
      rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
      rtx lr_save_off = plus_constant (Pmode,
				       prev_frame, RETURN_ADDRESS_OFFSET);
      rtx lr_save_addr = memory_address (Pmode, lr_save_off);
      return gen_rtx_MEM (Pmode, lr_save_addr);
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg--;

      if (nvreg > 0)
	return false;
    }

  /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
     functions, because the callee may have a different TOC pointer to
     the caller and there's no way to ensure we restore the TOC when
     we return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && decl
	  && !DECL_EXTERNAL (decl)
	  && !DECL_WEAK (decl)
	  && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
	  && (!TARGET_SECURE_PLT
	      || !flag_pic
	      || (decl
		  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
	  || lookup_attribute ("shortcall", attr_list))
	return true;
    }

  return false;
}
static int
rs6000_ra_ever_killed (void)
{
  rtx_insn *top;
  rtx reg;
  rtx_insn *insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.
   Return 2 instead of 1 if the load_toc_* pattern is in the function
   partition that doesn't start the function.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx_insn *insn;
  int ret = 1;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);
	  int i;

	  if (GET_CODE (pat) == PARALLEL)
	    for (i = 0; i < XVECLEN (pat, 0); i++)
	      {
		rtx sub = XVECEXP (pat, 0, i);
		if (GET_CODE (sub) == USE)
		  {
		    sub = XEXP (sub, 0);
		    if (GET_CODE (sub) == UNSPEC
			&& XINT (sub, 1) == UNSPEC_TOC)
		      return ret;
		  }
	      }
	}
      else if (crtl->has_bb_partition
	       && NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	ret = 2;
    }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
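/* With a medium or large code model after reload, the HIGH/LO_SUM pair
   built above typically becomes a two-instruction sequence along these
   lines (an illustrative sketch; register numbers are arbitrary):

	addis   r9,r2,sym@toc@ha	# the HIGH part, into LARGETOC_REG
	ld      r10,sym@toc@l(r9)	# the LO_SUM folded into the access
 */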
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Allocate SIZE_INT bytes on the stack using a store with update style insn
   and set the appropriate attributes for the generated insn.  Return the
   first insn which adjusts the stack pointer or the last insn before
   the stack adjustment loop.

   SIZE_INT is used to create the CFI note for the allocation.

   SIZE_RTX is an rtx containing the size of the adjustment.  Note that
   since stacks grow to lower addresses its runtime value is -SIZE_INT.

   ORIG_SP contains the backchain value that must be stored at *sp.  */

static rtx_insn *
rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
{
  rtx_insn *insn;

  rtx size_rtx = GEN_INT (-size_int);
  if (size_int > 32767)
    {
      rtx tmp_reg = gen_rtx_REG (Pmode, 0);
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, size_rtx);
      try_split (PATTERN (insn), insn, 0);
      size_rtx = tmp_reg;
    }

  if (Pmode == SImode)
    insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
					      stack_pointer_rtx,
					      size_rtx,
					      orig_sp));
  else
    insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
						 stack_pointer_rtx,
						 size_rtx,
						 orig_sp));
  rtx par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  rtx set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  rtx mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (stack_pointer_rtx,
			     gen_rtx_PLUS (Pmode,
					   stack_pointer_rtx,
					   GEN_INT (-size_int))));

  /* Emit a blockage to ensure the allocation/probing insns are
     not optimized, combined, removed, etc.  Add REG_STACK_CHECK
     note for similar reasons.  */
  if (flag_stack_clash_protection)
    {
      add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
      emit_insn (gen_blockage ());
    }

  return insn;
}
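/* A sketch of what the store-with-update allocation looks like on a
   64-bit target (sizes are illustrative, not taken from real output):

     stdu 1,-144(1)	# small frame: sp -= 144; *sp = old sp (backchain)

   and for frames larger than 32767 bytes, with the size built in r0:

     lis 0,-2		# r0 = -131072
     stdux 1,1,0	# sp += r0; *sp = backchain  */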
static HOST_WIDE_INT
get_stack_clash_protection_probe_interval (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
}

static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
}
/* Allocate ORIG_SIZE bytes on the stack and probe the newly
   allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.

   COPY_REG, if non-null, should contain a copy of the original
   stack pointer at exit from this function.

   This is subtly different than the Ada probing in that it tries hard to
   prevent attacks that jump the stack guard.  Thus it is never allowed to
   allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
   space without a suitable probe.  */
static rtx_insn *
rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
					   rtx copy_reg)
{
  rtx orig_sp = copy_reg;

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  /* Round the size down to a multiple of PROBE_INTERVAL.  */
  HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);

  /* If explicitly requested,
       or the rounded size is not the same as the original size,
       or the rounded size is greater than a page,
     then we will need a copy of the original stack pointer.  */
  if (rounded_size != orig_size
      || rounded_size > probe_interval
      || copy_reg)
    {
      /* If the caller did not request a copy of the incoming stack
	 pointer, then we use r0 to hold the copy.  */
      if (!copy_reg)
	orig_sp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (orig_sp, stack_pointer_rtx);
    }

  /* There are three cases here.

     One is a single probe which is the most common and most efficiently
     implemented as it does not have to have a copy of the original
     stack pointer if there are no residuals.

     Second is unrolled allocation/probes which we use if there's just
     a few of them.  It needs to save the original stack pointer into a
     temporary for use as a source register in the allocation/probe.

     Last is a loop.  This is the most uncommon case and least efficient.  */
  rtx_insn *retval = NULL;
  if (rounded_size == probe_interval)
    {
      retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else if (rounded_size <= 8 * probe_interval)
    {
      /* The ABI requires using the store with update insns to allocate
	 space and store the backchain into the stack.

	 So we save the current stack pointer into a temporary, then
	 emit the store-with-update insns to store the saved stack pointer
	 into the right location in each new page.  */
      for (int i = 0; i < rounded_size; i += probe_interval)
	{
	  rtx_insn *insn
	    = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);

	  /* Save the first stack adjustment in RETVAL.  */
	  if (i == 0)
	    retval = insn;
	}

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else
    {
      /* Compute the ending address.  */
      rtx end_addr
	= copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
      rtx rs = GEN_INT (-rounded_size);
      rtx_insn *insn;
      if (add_operand (rs, Pmode))
	insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
      else
	{
	  emit_move_insn (end_addr, GEN_INT (-rounded_size));
	  insn = emit_insn (gen_add3_insn (end_addr, end_addr,
					   stack_pointer_rtx));
	  /* Describe the effect of INSN to the CFI engine.  */
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (end_addr,
				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						   rs)));
	}
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Emit the loop.  */
      if (TARGET_64BIT)
	retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
						     stack_pointer_rtx, orig_sp,
						     end_addr));
      else
	retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
						     stack_pointer_rtx, orig_sp,
						     end_addr));
      RTX_FRAME_RELATED_P (retval) = 1;
      /* Describe the effect of INSN to the CFI engine.  */
      add_reg_note (retval, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx, end_addr));

      /* Emit a blockage to ensure the allocation/probing insns are
	 not optimized, combined, removed, etc.  Other cases handle this
	 within their call to rs6000_emit_allocate_stack_1.  */
      emit_insn (gen_blockage ());

      dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
    }

  if (orig_size != rounded_size)
    {
      /* Allocate (and implicitly probe) any residual space.  */
      HOST_WIDE_INT residual = orig_size - rounded_size;
      rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);

      /* If the residual was the only allocation, then we can return the
	 allocating insn.  */
      if (!retval)
	retval = insn;
    }

  return retval;
}
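/* Illustrative mapping of the three strategies, assuming a 4kB probe
   interval (the interval is really a --param): a 4kB frame is one
   store-with-update; frames up to 8 intervals are a short run of
   unrolled store-with-updates sourced from the saved sp; larger frames
   fall into the probe_stack_range loop printed by
   output_probe_stack_range_stack_clash below.  */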
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static rtx_insn *
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx_insn *insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return 0;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  rtx_insn *insn
	    = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
	  gcc_assert (insn);
	  emit_insn (insn);
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4
	       && !flag_pic)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (flag_stack_clash_protection)
    {
      if (size < get_stack_clash_protection_guard_size ())
	dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      else
	{
	  rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
								      copy_reg);

	  /* If we asked for a copy with an offset, then we still need to add
	     in the offset.  */
	  if (copy_reg && copy_off)
	    emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
	  return insn;
	}
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
  return insn;
}
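/* Hypothetical usage from prologue code: allocate the whole frame while
   leaving a copy of the incoming stack pointer in r11 for the register
   save code (the register choice here is illustrative):

     sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					     gen_rtx_REG (Pmode, 11), 0);  */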
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
						  -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
						   -rounded_size)));

      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
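/* For example (illustrative asm), FIRST == 16384 and SIZE == 8192 with a
   4kB interval take the small-constant path above and emit two probes
   rather than a loop, roughly:

     std 0,-20480(1)
     std 0,-24576(1)  */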
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   addresses, not offsets.  */

static const char *
output_probe_stack_range_1 (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  /* Branch.  */
  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
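/* The loop printed above looks roughly like this on a 64-bit target
   (label name illustrative):

   .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpd 0,12,0
	bne 0,.LPSRL0  */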
/* This function is called when rs6000_frame_related is processing
   SETs within a PARALLEL, and returns whether the REGNO save ought to
   be marked RTX_FRAME_RELATED_P.  The PARALLELs involved are those
   for out-of-line register save functions, store multiple, and the
   Darwin world_save.  They may contain registers that don't really
   need saving.  */

static bool
interesting_frame_related_regno (unsigned int regno)
{
  /* Saves apparently of r0 are actually saving LR.  It doesn't make
     sense to substitute the regno here to test save_reg_p (LR_REGNO).
     We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
     (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
     as frame related.  */
  if (regno == 0)
    return true;

  /* If we see CR2 then we are here on a Darwin world save.  Saves of
     CR2 signify the whole CR is being saved.  This is a long-standing
     ABI wart fixed by ELFv2.  As for r0/lr there is no need to check
     that CR needs to be saved.  */
  if (regno == CR2_REGNO)
    return true;

  /* Omit frame info for any user-defined global regs.  If frame info
     is supplied for them, frame unwinding will restore a user reg.
     Also omit frame info for any reg we don't need to save, as that
     bloats frame info and can cause problems with shrink wrapping.
     Since global regs won't be seen as needing to be saved, both of
     these conditions are covered by save_reg_p.  */
  return save_reg_p (regno);
}
/* Probe a range of stack addresses from REG1 to REG3 inclusive.  These are
   addresses, not offsets.

   REG2 contains the backchain that must be stored into *sp at each allocation.

   This is subtly different than the Ada probing above in that it tries hard
   to prevent attacks that jump the stack guard.  Thus, it is never allowed
   to allocate more than PROBE_INTERVAL bytes of stack space without a
   suitable probe.  */

static const char *
output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* This allocates and probes.  */
  xops[0] = reg1;
  xops[1] = reg2;
  xops[2] = GEN_INT (-probe_interval);
  if (TARGET_64BIT)
    output_asm_insn ("stdu %1,%2(%0)", xops);
  else
    output_asm_insn ("stwu %1,%2(%0)", xops);

  /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg3;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
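/* Roughly, with the backchain in r12 and the loop bound in r0
   (illustrative registers, 4kB interval):

   .LPSRL1:
	stdu 12,-4096(1)	# allocate one interval; store backchain
	cmpd 0,1,0
	bne 0,.LPSRL1  */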
/* Wrapper around the output_probe_stack_range routines.  */
const char *
output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
{
  if (flag_stack_clash_protection)
    return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
  else
    return output_probe_stack_range_1 (reg1, reg3);
}
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx_insn *
rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx repl2)
{
  rtx repl;

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    {
      gcc_checking_assert (val == 0);
      repl = NULL_RTX;
    }
  else
    repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			 GEN_INT (val));

  rtx pat = PATTERN (insn);
  if (!repl && !reg2)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      if (GET_CODE (pat) == PARALLEL)
	for (int i = 0; i < XVECLEN (pat, 0); i++)
	  if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	    {
	      rtx set = XVECEXP (pat, 0, i);

	      if (!REG_P (SET_SRC (set))
		  || interesting_frame_related_regno (REGNO (SET_SRC (set))))
		RTX_FRAME_RELATED_P (set) = 1;
	    }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* We expect that 'pat' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.
     Call simplify_replace_rtx on the SETs rather than the whole insn
     so as to leave the other stuff alone (for example USE of r12).  */

  set_used_flags (pat);
  if (GET_CODE (pat) == SET)
    {
      if (repl)
	pat = simplify_replace_rtx (pat, reg, repl);
      if (reg2)
	pat = simplify_replace_rtx (pat, reg2, repl2);
    }
  else if (GET_CODE (pat) == PARALLEL)
    {
      pat = shallow_copy_rtx (pat);
      XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));

      for (int i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (pat, 0, i);

	    if (repl)
	      set = simplify_replace_rtx (set, reg, repl);
	    if (reg2)
	      set = simplify_replace_rtx (set, reg2, repl2);
	    XVECEXP (pat, 0, i) = set;

	    if (!REG_P (SET_SRC (set))
		|| interesting_frame_related_regno (REGNO (SET_SRC (set))))
	      RTX_FRAME_RELATED_P (set) = 1;
	  }
    }
  else
    gcc_unreachable ();

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));

  return insn;
}
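/* Schematic example: if INSN stores r31 through a temporary frame pointer
   set up as r11 = r1 + 0x8000, the note attached above rewrites the
   address back in terms of the stack pointer, e.g.

     (set (mem (plus (reg 1) (const_int 0x7ff8))) (reg 31))

   so that dwarf2cfi sees a save at a fixed offset from the CFA.  */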
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

     	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
			 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
	abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
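/* Example results, for illustration: 32-bit SVR4 yields "_savegpr_29" or
   "_restgpr_29_x" (the "_x" exit form also restores LR); 64-bit ELF
   yields "_savegpr0_29" (saves LR) vs "_savegpr1_29" (does not); Darwin
   yields strings like "*saveFP+8 ; save f16-f31".  */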
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx_insn *
rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  /* If there is nothing to do, don't do anything.  */
  if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
    return NULL;

  rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4)
    return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
					     GEN_INT (frame_off)));

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));

  return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
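/* In tabular form, the choice made above is:
     AIX and ELFv2:  r1 for FPR routines or any routine touching LR,
		     otherwise r12;
     Darwin:	     r1 for FPR routines, otherwise r11;
     32-bit SVR4:    r11.  */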
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx_insn *
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par;
  rtx_insn *insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
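/* Schematically, a GPR save call built here looks like the following
   (element order and the trailing LR store depend on SEL; the symbol
   name and offsets are illustrative):

     (parallel [(clobber (reg:P LR_REGNO))
		(use (symbol_ref "_savegpr0_29"))
		(use (reg:P 11))
		(set (mem (plus (reg 11) (const_int -24))) (reg 29))
		...])  */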
/* Emit prologue code to store CR fields that need to be saved into REG.  This
   function should only be called when moving the non-volatile CRs to REG, it
   is not a general purpose routine to move the entire set of CRs to REG.
   Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
   volatile CRs.  */

static void
rs6000_emit_prologue_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_prologue_movesi_from_cr (reg));
}
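/* On ELFv2 the single-field case above assembles to one move of just
   that field, e.g. (illustrative) "mfocrf 12,32" for CR2, where 32 is
   the FXM mask 1 << (7 - 2); the fallback path is a full "mfcr".  */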
/* Return whether the split-stack arg pointer (r12) is used.  */

static bool
split_stack_arg_pointer_used_p (void)
{
  /* If the pseudo holding the arg pointer is no longer a pseudo,
     then the arg pointer is used.  */
  if (cfun->machine->split_stack_arg_pointer != NULL_RTX
      && (!REG_P (cfun->machine->split_stack_arg_pointer)
	  || (REGNO (cfun->machine->split_stack_arg_pointer)
	      < FIRST_PSEUDO_REGISTER)))
    return true;

  /* Unfortunately we also need to do some code scanning, since
     r12 may have been substituted for the pseudo.  */
  rtx_insn *insn;
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	/* A call destroys r12.  */
	if (CALL_P (insn))
	  return false;

	df_ref use;
	FOR_EACH_INSN_USE (use, insn)
	  {
	    rtx x = DF_REF_REG (use);
	    if (REG_P (x) && REGNO (x) == 12)
	      return true;
	  }
	df_ref def;
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    rtx x = DF_REF_REG (def);
	    if (REG_P (x) && REGNO (x) == 12)
	      return false;
	  }
      }
  return bitmap_bit_p (DF_LR_OUT (bb), 12);
}
/* Return whether we need to emit an ELFv2 global entry point prologue.  */

static bool
rs6000_global_entry_point_needed_p (void)
{
  /* Only needed for the ELFv2 ABI.  */
  if (DEFAULT_ABI != ABI_ELFv2)
    return false;

  /* With -msingle-pic-base, we assume the whole program shares the same
     TOC, so no global entry point prologues are needed anywhere.  */
  if (TARGET_SINGLE_PIC_BASE)
    return false;

  /* Ensure we have a global entry point for thunks.  ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (cfun->is_thunk)
    return true;

  /* For regular functions, rs6000_emit_prologue sets this flag if the
     routine ever uses the TOC pointer.  */
  return cfun->machine->r2_setup_needed;
}
/* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS.  */
static sbitmap
rs6000_get_separate_components (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (WORLD_SAVE_P (info))
    return NULL;

  gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
	      && !(info->savres_strategy & REST_MULTIPLE));

  /* Component 0 is the save/restore of LR (done via GPR0).
     Component 2 is the save of the TOC (GPR2).
     Components 13..31 are the save/restore of GPR13..GPR31.
     Components 46..63 are the save/restore of FPR14..FPR31.  */

  cfun->machine->n_components = 64;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  int reg_size = TARGET_32BIT ? 4 : 8;
  int fp_reg_size = 8;

  /* The GPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS))
    {
      int offset = info->gp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff)
	      && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += reg_size;
	}
    }

  /* Don't mess with the hard frame pointer.  */
  if (frame_pointer_needed)
    bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);

  /* Don't mess with the fixed TOC register.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC)
      || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
      || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
    bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);

  /* The FPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS))
    {
      int offset = info->fp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += fp_reg_size;
	}
    }

  /* Optimize LR save and restore if we can.  This is component 0.  Any
     out-of-line register save/restore routines need LR.  */
  if (info->lr_save_p
      && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
      && (info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS)
      && (info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS)
      && (info->savres_strategy & SAVE_INLINE_VRS)
      && (info->savres_strategy & REST_INLINE_VRS))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;
      if (IN_RANGE (offset, -0x8000, 0x7fff))
	bitmap_set_bit (components, 0);
    }

  /* Optimize saving the TOC.  This is component 2.  */
  if (cfun->machine->save_toc_in_prologue)
    bitmap_set_bit (components, 2);

  return components;
}
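/* Worked example (hypothetical function): if only r30, r31 and LR are
   saved and their slots are within reach of r1, the returned sbitmap has
   bits 30, 31 and 0 set, and each of those saves/restores can then be
   shrink-wrapped independently of the rest of the prologue.  */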
/* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB.  */
static sbitmap
rs6000_components_for_bb (basic_block bb)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  bitmap in = DF_LIVE_IN (bb);
  bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
  bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  /* A register is used in a bb if it is in the IN, GEN, or KILL sets.  */

  /* GPRs.  */
  for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* FPRs.  */
  for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* The link register.  */
  if (bitmap_bit_p (in, LR_REGNO)
      || bitmap_bit_p (gen, LR_REGNO)
      || bitmap_bit_p (kill, LR_REGNO))
    bitmap_set_bit (components, 0);

  /* The TOC save.  */
  if (bitmap_bit_p (in, TOC_REGNUM)
      || bitmap_bit_p (gen, TOC_REGNUM)
      || bitmap_bit_p (kill, TOC_REGNUM))
    bitmap_set_bit (components, 2);

  return components;
}
/* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS.  */
static void
rs6000_disqualify_components (sbitmap components, edge e,
			      sbitmap edge_components, bool /*is_prologue*/)
{
  /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
     live where we want to place that code.  */
  if (bitmap_bit_p (edge_components, 0)
      && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
    {
      if (dump_file)
	fprintf (dump_file, "Disqualifying LR because GPR0 is live "
		 "on entry to bb %d\n", e->dest->index);
      bitmap_clear_bit (components, 0);
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS.  */
static void
rs6000_emit_prologue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Prologue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_REGISTER, NULL);

      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
      rtx mem = copy_rtx (SET_DEST (single_set (insn)));
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
    }

  /* Prologue for TOC.  */
  if (bitmap_bit_p (components, 2))
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
    }

  /* Prologue for the GPRs.  */
  int offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += reg_size;
    }

  /* Prologue for the FPRs.  */
  offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += fp_reg_size;
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS.  */
static void
rs6000_emit_epilogue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;

  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Epilogue for the FPRs.  */
  int offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += fp_reg_size;
    }

  /* Epilogue for the GPRs.  */
  offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += reg_size;
    }

  /* Epilogue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));

      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      insn = emit_move_insn (lr, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_RESTORE, lr);
    }
}
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */
static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;

  if (bitmap_bit_p (components, 2))
    cfun->machine->toc_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */
static void
emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
		      HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  /* Get VRSAVE into a GPR.  */
  rtx reg = gen_rtx_REG (SImode, save_regno);
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
  if (TARGET_MACHO)
    emit_insn (gen_get_vrsave_internal (reg));
  else
    emit_insn (gen_rtx_SET (reg, vrsave));

  /* Save VRSAVE.  */
  int offset = info->vrsave_save_offset + frame_off;
  emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

  /* Include the registers in the mask.  */
  emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));

  emit_insn (generate_set_vrsave (reg, info, 0));
}
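/* The emitted sequence is roughly (illustrative register and mask, for a
   function using v20..v31):

     mfvrsave 11		# old VRSAVE into a GPR
     stw 11,-224(1)		# save it in the frame
     ori 11,11,0xfff		# add this function's vector mask
     mtvrsave 11  */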
/* Set up the arg pointer (r12) for -fsplit-stack code.  If __morestack was
   called, it left the arg pointer to the old stack in r29.  Otherwise, the
   arg pointer is the top of the current frame.  */
static void
emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
			   HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  cfun->machine->split_stack_argp_used = true;

  if (sp_adjust)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
      emit_insn_before (set_r12, sp_adjust);
    }
  else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      if (frame_off == 0)
	emit_move_insn (r12, frame_reg_rtx);
      else
	emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
    }

  if (info->push_p)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r29 = gen_rtx_REG (Pmode, 29);
      rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
      rtx not_more = gen_label_rtx ();
      rtx jump;

      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, not_more),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = not_more;
      LABEL_NUSES (not_more) += 1;
      emit_move_insn (r12, r29);
      emit_label (not_more);
    }
}
27028 /* Emit function prologue as insns. */
27031 rs6000_emit_prologue (void)
27033 rs6000_stack_t
*info
= rs6000_stack_info ();
27034 machine_mode reg_mode
= Pmode
;
27035 int reg_size
= TARGET_32BIT
? 4 : 8;
27036 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
27038 int fp_reg_size
= 8;
27039 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
27040 rtx frame_reg_rtx
= sp_reg_rtx
;
27041 unsigned int cr_save_regno
;
27042 rtx cr_save_rtx
= NULL_RTX
;
27045 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
27046 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
27047 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
27048 int using_split_stack
= (flag_split_stack
27049 && (lookup_attribute ("no_split_stack",
27050 DECL_ATTRIBUTES (cfun
->decl
))
27053 /* Offset to top of frame for frame_reg and sp respectively. */
27054 HOST_WIDE_INT frame_off
= 0;
27055 HOST_WIDE_INT sp_off
= 0;
27056 /* sp_adjust is the stack adjusting instruction, tracked so that the
27057 insn setting up the split-stack arg pointer can be emitted just
27058 prior to it, when r12 is not used here for other purposes. */
27059 rtx_insn
*sp_adjust
= 0;
27062 /* Track and check usage of r0, r11, r12. */
27063 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
27064 #define START_USE(R) do \
27066 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27067 reg_inuse |= 1 << (R); \
27069 #define END_USE(R) do \
27071 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27072 reg_inuse &= ~(1 << (R)); \
27074 #define NOT_INUSE(R) do \
27076 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27079 #define START_USE(R) do {} while (0)
27080 #define END_USE(R) do {} while (0)
27081 #define NOT_INUSE(R) do {} while (0)
27084 if (DEFAULT_ABI
== ABI_ELFv2
27085 && !TARGET_SINGLE_PIC_BASE
)
27087 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
27089 /* With -mminimal-toc we may generate an extra use of r2 below. */
27090 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
27091 && !constant_pool_empty_p ())
27092 cfun
->machine
->r2_setup_needed
= true;
27096 if (flag_stack_usage_info
)
27097 current_function_static_stack_size
= info
->total_size
;
27099 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
27101 HOST_WIDE_INT size
= info
->total_size
;
27103 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
27105 if (size
> PROBE_INTERVAL
&& size
> get_stack_check_protect ())
27106 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27107 size
- get_stack_check_protect ());
27110 rs6000_emit_probe_stack_range (get_stack_check_protect (), size
);
27113 if (TARGET_FIX_AND_CONTINUE
)
27115 /* gdb on darwin arranges to forward a function from the old
27116 address by modifying the first 5 instructions of the function
27117 to branch to the overriding function. This is necessary to
27118 permit function pointers that point to the old function to
27119 actually forward to the new function. */
27120 emit_insn (gen_nop ());
27121 emit_insn (gen_nop ());
27122 emit_insn (gen_nop ());
27123 emit_insn (gen_nop ());
27124 emit_insn (gen_nop ());
27127 /* Handle world saves specially here. */
27128 if (WORLD_SAVE_P (info
))
27135 /* save_world expects lr in r0. */
27136 reg0
= gen_rtx_REG (Pmode
, 0);
27137 if (info
->lr_save_p
)
27139 insn
= emit_move_insn (reg0
,
27140 gen_rtx_REG (Pmode
, LR_REGNO
));
27141 RTX_FRAME_RELATED_P (insn
) = 1;
27144 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27145 assumptions about the offsets of various bits of the stack
27147 gcc_assert (info
->gp_save_offset
== -220
27148 && info
->fp_save_offset
== -144
27149 && info
->lr_save_offset
== 8
27150 && info
->cr_save_offset
== 4
27153 && (!crtl
->calls_eh_return
27154 || info
->ehrd_offset
== -432)
27155 && info
->vrsave_save_offset
== -224
27156 && info
->altivec_save_offset
== -416);
27158 treg
= gen_rtx_REG (SImode
, 11);
27159 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
27161 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27162 in R11. It also clobbers R12, so beware! */
27164 /* Preserve CR2 for save_world prologues */
27166 sz
+= 32 - info
->first_gp_reg_save
;
27167 sz
+= 64 - info
->first_fp_reg_save
;
27168 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
27169 p
= rtvec_alloc (sz
);
27171 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
27172 gen_rtx_REG (SImode
,
27174 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
27175 gen_rtx_SYMBOL_REF (Pmode
,
27177 /* We do floats first so that the instruction pattern matches
27179 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
27181 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
27183 info
->first_fp_reg_save
+ i
),
27185 info
->fp_save_offset
+ frame_off
+ 8 * i
);
27186 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
27188 = gen_frame_store (gen_rtx_REG (V4SImode
,
27189 info
->first_altivec_reg_save
+ i
),
27191 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
27192 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27194 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27196 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27198 /* CR register traditionally saved as CR2. */
27200 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
27201 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
27202 /* Explain about use of R0. */
27203 if (info
->lr_save_p
)
27205 = gen_frame_store (reg0
,
27206 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
27207 /* Explain what happens to the stack pointer. */
27209 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
27210 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
27213 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27214 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27215 treg
, GEN_INT (-info
->total_size
));
27216 sp_off
= frame_off
= info
->total_size
;
27219 strategy
= info
->savres_strategy
;
27221 /* For V.4, update stack before we do any saving and set back pointer. */
27222 if (! WORLD_SAVE_P (info
)
27224 && (DEFAULT_ABI
== ABI_V4
27225 || crtl
->calls_eh_return
))
27227 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
27228 || !(strategy
& SAVE_INLINE_GPRS
)
27229 || !(strategy
& SAVE_INLINE_VRS
));
27230 int ptr_regno
= -1;
27231 rtx ptr_reg
= NULL_RTX
;
27234 if (info
->total_size
< 32767)
27235 frame_off
= info
->total_size
;
27238 else if (info
->cr_save_p
27240 || info
->first_fp_reg_save
< 64
27241 || info
->first_gp_reg_save
< 32
27242 || info
->altivec_size
!= 0
27243 || info
->vrsave_size
!= 0
27244 || crtl
->calls_eh_return
)
27248 /* The prologue won't be saving any regs so there is no need
27249 to set up a frame register to access any frame save area.
27250 We also won't be using frame_off anywhere below, but set
27251 the correct value anyway to protect against future
27252 changes to this function. */
27253 frame_off
= info
->total_size
;
27255 if (ptr_regno
!= -1)
27257 /* Set up the frame offset to that needed by the first
27258 out-of-line save function. */
27259 START_USE (ptr_regno
);
27260 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27261 frame_reg_rtx
= ptr_reg
;
27262 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
27263 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
27264 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
27265 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
27266 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
27267 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27268 frame_off
= -ptr_off
;
27270 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27272 if (REGNO (frame_reg_rtx
) == 12)
27274 sp_off
= info
->total_size
;
27275 if (frame_reg_rtx
!= sp_reg_rtx
)
27276 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27279 /* If we use the link register, get it into r0. */
27280 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
27281 && !cfun
->machine
->lr_is_wrapped_separately
)
27283 rtx addr
, reg
, mem
;
27285 reg
= gen_rtx_REG (Pmode
, 0);
27287 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27288 RTX_FRAME_RELATED_P (insn
) = 1;
27290 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
27291 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
27293 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27294 GEN_INT (info
->lr_save_offset
+ frame_off
));
27295 mem
= gen_rtx_MEM (Pmode
, addr
);
27296 /* This should not be of rs6000_sr_alias_set, because of
27297 __builtin_return_address. */
27299 insn
= emit_move_insn (mem
, reg
);
27300 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27301 NULL_RTX
, NULL_RTX
);
27306 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27307 r12 will be needed by out-of-line gpr restore. */
27308 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27309 && !(strategy
& (SAVE_INLINE_GPRS
27310 | SAVE_NOINLINE_GPRS_SAVES_LR
))
27312 if (!WORLD_SAVE_P (info
)
27314 && REGNO (frame_reg_rtx
) != cr_save_regno
27315 && !(using_static_chain_p
&& cr_save_regno
== 11)
27316 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
27318 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
27319 START_USE (cr_save_regno
);
27320 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27323 /* Do any required saving of fpr's. If only one or two to save, do
27324 it ourselves. Otherwise, call function. */
27325 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
27327 int offset
= info
->fp_save_offset
+ frame_off
;
27328 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
27331 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
27332 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
27333 sp_off
- frame_off
);
27335 offset
+= fp_reg_size
;
27338 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
27340 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27341 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27342 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27343 rtx ptr_reg
= frame_reg_rtx
;
27345 if (REGNO (frame_reg_rtx
) == ptr_regno
)
27346 gcc_checking_assert (frame_off
== 0);
27349 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27350 NOT_INUSE (ptr_regno
);
27351 emit_insn (gen_add3_insn (ptr_reg
,
27352 frame_reg_rtx
, GEN_INT (frame_off
)));
27354 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27355 info
->fp_save_offset
,
27356 info
->lr_save_offset
,
27358 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
27359 NULL_RTX
, NULL_RTX
);
27364 /* Save GPRs. This is done as a PARALLEL if we are using
27365 the store-multiple instructions. */
27366 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
27368 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
27369 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
27370 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27371 rtx ptr_reg
= frame_reg_rtx
;
27372 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
27373 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
27376 if (ptr_regno
== 12)
27379 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27381 /* Need to adjust r11 (r12) if we saved any FPRs. */
27382 if (end_save
+ frame_off
!= 0)
27384 rtx offset
= GEN_INT (end_save
+ frame_off
);
27387 frame_off
= -end_save
;
27389 NOT_INUSE (ptr_regno
);
27390 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27392 else if (!ptr_set_up
)
27394 NOT_INUSE (ptr_regno
);
27395 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27397 ptr_off
= -end_save
;
27398 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27399 info
->gp_save_offset
+ ptr_off
,
27400 info
->lr_save_offset
+ ptr_off
,
27402 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
27403 NULL_RTX
, NULL_RTX
);
27407 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
27411 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
27412 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27414 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27416 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27417 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27418 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27419 NULL_RTX
, NULL_RTX
);
27421 else if (!WORLD_SAVE_P (info
))
27423 int offset
= info
->gp_save_offset
+ frame_off
;
27424 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
27427 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
27428 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
27429 sp_off
- frame_off
);
27431 offset
+= reg_size
;
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	}

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  rtx set
	    = gen_frame_store (gen_rtx_REG (reg_mode, regno),
			       sp_reg_rtx,
			       info->ehrd_offset + sp_off + reg_size * (int) i);
	  RTVEC_ELT (p, i) = set;
	  RTX_FRAME_RELATED_P (set) = 1;
	}

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }
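
  /* Aside (illustrative, assuming the usual rs6000 definition of
     EH_RETURN_DATA_REGNO, which maps N = 0..3 to r3..r6 and returns
     INVALID_REGNUM beyond that): the first loop above merely counts
     the EH data registers, and the second stores each one into the
     ehrd area of the frame.  */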
  /* In AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx join_insn, note;
      rtx_insn *save_insn;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
	{
	  START_USE (0);
	  emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
	}
      else
	START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at instruction to which this function returns.  If it's
	 restoring r2, then we know we've already saved r2.  We can't
	 unconditionally save r2 because the value we have will already
	 be updated if we arrived at this function via a plt call or
	 toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
      toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
			  + RS6000_TOC_SAVE_SLOT);
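      /* For reference (a sketch, assuming the standard PowerPC opcode
	 layout): 0x80410000 is the base encoding of "lwz r2,0(r1)" and
	 0xE8410000 of "ld r2,0(r1)", so adding RS6000_TOC_SAVE_SLOT
	 yields exactly the load of r2 from its stack slot that a
	 toc-adjusting stub returns through.  */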
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (compare_result,
			      gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_EQ (VOIDmode, compare_result,
					       const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
				   TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
				   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
	 have a CFG that has different saves along different paths.
	 Move the note to a dummy blockage insn, which describes that
	 R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
	 containing both the branch and the store.  That might give the
	 code that minimizes the number of DW_CFA_advance opcodes more
	 freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
	remove_note (save_insn, note);
      else
	note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
			       copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
	{
	  emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
	  END_USE (0);
	}
      else
	END_USE (11);
    }
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
	{
	  START_USE (0);
	  cr_save_rtx = gen_rtx_REG (SImode, 0);
	  rs6000_emit_prologue_move_from_cr (cr_save_rtx);
	}

      /* Saving CR requires a two-instruction sequence: one instruction
	 to move the CR to a general-purpose register, and a second
	 instruction that stores the GPR to memory.

	 We do not emit any DWARF CFI records for the first of these,
	 because we cannot properly represent the fact that CR is saved in
	 a register.  One reason is that we cannot express that multiple
	 CR fields are saved; another reason is that on 64-bit, the size
	 of the CR register in DWARF (4 bytes) differs from the size of
	 a general-purpose register.

	 This means if any intervening instruction were to clobber one of
	 the call-saved CR fields, we'd have incorrect CFI.  To prevent
	 this from happening, we mark the store to memory as a use of
	 those CR fields, which prevents any such instruction from being
	 scheduled in between the two instructions.  */
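
      /* Sketch of the sequence built below (illustrative only):
	     mfcr  rT           ; rs6000_emit_prologue_move_from_cr
	     stw   rT,off(r1)   ; the store, carrying USEs of the
				; call-saved CR fields in a PARALLEL  */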
      rtx crsave_v[9];
      int n_crsave = 0;
      int i;

      crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  crsave_v[n_crsave++]
	    = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crsave, crsave_v)));
      END_USE (REGNO (cr_save_rtx));

      /* Now, there's no way that dwarf2out_frame_debug_expr is going to
	 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
	 so we need to construct a frame expression manually.  */
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Update address to be stack-pointer relative, like
	 rs6000_frame_related would do.  */
      addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			   GEN_INT (info->cr_save_offset + sp_off));
      mem = gen_frame_mem (SImode, addr);

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* In the ELFv2 ABI we generate separate CFI records for each
	     CR field that was actually saved.  They all point to the
	     same 32-bit stack slot.  */
	  rtx crframe[8];
	  int n_crframe = 0;

	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      {
		crframe[n_crframe]
		  = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
		n_crframe++;
	      }

	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crframe, crframe)));
	}
      else
	{
	  /* In other ABIs, by convention, we use a single CR regnum to
	     represent the fact that all call-saved CR fields are saved.
	     We use CR2_REGNO to be compatible with gcc-2.95 on Linux.  */
	  rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
	}
    }
  /* In the ELFv2 ABI we need to save all call-saved CR fields into
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;
      rtx crsave;

      /* ??? We might get better performance by using multiple mfocrf
	 instructions.  */
      crsave = gen_rtx_REG (SImode, 0);
      emit_insn (gen_prologue_movesi_from_cr (crsave));

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtvec p = rtvec_alloc (2);
	    RTVEC_ELT (p, 0)
	      = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
	    RTVEC_ELT (p, 1)
	      = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

	    insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

	    RTX_FRAME_RELATED_P (insn) = 1;
	    add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			  gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
					   sp_reg_rtx, cr_off + sp_off));

	    cr_off += reg_size;
	  }
    }
  /* If we are emitting stack probes, but allocate no stack, then
     just note that in the dump file.  */
  if (flag_stack_clash_protection
      && dump_file
      && !info->push_p)
    dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_size != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					      ptr_reg, ptr_off);
      if (REGNO (frame_reg_rtx) == 12)
	sp_adjust = 0;
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info)
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      NOT_INUSE (0);
      if (scratch_regno == 12)
	sp_adjust = 0;
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info)
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    HOST_WIDE_INT offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
	      {
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx,
						   GEN_INT (offset)));
		insn = emit_insn (gen_rtx_SET (mem, savereg));
		areg = NULL_RTX;
	      }
	    else
	      {
		NOT_INUSE (0);
		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn (areg, GEN_INT (offset));

		/* AltiVec addressing mode is [reg+reg].  */
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

		/* Rather than emitting a generic move, force use of the stvx
		   instruction, which we always want on ISA 2.07 (power8)
		   systems.  In particular we don't want xxpermdi/stxvd2x for
		   little endian.  */
		insn = emit_insn (gen_altivec_stvx_v4si_internal (mem,
								  savereg));
	      }

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
    {
      /* Get VRSAVE into a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      int save_regno = 12;
      if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && !using_static_chain_p)
	save_regno = 11;
      else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}
      NOT_INUSE (save_regno);

      emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
    }
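
  /* emit_vrsave_prologue conceptually produces something like the
     following (a sketch, not the literal emitted RTL):
	 mfvrsave rS           ; read SPR 256
	 stw      rS,off(r1)   ; save the caller's mask
	 oris     rS,rS,mask   ; add the AltiVec regs we use
	 mtvrsave rS  */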
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC
	   && !constant_pool_empty_p ())
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI == ABI_V4
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  sp_adjust = 0;
	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
#if TARGET_MACHO
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behavior is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ()
      && !cfun->machine->toc_is_wrapped_separately)
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }

  /* Set up the arg pointer (r12) for -fsplit-stack code.  */
  if (using_split_stack && split_stack_arg_pointer_used_p ())
    emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
}
/* Output .extern statements for the save/restore routines we use.  */

static void
rs6000_output_savres_externs (FILE *file)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }
}
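
/* The net effect is one "\t.extern NAME" line per out-of-line routine
   actually referenced; the exact NAME (hypothetically something like
   "_savef14") comes from rs6000_savres_routine_name and depends on the
   target's naming conventions.  */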
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file)
{
  if (!cfun->is_thunk)
    rs6000_output_savres_externs (file);

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (rs6000_global_entry_point_needed_p ())
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);

      if (TARGET_CMODEL != CMODEL_LARGE)
	{
	  /* In the small and medium code models, we assume the TOC is less
	     than 2 GB away from the text section, so it can be computed via
	     the following two-instruction sequence.  */
	  char buf[256];

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  fprintf (file, "0:\taddis 2,12,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@ha\n");
	  fprintf (file, "\taddi 2,2,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@l\n");
	}
      else
	{
	  /* In the large code model, we allow arbitrary offsets between the
	     TOC and the text section, so we have to load the offset from
	     memory.  The data field is emitted directly before the global
	     entry point in rs6000_elf_declare_function_name.  */
	  char buf[256];

#ifdef HAVE_AS_ENTRY_MARKERS
	  /* If supported by the linker, emit a marker relocation.  If the
	     total code size of the final executable or shared library
	     happens to fit into 2 GB after all, the linker will replace
	     this code sequence with the sequence for the small or medium
	     code model.  */
	  fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
#endif
	  fprintf (file, "\tld 2,");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "-");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "(12)\n");
	  fprintf (file, "\tadd 2,2,12\n");
	}

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }
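
  /* Taken together, the small/medium code model path above emits a
     sequence like the following sketch (NAME the function, N the
     current pic label number), relying on r12 holding the global
     entry point address on entry:
	 0:	addis 2,12,.TOC.-.LCFN@ha
		addi  2,2,.TOC.-.LCFN@l
		.localentry NAME,.-NAME  */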
  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
	  && cfun->static_chain_decl != NULL)
	{
	  asm_fprintf (file, "\tstd %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	  fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	  asm_fprintf (file, "\tld %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	}
      else
	fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}
/* -mprofile-kernel code calls mcount before the function prolog,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}

/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
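
/* Illustrative shape of the two insns (not the literal RTL):
	lwz   rT,off(r1)	; load_cr_save
	...stack deallocation can be scheduled here...
	mtcrf 0xFF,rT		; restore_saved_cr
   which is why each insn carries its own CFI annotation.  */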
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	 CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	{
	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	{
	  rtx insn = emit_insn (gen_movsi_to_cr_one
				(gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	  /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	     CR field separately, attached to the insn that in fact
	     restores this particular CR field.  */
	  if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	    {
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Add CFA_RESTORE notes for CR and LR to the CFA_RESTORES chain,
   matching how they were saved.  */

static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
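
/* The magic numbers decompose as follows (an aside, assuming the usual
   AIX register save area layout): 220 = 19*4 + 18*8, covering r13..r31
   plus f14..f31 on 32-bit; 288 = 18*8 + 18*8, covering r14..r31 plus
   f14..f31 on 64-bit.  */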
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  strategy = info->savres_strategy;
  using_load_multiple = strategy & REST_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
			 || rs6000_tune == PROCESSOR_PPC603
			 || rs6000_tune == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
						     ? info->lr_save_offset
						     : 0) > 32767
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64)
		&& !cfun->machine->lr_is_wrapped_separately);
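
  /* Restoring via the backchain is a single load -- roughly
     "ld r1,0(r1)" on 64-bit -- because every frame stores the caller's
     stack pointer at offset 0.  The addis/addi alternative mentioned
     above would instead materialize total_size as a 32-bit immediate
     add (illustrative; the actual insns are emitted further down).  */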
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (9
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_gp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_altivec_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
28409 frame_off
= info
->total_size
;
28411 /* Restore AltiVec registers if we must do so before adjusting the
28413 if (info
->altivec_size
!= 0
28414 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28415 || (DEFAULT_ABI
!= ABI_V4
28416 && offset_below_red_zone_p (info
->altivec_save_offset
))))
28419 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28421 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
28422 if (use_backchain_to_restore_sp
)
28424 int frame_regno
= 11;
28426 if ((strategy
& REST_INLINE_VRS
) == 0)
28428 /* Of r11 and r12, select the one not clobbered by an
28429 out-of-line restore function for the frame register. */
28430 frame_regno
= 11 + 12 - scratch_regno
;
28432 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
28433 emit_move_insn (frame_reg_rtx
,
28434 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28437 else if (frame_pointer_needed
)
28438 frame_reg_rtx
= hard_frame_pointer_rtx
;
28440 if ((strategy
& REST_INLINE_VRS
) == 0)
28442 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28444 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28445 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28447 if (end_save
+ frame_off
!= 0)
28449 rtx offset
= GEN_INT (end_save
+ frame_off
);
28451 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28454 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28456 ptr_off
= -end_save
;
28457 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28458 info
->altivec_save_offset
+ ptr_off
,
28459 0, V4SImode
, SAVRES_VR
);
28463 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28464 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28466 rtx addr
, areg
, mem
, insn
;
28467 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28468 HOST_WIDE_INT offset
28469 = (info
->altivec_save_offset
+ frame_off
28470 + 16 * (i
- info
->first_altivec_reg_save
));
28472 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28474 mem
= gen_frame_mem (V4SImode
,
28475 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28476 GEN_INT (offset
)));
28477 insn
= gen_rtx_SET (reg
, mem
);
28481 areg
= gen_rtx_REG (Pmode
, 0);
28482 emit_move_insn (areg
, GEN_INT (offset
));
28484 /* AltiVec addressing mode is [reg+reg]. */
28485 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28486 mem
= gen_frame_mem (V4SImode
, addr
);
28488 /* Rather than emitting a generic move, force use of the
28489 lvx instruction, which we always want. In particular we
28490 don't want lxvd2x/xxpermdi for little endian. */
28491 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28494 (void) emit_insn (insn
);
28498 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28499 if (((strategy
& REST_INLINE_VRS
) == 0
28500 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28501 && (flag_shrink_wrap
28502 || (offset_below_red_zone_p
28503 (info
->altivec_save_offset
28504 + 16 * (i
- info
->first_altivec_reg_save
))))
28507 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28508 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (info->vrsave_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  insn = NULL_RTX;
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->fp_save_offset + info->fp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  if (TARGET_32BIT)
		    emit_insn (gen_addsi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		  else
		    emit_insn (gen_adddi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, insn;
		rtx reg = gen_rtx_REG (V4SImode, i);
		HOST_WIDE_INT offset
		  = (info->altivec_save_offset + frame_off
		     + 16 * (i - info->first_altivec_reg_save));

		if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
		  {
		    mem = gen_frame_mem (V4SImode,
					 gen_rtx_PLUS (Pmode, frame_reg_rtx,
						       GEN_INT (offset)));
		    insn = gen_rtx_SET (reg, mem);
		  }
		else
		  {
		    areg = gen_rtx_REG (Pmode, 0);
		    emit_move_insn (areg, GEN_INT (offset));

		    /* AltiVec addressing mode is [reg+reg].  */
		    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		    mem = gen_frame_mem (V4SImode, addr);

		    /* Rather than emitting a generic move, force use of the
		       lvx instruction, which we always want.  In particular
		       we don't want lxvd2x/xxpermdi for little endian.  */
		    insn = gen_altivec_lvx_v4si_internal (reg, mem);
		  }

		(void) emit_insn (insn);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    && save_reg_p (i))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->vrsave_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* In the ELFv2 ABI we need to restore all call-saved CR fields from
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtx reg = gen_rtx_REG (SImode, 0);
	    emit_insn (gen_frame_load (reg, frame_reg_rtx,
				       cr_off + frame_off));

	    insn = emit_insn (gen_movsi_to_cr_one
			      (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	    if (!exit_func && flag_shrink_wrap)
	      {
		add_reg_note (insn, REG_CFA_RESTORE,
			      gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (insn) = 1;
	      }

	    cr_off += reg_size;
	  }
    }
  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);
  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + RS6000_TOC_SAVE_SLOT));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      int offset = info->gp_save_offset + frame_off;
      for (i = info->first_gp_reg_save; i < 32; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->gpr_is_wrapped_separately[i])
	    {
	      rtx reg = gen_rtx_REG (reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	    }

	  offset += reg_size;
	}
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i)
	    && !cfun->machine->gpr_is_wrapped_separately[i])
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }

  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }
  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    {
      int offset = info->fp_save_offset + frame_off;
      for (i = info->first_fp_reg_save; i < 64; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->fpr_is_wrapped_separately[i - 32])
	    {
	      rtx reg = gen_rtx_REG (fp_reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	      if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }

	  offset += fp_reg_size;
	}
    }
  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  /* We can't hang the cfa_restores off a simple return,
	     since the shrink-wrap code sometimes uses an existing
	     return.  This means there might be a path from
	     pre-prologue code to this return, and dwarf2cfi code
	     wants the eh_frame unwinder state to be the same on
	     all paths to any point.  So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi with shrink-wrap
	     requirement; the stack red-zone means eh_frame info
	     from the prologue telling the unwinder to restore
	     from the stack is perfectly good right to the end of
	     the function.  */
	  emit_insn (gen_blockage ());
	  emit_cfa_restores (cfa_restores);
	  cfa_restores = NULL_RTX;
	}

      emit_jump_insn (targetm.gen_simple_return ());
    }
  if (!sibcall && !restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
      int elt = 0;
      RTVEC_ELT (p, elt++) = ret_rtx;
      if (lr)
	RTVEC_ELT (p, elt++)
	  = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

      /* We have to restore more than two FP registers, so branch to the
	 restore function.  It will return to our caller.  */
      int i;
      int reg;
      rtx sym;

      if (flag_shrink_wrap)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
      reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));

      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	  RTVEC_ELT (p, elt++)
	    = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file)
{
#if TARGET_MACHO
  macho_branch_islands ();

  {
    rtx_insn *insn = get_last_insn ();
    rtx_insn *deleted_debug_label = NULL;

    /* Mach-O doesn't support labels at the end of objects, so if
       it looks like we might want one, take special action.

       First, collect any sequence of deleted debug labels.  */
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }

    /* Second, if we have:
       label:
	 barrier
       then this needs to be detected, so skip past the barrier.  */

    if (insn && BARRIER_P (insn))
      insn = PREV_INSN (insn);

    /* Up to now we've only seen notes or barriers.  */
    if (insn)
      {
	if (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
	  /* Trailing label: <barrier>.  */
	  fputs ("\tnop\n", file);
	else
	  {
	    /* Lastly, see if we have a completely empty function body.  */
	    while (insn && ! INSN_P (insn))
	      insn = PREV_INSN (insn);
	    /* If we don't find any insns, we've got an empty function body;
	       i.e. completely empty - without a return or branch.  This is
	       taken as the case where a function body has been removed
	       because it contains an inline __builtin_unreachable().  GCC
	       states that reaching __builtin_unreachable() means UB so we're
	       not obliged to do anything special; however, we want
	       non-zero-sized function bodies.  To meet this, and help the
	       user out, let's trap the case.  */
	    if (insn == NULL)
	      fputs ("\ttrap\n", file);
	  }
      }
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO, Go and JIT aren't assigned
	 numbers either, so for now use 0.  */
      if (lang_GNU_C ()
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go")
	  || ! strcmp (language_string, "libgccjit"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || lang_GNU_Fortran ())
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (lang_GNU_CXX ()
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);
      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));
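      /* Illustrative example (not in the original source): with
	 optional_tbtab == 1 and info->first_fp_reg_save == 50, the first
	 byte above is (1 << 5) | (1 << 1) == 34, and the fpr-count byte
	 carries 64 - 50 == 14 in its low six bits.  */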
      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case E_SFmode:
			case E_SDmode:
			  bits = 0x2;
			  break;

			case E_DFmode:
			case E_DDmode:
			case E_TFmode:
			case E_TDmode:
			case E_IFmode:
			case E_KFmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);
      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
      if (optional_tbtab)
	{
	  /* Optional fields follow.  Some are variable length.  */

	  /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
	     float, 11 double float.  */
	  /* There is an entry for each parameter in a register, in the order
	     that they occur in the parameter list.  Any intervening arguments
	     on the stack are ignored.  If the list overflows a long (max
	     possible length 34 bits) then completely leave off all elements
	     that don't fit.  */
	  /* Only emit this long if there was at least one parameter.  */
	  if (fixed_parms || float_parms)
	    fprintf (file, "\t.long %d\n", parm_info);

	  /* Offset from start of code to tb table.  */
	  fputs ("\t.long ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  RS6000_OUTPUT_BASENAME (file, fname);
	  putc ('-', file);
	  rs6000_output_function_entry (file, fname);
	  putc ('\n', file);

	  /* Interrupt handler mask.  */
	  /* Omit this long, since we never set the interrupt handler bit
	     above.  */

	  /* Number of CTL (controlled storage) anchors.  */
	  /* Omit this long, since the has_ctl bit is never set above.  */

	  /* Displacement into stack of each CTL anchor.  */
	  /* Omit this list of longs, because there are no CTL anchors.  */

	  /* Length of function name.  */
	  if (*fname == '*')
	    ++fname;
	  fprintf (file, "\t.short %d\n", (int) strlen (fname));

	  /* Function name.  */
	  assemble_string (fname, strlen (fname));

	  /* Register for alloca automatic storage; this is always reg 31.
	     Only emit this if the alloca bit was set above.  */
	  if (frame_pointer_needed)
	    fputs ("\t.byte 31\n", file);

	  fputs ("\t.align 2\n", file);
	}
    }
  /* Arrange to define .LCTOC1 label, if not already done.  */
  if (need_toc_init)
    {
      need_toc_init = 0;
      if (!toc_initialized)
	{
	  switch_to_section (toc_section);
	  switch_to_section (current_function_section ());
	}
    }
}
/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
/* Emit -fsplit-stack prologue, which goes before the regular function
   prologue (at local entry point in the case of ELFv2).  */

void
rs6000_expand_split_stack_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  unsigned HOST_WIDE_INT allocate;
  long alloc_hi, alloc_lo;
  rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
  rtx_insn *insn;

  gcc_assert (flag_split_stack && reload_completed);

  if (!info->push_p)
    return;

  if (global_regs[29])
    {
      error ("%qs uses register r29", "-fsplit-stack");
      inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
	      "conflicts with %qD", global_regs_decl[29]);
    }

  allocate = info->total_size;
  if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
    {
      sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
      return;
    }
  if (morestack_ref == NULL_RTX)
    {
      morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
      SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
					   | SYMBOL_FLAG_FUNCTION);
    }

  r0 = gen_rtx_REG (Pmode, 0);
  r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  r12 = gen_rtx_REG (Pmode, 12);
  emit_insn (gen_load_split_stack_limit (r0));
  /* Always emit two insns here to calculate the requested stack,
     so that the linker can edit them when adjusting size for calling
     non-split-stack code.  */
  alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  alloc_lo = -allocate - alloc_hi;
  if (alloc_hi != 0)
    {
      emit_insn (gen_add3_const (r12, r1, alloc_hi));
      if (alloc_lo != 0)
	emit_insn (gen_add3_const (r12, r12, alloc_lo));
      else
	emit_insn (gen_nop ());
    }
  else
    {
      emit_insn (gen_add3_const (r12, r1, alloc_lo));
      emit_insn (gen_nop ());
    }
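  /* Illustrative note (not in the original source): the hi/lo split above
     rounds -allocate to the nearest multiple of 0x10000 so that each half
     fits the signed 16-bit immediate of a single add.  For example, with
     allocate == 74565 (0x12345): alloc_hi = (-74565 + 0x8000) & ~0xffffL
     == -65536 and alloc_lo = -74565 - (-65536) == -9029; their sum is
     exactly -allocate and -9029 fits in 16 signed bits.  */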
  compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
  emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
  ok_label = gen_label_rtx ();
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, compare, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, ok_label),
			       pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (insn) = ok_label;
  /* Mark the jump as very likely to be taken.  */
  add_reg_br_prob_note (insn, profile_probability::very_likely ());

  lr = gen_rtx_REG (Pmode, LR_REGNO);
  insn = emit_move_insn (r0, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
  RTX_FRAME_RELATED_P (insn) = 1;

  insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
				   const0_rtx, const0_rtx));
  call_fusage = NULL_RTX;
  use_reg (&call_fusage, r12);
  /* Say the call uses r0, even though it doesn't, to stop regrename
     from twiddling with the insns saving lr, trashing args for cfun.
     The insns restoring lr are similarly protected by making
     split_stack_return use r0.  */
  use_reg (&call_fusage, r0);
  add_function_usage_to (insn, call_fusage);
  /* Indicate that this function can't jump to non-local gotos.  */
  make_reg_eh_region_note_nothrow_nononlocal (insn);
  emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
  insn = emit_move_insn (lr, r0);
  add_reg_note (insn, REG_CFA_RESTORE, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  emit_insn (gen_split_stack_return ());

  emit_label (ok_label);
  LABEL_NUSES (ok_label) = 1;
}
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}
      return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			    FIRST_PARM_OFFSET (current_function_decl));
    }
  return virtual_incoming_args_rtx;
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
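/* Illustrative note (not in the original source): the unsigned GEU branch
   above appears to take LABEL exactly when requested >= limit, i.e. when
   the requested allocation still fits the current stack segment, so only
   the out-of-space case falls through.  */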
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ front end will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (3,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	      contains:
					made by	 addrs?	fp?	sum?

   AIX TOC		2	crt0	as	 Y	option	option
   AIX minimal TOC	30	prolog	gcc	 Y	Y	option
   SVR4 SDATA		13	crt0	gcc	 N	Y	N
   SVR4 pic		30	prolog	ld	 Y	not yet	N
   SVR4 PIC		30	prolog	gcc	 Y	option	option
   EABI TOC		30	prolog	gcc	 Y	option	option

*/
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_WIDE_INT:
      {
	int i;
	flen = CONST_WIDE_INT_NUNITS (k);
	for (i = 0; i < flen; i++)
	  result = result * 613 + CONST_WIDE_INT_ELT (k, i);
	return result;
      }

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
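/* Illustrative note (not in the original source): the hash mixes every
   operand into RESULT with the odd multipliers 613 and 1231; for a
   SYMBOL_REF, for example, the 's' format case above folds in the length
   of the symbol name followed by each of its bytes.  */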
hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
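/* Illustrative example (not in the original source): under the Itanium
   C++ ABI, the vtable for class Foo is mangled "_ZTV3Foo", its typeinfo
   "_ZTI3Foo" and its VTT "_ZTT3Foo"; all of these satisfy VTABLE_NAME_P.  */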
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
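/* Illustrative example (not in the original source): for NAME == "a$b$c"
   the function returns "a_b_c"; a name such as "$x", whose first dollar
   sign is the leading character, is returned unchanged because q == name
   in that case.  */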
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  const char *name = XSTR (x, 0);

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the identifier and not the
     symbol.  */
  if (VTABLE_NAME_P (name))
    RS6000_OUTPUT_BASENAME (file, name);
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     CODE_LABELs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);

      h = ggc_alloc<toc_hash_struct> ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*found)->labelno));

#ifdef HAVE_AS_TLS
	  if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
	      && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
		  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
	    {
	      fputs ("\t.set ", file);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d,", labelno);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d\n", ((*found)->labelno));
	    }
#endif
	  return;
	}
    }
  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
       || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
    {
      long k[4];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      long k[2];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      long l;

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  if (WORDS_BIG_ENDIAN)
	    fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  else
	    fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      low = INTVAL (x) & 0xffffffff;
      high = (HOST_WIDE_INT) INTVAL (x) >> 32;

      /* TOC entries are always Pmode-sized, so when big-endian
	 smaller integer constants in the TOC need to be padded.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
	}
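      /* Illustrative example (not in the original source): on a 64-bit
	 big-endian target, an SImode constant 5 starts as low == 5 and
	 high == 0; after the shift above low == 0 and high == 5, so the
	 value lands in the most-significant word of the Pmode-sized
	 entry.  */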
      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }
  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_ELF || TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      /* Mark large TOC symbols on AIX with [TE] so they are mapped
	 after other TOC symbols, reducing overflow of small TOC access
	 to [TC] symbols.  */
      fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
	     ? "[TE]," : "[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);

#if HAVE_AS_TLS
  if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
    {
      switch (SYMBOL_REF_TLS_MODEL (base))
	{
	case 0:
	  break;
	case TLS_MODEL_LOCAL_EXEC:
	  fputs ("@le", file);
	  break;
	case TLS_MODEL_INITIAL_EXEC:
	  fputs ("@ie", file);
	  break;
	/* Use global-dynamic for local-dynamic.  */
	case TLS_MODEL_GLOBAL_DYNAMIC:
	case TLS_MODEL_LOCAL_DYNAMIC:
	  putc ('\n', file);
	  (*targetm.asm_out.internal_label) (file, "LCM", labelno);
	  fputs ("\t.tc .", file);
	  RS6000_OUTPUT_BASENAME (file, name);
	  fputs ("[TC],", file);
	  output_addr_const (file, x);
	  fputs ("@m", file);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
#endif

  putc ('\n', file);
}
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
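/* Illustrative example (not in the original source): output_ascii (file,
   "Hi\n", 3) emits

	.byte "Hi"
	.byte 10

   with the closing quote supplied by TO_CLOSE and the unprintable newline
   written as a decimal byte.  */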
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
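/* Illustrative example (not in the original source; argument values are
   hypothetical): rs6000_gen_section_name (&buf, "foo.c", "bss_") produces
   "_foobss_": the leading underscore, the basename up to the last period,
   then SECTION_DESC in place of the period and the old suffix.  */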
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\taddis %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\tlwz %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\tlwz %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\tadd %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\tlis %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      /* Don't do anything, done in output_profile_hook ().  */
      break;
    }
}
/* The following variable value is the last issued insn.  */
static rtx_insn *last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */
static int load_store_pendulum;

/* The following variable helps pair divide insns during scheduling.  */
static int divide_cnt;

/* The following variable helps pair and alternate vector and vector load
   insns during scheduling.  */
static int vec_pairing;


/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */
static int
rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
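/* Illustrative note (not in the original source): on a dispatch-group
   target such as Power4/5 (rs6000_sched_groups), issuing a cracked insn
   with MORE == 4 leaves more - 2 == 2 slots in this cycle, while a
   microcoded insn ends the group by leaving 0.  */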
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		    unsigned int)
{
  enum attr_type attr_type;

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  switch (dep_type)
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_tune == PROCESSOR_PPC603
		 || rs6000_tune == PROCESSOR_PPC604
		 || rs6000_tune == PROCESSOR_PPC604e
		 || rs6000_tune == PROCESSOR_PPC620
		 || rs6000_tune == PROCESSOR_PPC630
		 || rs6000_tune == PROCESSOR_PPC750
		 || rs6000_tune == PROCESSOR_PPC7400
		 || rs6000_tune == PROCESSOR_PPC7450
		 || rs6000_tune == PROCESSOR_PPCE5500
		 || rs6000_tune == PROCESSOR_PPCE6500
		 || rs6000_tune == PROCESSOR_POWER4
		 || rs6000_tune == PROCESSOR_POWER5
		 || rs6000_tune == PROCESSOR_POWER7
		 || rs6000_tune == PROCESSOR_POWER8
		 || rs6000_tune == PROCESSOR_POWER9
		 || rs6000_tune == PROCESSOR_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		case TYPE_EXTS:
		case TYPE_MUL:
		  if (get_attr_dot (dep_insn) == DOT_YES)
		    return cost + 2;
		  else
		    break;
		case TYPE_SHIFT:
		  if (get_attr_dot (dep_insn) == DOT_YES
		      && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
		    return cost + 2;
		  else
		    break;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_FPSTORE:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_CNTLZ:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_sign_extend (dep_insn)
			       == SIGN_EXTEND_YES ? 6 : 4;
		      break;
		    }
		  case TYPE_SHIFT:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			       6 : 3;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    {
		      if (get_attr_update (dep_insn) == UPDATE_YES
			  && ! rs6000_store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_MUL:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_DIV:
		    {
		      if (! rs6000_store_data_bypass_p (dep_insn, insn))
			return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x).  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_CNTLZ:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_sign_extend (dep_insn)
			       == SIGN_EXTEND_YES ? 6 : 4;
		      break;
		    }
		  case TYPE_SHIFT:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			       6 : 3;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    {
		      if (get_attr_update (dep_insn) == UPDATE_YES
			  && set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_MUL:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_DIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& get_attr_update (insn) == UPDATE_NO
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_tune == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	    case TYPE_FPSIMPLE:
	      if (get_attr_type (dep_insn) == TYPE_FP
		  || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_update (insn) == UPDATE_NO
		  && get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
      /* Fall through, no cost for output dependency.  */
      /* FALLTHRU */

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
			  int cost, unsigned int dw)
{
  int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);

  if (ret != cost)
    {
      const char *dep;

      switch (dep_type)
	{
	default:	     dep = "unknown dependency"; break;
	case REG_DEP_TRUE:   dep = "data dependency";	 break;
	case REG_DEP_OUTPUT: dep = "output dependency";	 break;
	case REG_DEP_ANTI:   dep = "anti dependency";	 break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_tune == PROCESSOR_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_update (insn) == UPDATE_YES
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
	  || ((type == TYPE_LOAD || type == TYPE_STORE)
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_YES)
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	   && get_attr_update (insn) == UPDATE_NO)
	  || (type == TYPE_LOAD
	      && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || (type == TYPE_STORE
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
	      && get_attr_update (insn) == UPDATE_YES)
	  || type == TYPE_DELAYED_CR
	  || (type == TYPE_EXTS
	      && get_attr_dot (insn) == DOT_YES)
	  || (type == TYPE_SHIFT
	      && get_attr_dot (insn) == DOT_YES
	      && get_attr_var_shift (insn) == VAR_SHIFT_NO)
	  || (type == TYPE_MUL
	      && get_attr_dot (insn) == DOT_YES)
	  || type == TYPE_DIV
	  || (type == TYPE_INSERT
	      && get_attr_size (insn) == SIZE_32))
	return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if MEM_SIZE_KNOWN_P (mem)
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = (XEXP (mem, 0));
  if (GET_CODE (addr_rtx) == PRE_MODIFY)
    addr_rtx = XEXP (addr_rtx, 1);

  *offset = 0;
  while (GET_CODE (addr_rtx) == PLUS
	 && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *offset += INTVAL (XEXP (addr_rtx, 1));
      addr_rtx = XEXP (addr_rtx, 0);
    }
  if (!REG_P (addr_rtx))
    return false;

  *base = addr_rtx;
  return true;
}
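/* Illustrative example (not in the original source): for a MEM such as
   (mem:SI (plus:DI (reg:DI 9) (const_int 16))) with a known 4-byte size,
   this sets *base to (reg:DI 9), *offset to 16 and *size to 4; a reg+reg
   address fails the final REG_P test and returns false.  */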
/* The function returns true if the target storage location of
   MEM1 is adjacent to the target storage location of MEM2.  */

/* Return 1 if memory locations are adjacent.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
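/* Illustrative example (not in the original source): 4-byte accesses at
   (r9)+0 and (r9)+4 are adjacent in either order, but accesses based on
   different base registers never compare adjacent here, even if those
   registers happen to hold neighboring addresses.  */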
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_tune) {
  case PROCESSOR_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_MUL:
      case TYPE_DIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {

      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_tune == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx_insn *insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_MUL
      || type == TYPE_DIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;
  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_tune) {
  case PROCESSOR_RS64A:
  case PROCESSOR_PPC601: /* ? */
  case PROCESSOR_PPC7450:
    return 3;
  case PROCESSOR_PPC440:
  case PROCESSOR_PPC603:
  case PROCESSOR_PPC750:
  case PROCESSOR_PPC7400:
  case PROCESSOR_PPC8540:
  case PROCESSOR_PPC8548:
  case PROCESSOR_CELL:
  case PROCESSOR_PPCE300C2:
  case PROCESSOR_PPCE300C3:
  case PROCESSOR_PPCE500MC:
  case PROCESSOR_PPCE500MC64:
  case PROCESSOR_PPCE5500:
  case PROCESSOR_PPCE6500:
  case PROCESSOR_TITAN:
    return 2;
  case PROCESSOR_PPC476:
  case PROCESSOR_PPC604:
  case PROCESSOR_PPC604e:
  case PROCESSOR_PPC620:
  case PROCESSOR_PPC630:
    return 4;
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
  case PROCESSOR_POWER6:
  case PROCESSOR_POWER7:
    return 5;
  case PROCESSOR_POWER8:
    return 7;
  case PROCESSOR_POWER9:
    return 6;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
      return 4;

    case PROCESSOR_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return zero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
{
  if (ready_index == 0)
    return 0;

  if (rs6000_tune != PROCESSOR_CELL)
    return 0;

  gcc_assert (insn != NULL_RTX && INSN_P (insn));

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 1;

  return 0;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
31325 is_load_insn1 (rtx pat
, rtx
*load_mem
)
31327 if (!pat
|| pat
== NULL_RTX
)
31330 if (GET_CODE (pat
) == SET
)
31331 return find_mem_ref (SET_SRC (pat
), load_mem
);
31333 if (GET_CODE (pat
) == PARALLEL
)
31337 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
31338 if (is_load_insn1 (XVECEXP (pat
, 0, i
), load_mem
))
31345 /* Determine if INSN loads from memory. */
31348 is_load_insn (rtx insn
, rtx
*load_mem
)
31350 if (!insn
|| !INSN_P (insn
))
31356 return is_load_insn1 (PATTERN (insn
), load_mem
);
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Return whether TYPE is a Power9 pairable vector instruction type.  */

static bool
is_power9_pairable_vec_type (enum attr_type type)
{
  switch (type)
    {
    case TYPE_VECSIMPLE:
    case TYPE_VECCOMPLEX:
    case TYPE_VECDIV:
    case TYPE_VECCMP:
    case TYPE_VECPERM:
    case TYPE_VECFLOAT:
    case TYPE_VECFDIV:
    case TYPE_VECDOUBLE:
      return true;
    default:
      break;
    }
  return false;
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx_insn *
get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL;

      if (CALL_P (insn)
	  || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
31494 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31499 enum attr_type type
, type2
;
31501 type
= get_attr_type (last_scheduled_insn
);
31503 /* Try to issue fixed point divides back-to-back in pairs so they will be
31504 routed to separate execution units and execute in parallel. */
31505 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31507 /* First divide has been scheduled. */
31510 /* Scan the ready list looking for another divide, if found move it
31511 to the end of the list so it is chosen next. */
31515 if (recog_memoized (ready
[pos
]) >= 0
31516 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31519 for (i
= pos
; i
< lastpos
; i
++)
31520 ready
[i
] = ready
[i
+ 1];
31521 ready
[lastpos
] = tmp
;
31529 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31532 /* The best dispatch throughput for vector and vector load insns can be
31533 achieved by interleaving a vector and vector load such that they'll
31534 dispatch to the same superslice. If this pairing cannot be achieved
31535 then it is best to pair vector insns together and vector load insns
31538 To aid in this pairing, vec_pairing maintains the current state with
31539 the following values:
31541 0 : Initial state, no vecload/vector pairing has been started.
31543 1 : A vecload or vector insn has been issued and a candidate for
31544 pairing has been found and moved to the end of the ready
31546 if (type
== TYPE_VECLOAD
)
31548 /* Issued a vecload. */
31549 if (vec_pairing
== 0)
31551 int vecload_pos
= -1;
31552 /* We issued a single vecload, look for a vector insn to pair it
31553 with. If one isn't found, try to pair another vecload. */
31557 if (recog_memoized (ready
[pos
]) >= 0)
31559 type2
= get_attr_type (ready
[pos
]);
31560 if (is_power9_pairable_vec_type (type2
))
31562 /* Found a vector insn to pair with, move it to the
31563 end of the ready list so it is scheduled next. */
31565 for (i
= pos
; i
< lastpos
; i
++)
31566 ready
[i
] = ready
[i
+ 1];
31567 ready
[lastpos
] = tmp
;
31569 return cached_can_issue_more
;
31571 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31572 /* Remember position of first vecload seen. */
31577 if (vecload_pos
>= 0)
31579 /* Didn't find a vector to pair with but did find a vecload,
31580 move it to the end of the ready list. */
31581 tmp
= ready
[vecload_pos
];
31582 for (i
= vecload_pos
; i
< lastpos
; i
++)
31583 ready
[i
] = ready
[i
+ 1];
31584 ready
[lastpos
] = tmp
;
31586 return cached_can_issue_more
;
31590 else if (is_power9_pairable_vec_type (type
))
31592 /* Issued a vector operation. */
31593 if (vec_pairing
== 0)
31596 /* We issued a single vector insn, look for a vecload to pair it
31597 with. If one isn't found, try to pair another vector. */
31601 if (recog_memoized (ready
[pos
]) >= 0)
31603 type2
= get_attr_type (ready
[pos
]);
31604 if (type2
== TYPE_VECLOAD
)
31606 /* Found a vecload insn to pair with, move it to the
31607 end of the ready list so it is scheduled next. */
31609 for (i
= pos
; i
< lastpos
; i
++)
31610 ready
[i
] = ready
[i
+ 1];
31611 ready
[lastpos
] = tmp
;
31613 return cached_can_issue_more
;
31615 else if (is_power9_pairable_vec_type (type2
)
31617 /* Remember position of first vector insn seen. */
31624 /* Didn't find a vecload to pair with but did find a vector
31625 insn, move it to the end of the ready list. */
31626 tmp
= ready
[vec_pos
];
31627 for (i
= vec_pos
; i
< lastpos
; i
++)
31628 ready
[i
] = ready
[i
+ 1];
31629 ready
[lastpos
] = tmp
;
31631 return cached_can_issue_more
;
31636 /* We've either finished a vec/vecload pair, couldn't find an insn to
31637 continue the current pair, or the last insn had nothing to do with
31638 with pairing. In any case, reset the state. */
31642 return cached_can_issue_more
;
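/* Illustrative note (not part of the original source): the rotation
   loops above move ready[POS] to ready[LASTPOS], the element the
   scheduler issues next.  E.g. with POS == 1 and LASTPOS == 3:
     before: { insnA, DIV2, insnB, insnC }
     after:  { insnA, insnB, insnC, DIV2 }  */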
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx_insn **ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second-to-last ready insn
     is a non-pipelined insn.  */
  if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        std::swap (ready[n_ready - 1], ready[n_ready - 2]);
    }

  if (rs6000_tune == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist
           non-load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.  */
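  /* State-transition sketch (illustrative, not part of the original
     source): issuing two stores in one cycle moves the pendulum
     0 -> -1 -> -2; the -2 case below then boosts the priority of the
     first load on the ready list and steps the pendulum once more so
     that only a single load is boosted.  */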
  if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx_insn *tmp;
      rtx load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;
                  /* Maintain the index of the first store found on the
                     list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  /* Do Power9 dependent reordering if necessary.  */
  if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
      && recog_memoized (last_scheduled_insn) >= 0)
    return power9_sched_reorder2 (ready, *pn_ready - 1);

  return cached_can_issue_more;
}
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group
   that precedes the group to which INSN belongs).  This means that INSN
   will be the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
      /* FALLTHRU */
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
        case TYPE_MTJMPR:
        case TYPE_MFJMPR:
        case TYPE_DIV:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_ISYNC:
        case TYPE_SYNC:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_EXTS:
        case TYPE_CNTLZ:
        case TYPE_TRAP:
        case TYPE_MUL:
        case TYPE_INSERT:
        case TYPE_FPCOMPARE:
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
          return true;
        case TYPE_SHIFT:
          if (get_attr_dot (insn) == DOT_NO
              || get_attr_var_shift (insn) == VAR_SHIFT_NO)
            return true;
          break;
        case TYPE_DIV:
          if (get_attr_size (insn) == SIZE_32)
            return true;
          break;
        case TYPE_LOAD:
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_DIV:
        case TYPE_ISYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
          return true;
        case TYPE_MUL:
        case TYPE_SHIFT:
        case TYPE_EXTS:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_DELAYED_CR:
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_SYNC:
        case TYPE_ISYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_VECSTORE:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
          return true;
        case TYPE_SHIFT:
        case TYPE_EXTS:
        case TYPE_MUL:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          break;
        case TYPE_STORE:
          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;
          break;
        default:
          break;
        }
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_TRAP:
      case TYPE_MUL:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
        return true;
      case TYPE_SHIFT:
        if (get_attr_dot (insn) == DOT_NO
            || get_attr_var_shift (insn) == VAR_SHIFT_NO)
          return true;
        break;
      case TYPE_DIV:
        if (get_attr_size (insn) == SIZE_32)
          return true;
        break;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
        return true;
      case TYPE_LOAD:
        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;
        break;
      case TYPE_STORE:
        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;
        break;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER8:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
        return true;
      case TYPE_LOAD:
        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;
        break;
      case TYPE_STORE:
        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;
        break;
      default:
        break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */
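/* Example (illustrative, not part of the original source): with
   issue_rate == 4 and -minsert-sched-nops=sched_finish_regroup_exact,
   a costly non-branch NEXT_INSN arriving with 3 vacant slots needs only
   2 nops, because the last slot is branch-only and could not be used
   anyway; a costly branch needs all 3.  With -minsert-sched-nops=2,
   exactly 2 nops are emitted regardless.  */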
static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx_insn *next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop?  */
      if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
          || rs6000_tune == PROCESSOR_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                group_insns[i] = 0;
            }

          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing
   the estimated processor grouping on the compiler (as opposed to the
   function 'pad_groups', which tries to force the scheduler's grouping on
   the processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops'
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows:  It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or fewer issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue
     slots) can't be issued in this group.
   - fewer than 'issue_rate' slots are vacant, and the next insn always
     needs to start a new group.  */
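/* Sketch of the boundary estimate (illustrative, not part of the
   original source), for issue_rate == 4: after 3 insns only the
   branch-only slot remains, so a non-branch next insn starts a new
   group while a branch can still join the current one; a cracked insn
   (two issue slots) starts a new group once 2 or fewer slots,
   including the branch slot, remain.  */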
static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
                 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    }

  return group_count;
}
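/* Note (illustrative, not part of the original source): dispatch-group
   starts are marked by giving the first insn of a group TImode, so a
   marked stream for issue_rate == 4 might look like
   { TImode, VOIDmode, VOIDmode, VOIDmode, TImode, ... }; the PUT_MODE
   calls above rewrite those marks to match the estimated processor
   grouping.  */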
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
            rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops to preserve insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
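/* Lifecycle sketch (illustrative, not part of the original source):
   the selective scheduler drives the hooks below roughly as

     void *sc = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (sc, true);  // or false: copy global state
     ...
     rs6000_set_sched_context (sc);         // restore the saved state
     rs6000_free_sched_context (sc);
*/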
/* Allocate store for new scheduling context.  */

static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */

static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */

static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}

/* Free _SC.  */

static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Implement TARGET_SCHED_CAN_SPECULATE_INSN.  */

static bool
rs6000_sched_can_speculate_insn (rtx_insn *insn)
{
  switch (get_attr_type (insn))
    {
    case TYPE_DIV:
    case TYPE_SDIV:
    case TYPE_DDIV:
    case TYPE_VECDIV:
    case TYPE_SSQRT:
    case TYPE_DSQRT:
      return false;

    default:
      return true;
    }
}
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
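/* Summary (illustrative, not part of the original source): AIX uses a
   3-word function descriptor (12 bytes for -m32, 24 for -m64), ELFv2 a
   32-byte trampoline, and V.4/eabi/Darwin a 40- or 48-byte code stub
   that __trampoline_setup fills in at run time.  */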
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("you cannot take the address of a nested function if you use "
                 "the %qs option", "-mno-pointers-to-nested-functions");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

        /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}

/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

     __attribute__((altivec(vector__)))
     __attribute__((altivec(pixel__)))   (always followed by 'unsigned short')
     __attribute__((altivec(bool__)))    (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
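/* Example (illustrative, not part of the original source): the AltiVec
   keywords are lowered onto this attribute, so a declaration like

     vector unsigned int v;

   reaches the handler below as 'unsigned int' carrying
   __attribute__((altivec(vector__))) and is rewritten to the
   unsigned_V4SI_type_node vector type.  */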
static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without %qs", "-mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without %qs",
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without %qs",
               "-mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case E_TImode:
          result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
          break;
        case E_DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case E_SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case E_HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case E_QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case E_SFmode: result = V4SF_type_node; break;
        case E_DFmode: result = V2DF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
        case E_V2DImode: case E_V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
        case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
        case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
        case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case E_V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Use a unique name for __float128 rather than trying to use "e" or "g".
     Use "g" for IBM extended double, no matter whether it is long double
     (using -mabi=ibmlongdouble) or the distinct __ibm128 type.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (type == ieee128_float_type_node)
        return "U10__float128";

      if (TARGET_LONG_DOUBLE_128)
        {
          if (type == long_double_type_node)
            return (TARGET_IEEEQUAD) ? "U10__float128" : "g";

          if (type == ibm128_float_type_node)
            return "g";
        }
    }

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
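/* Example (illustrative, not part of the original source): with these
   vendor-extended names, a C++ function  void f (__pixel p);  mangles
   as  _Z1fu7__pixel,  and  void g (__bool int b);  as  _Z1gU6__booli.  */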
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

static rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip it.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
33068 #ifdef USING_ELFOS_H
33070 /* A get_unnamed_section callback, used for switching to toc_section. */
33073 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33075 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33076 && TARGET_MINIMAL_TOC
)
33078 if (!toc_initialized
)
33080 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
33081 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33082 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
33083 fprintf (asm_out_file
, "\t.tc ");
33084 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
33085 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33086 fprintf (asm_out_file
, "\n");
33088 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33089 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33090 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33091 fprintf (asm_out_file
, " = .+32768\n");
33092 toc_initialized
= 1;
33095 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33097 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33099 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
33100 if (!toc_initialized
)
33102 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33103 toc_initialized
= 1;
33108 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33109 if (!toc_initialized
)
33111 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33112 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33113 fprintf (asm_out_file
, " = .+32768\n");
33114 toc_initialized
= 1;
33119 /* Implement TARGET_ASM_INIT_SECTIONS. */
33122 rs6000_elf_asm_init_sections (void)
33125 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op
, NULL
);
33128 = get_unnamed_section (SECTION_WRITE
, output_section_asm_op
,
33129 SDATA2_SECTION_ASM_OP
);
33132 /* Implement TARGET_SELECT_RTX_SECTION. */
33135 rs6000_elf_select_rtx_section (machine_mode mode
, rtx x
,
33136 unsigned HOST_WIDE_INT align
)
33138 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
33139 return toc_section
;
33141 return default_elf_select_rtx_section (mode
, x
, align
);
33144 /* For a SYMBOL_REF, set generic flags and then perform some
33145 target-specific processing.
33147 When the AIX ABI is requested on a non-AIX system, replace the
33148 function name with the real name (with a leading .) rather than the
33149 function descriptor name. This saves a lot of overriding code to
33150 read the prefixes. */
33152 static void rs6000_elf_encode_section_info (tree
, rtx
, int) ATTRIBUTE_UNUSED
;
33154 rs6000_elf_encode_section_info (tree decl
, rtx rtl
, int first
)
33156 default_encode_section_info (decl
, rtl
, first
);
33159 && TREE_CODE (decl
) == FUNCTION_DECL
33161 && DEFAULT_ABI
== ABI_AIX
)
33163 rtx sym_ref
= XEXP (rtl
, 0);
33164 size_t len
= strlen (XSTR (sym_ref
, 0));
33165 char *str
= XALLOCAVEC (char, len
+ 2);
33167 memcpy (str
+ 1, XSTR (sym_ref
, 0), len
+ 1);
33168 XSTR (sym_ref
, 0) = ggc_alloc_string (str
, len
+ 1);
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
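/* Example (illustrative, not part of the original source):
   compare_section_name (".sdata2", ".sdata") is false (the next
   character is '2', not '.' or NUL), while both ".sdata" and
   ".sdata.foo" match the ".sdata" template.  */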
static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}
33224 #endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbol refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
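/* Example (illustrative, not part of the original source): for
   ADDR == (plus (reg r9) (const_int 8)), find_addr_reg returns r9;
   for (plus (reg r0) (reg r31)) it returns r31, since r0 cannot serve
   as the base register of an "la" increment.  */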
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}
33299 /* Generate far-jump branch islands for everything recorded in
33300 branch_islands. Invoked immediately after the last instruction of
33301 the epilogue has been emitted; the branch islands must be appended
33302 to, and contiguous with, the function body. Mach-O stubs are
33303 generated in machopic_output_stub(). */
33306 macho_branch_islands (void)
33310 while (!vec_safe_is_empty (branch_islands
))
33312 branch_island
*bi
= &branch_islands
->last ();
33313 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
33314 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
33315 char name_buf
[512];
33316 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33317 if (name
[0] == '*' || name
[0] == '&')
33318 strcpy (name_buf
, name
+1);
33322 strcpy (name_buf
+1, name
);
33324 strcpy (tmp_buf
, "\n");
33325 strcat (tmp_buf
, label
);
33326 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33327 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33328 dbxout_stabd (N_SLINE
, bi
->line_number
);
33329 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33332 if (TARGET_LINK_STACK
)
33335 get_ppc476_thunk_name (name
);
33336 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
33337 strcat (tmp_buf
, name
);
33338 strcat (tmp_buf
, "\n");
33339 strcat (tmp_buf
, label
);
33340 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33344 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
33345 strcat (tmp_buf
, label
);
33346 strcat (tmp_buf
, "_pic\n");
33347 strcat (tmp_buf
, label
);
33348 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33351 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
33352 strcat (tmp_buf
, name_buf
);
33353 strcat (tmp_buf
, " - ");
33354 strcat (tmp_buf
, label
);
33355 strcat (tmp_buf
, "_pic)\n");
33357 strcat (tmp_buf
, "\tmtlr r0\n");
33359 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
33360 strcat (tmp_buf
, name_buf
);
33361 strcat (tmp_buf
, " - ");
33362 strcat (tmp_buf
, label
);
33363 strcat (tmp_buf
, "_pic)\n");
33365 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
33369 strcat (tmp_buf
, ":\nlis r12,hi16(");
33370 strcat (tmp_buf
, name_buf
);
33371 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
33372 strcat (tmp_buf
, name_buf
);
33373 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
33375 output_asm_insn (tmp_buf
, 0);
33376 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33377 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33378 dbxout_stabd (N_SLINE
, bi
->line_number
);
33379 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33380 branch_islands
->pop ();
/* NO_PREVIOUS_DEF checks in the list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
33457 /* Generate PIC and indirect symbol stubs. */
33460 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
33462 unsigned int length
;
33463 char *symbol_name
, *lazy_ptr_name
;
33464 char *local_label_0
;
33465 static int label
= 0;
33467 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33468 symb
= (*targetm
.strip_name_encoding
) (symb
);
33471 length
= strlen (symb
);
33472 symbol_name
= XALLOCAVEC (char, length
+ 32);
33473 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
33475 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
33476 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
33479 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
33481 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
33485 fprintf (file
, "\t.align 5\n");
33487 fprintf (file
, "%s:\n", stub
);
33488 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33491 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33492 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
33494 fprintf (file
, "\tmflr r0\n");
33495 if (TARGET_LINK_STACK
)
33498 get_ppc476_thunk_name (name
);
33499 fprintf (file
, "\tbl %s\n", name
);
33500 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33504 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
33505 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33507 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
33508 lazy_ptr_name
, local_label_0
);
33509 fprintf (file
, "\tmtlr r0\n");
33510 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
33511 (TARGET_64BIT
? "ldu" : "lwzu"),
33512 lazy_ptr_name
, local_label_0
);
33513 fprintf (file
, "\tmtctr r12\n");
33514 fprintf (file
, "\tbctr\n");
33518 fprintf (file
, "\t.align 4\n");
33520 fprintf (file
, "%s:\n", stub
);
33521 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33523 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
33524 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
33525 (TARGET_64BIT
? "ldu" : "lwzu"),
33527 fprintf (file
, "\tmtctr r12\n");
33528 fprintf (file
, "\tbctr\n");
33531 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
33532 fprintf (file
, "%s:\n", lazy_ptr_name
);
33533 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33534 fprintf (file
, "%sdyld_stub_binding_helper\n",
33535 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if
   nonzero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
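/* SMALL_INT tests for a signed 16-bit constant (illustrative note, not
   part of the original source): e.g. 0x7fff gives 0x7fff + 0x8000 ==
   0xffff < 0x10000 (true), while 0x8000 gives 0x8000 + 0x8000 ==
   0x10000 (false).  */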
33546 rs6000_machopic_legitimize_pic_address (rtx orig
, machine_mode mode
,
33551 if (reg
== NULL
&& !reload_completed
)
33552 reg
= gen_reg_rtx (Pmode
);
33554 if (GET_CODE (orig
) == CONST
)
33558 if (GET_CODE (XEXP (orig
, 0)) == PLUS
33559 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
33562 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
33564 /* Use a different reg for the intermediate value, as
33565 it will be marked UNCHANGING. */
33566 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
33567 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
33570 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
33573 if (GET_CODE (offset
) == CONST_INT
)
33575 if (SMALL_INT (offset
))
33576 return plus_constant (Pmode
, base
, INTVAL (offset
));
33577 else if (!reload_completed
)
33578 offset
= force_reg (Pmode
, offset
);
33581 rtx mem
= force_const_mem (Pmode
, orig
);
33582 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
33585 return gen_rtx_PLUS (Pmode
, base
, offset
);
33588 /* Fall back on generic machopic code. */
33589 return machopic_legitimize_pic_address (orig
, mode
, reg
);
33592 /* Output a .machine directive for the Darwin assembler, and call
33593 the generic start_file routine. */
33596 rs6000_darwin_file_start (void)
33598 static const struct
33602 HOST_WIDE_INT if_set
;
33604 { "ppc64", "ppc64", MASK_64BIT
},
33605 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
33606 { "power4", "ppc970", 0 },
33607 { "G5", "ppc970", 0 },
33608 { "7450", "ppc7450", 0 },
33609 { "7400", "ppc7400", MASK_ALTIVEC
},
33610 { "G4", "ppc7400", 0 },
33611 { "750", "ppc750", 0 },
33612 { "740", "ppc750", 0 },
33613 { "G3", "ppc750", 0 },
33614 { "604e", "ppc604e", 0 },
33615 { "604", "ppc604", 0 },
33616 { "603e", "ppc603", 0 },
33617 { "603", "ppc603", 0 },
33618 { "601", "ppc601", 0 },
33619 { NULL
, "ppc", 0 } };
33620 const char *cpu_id
= "";
33623 rs6000_file_start ();
33624 darwin_file_start ();
33626 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33628 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
33629 cpu_id
= rs6000_default_cpu
;
33631 if (global_options_set
.x_rs6000_cpu_index
)
33632 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
33634 /* Look through the mapping array. Pick the first name that either
33635 matches the argument, has a bit set in IF_SET that is also set
33636 in the target flags, or has a NULL name. */
33639 while (mapping
[i
].arg
!= NULL
33640 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
33641 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
33644 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
33647 #endif /* TARGET_MACHO */
33651 rs6000_elf_reloc_rw_mask (void)
33655 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33661 /* Record an element in the table of global constructors. SYMBOL is
33662 a SYMBOL_REF of the function to be called; PRIORITY is a number
33663 between 0 and MAX_INIT_PRIORITY.
33665 This differs from default_named_section_asm_out_constructor in
33666 that we have special handling for -mrelocatable. */
33668 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
33670 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
33672 const char *section
= ".ctors";
33675 if (priority
!= DEFAULT_INIT_PRIORITY
)
33677 sprintf (buf
, ".ctors.%.5u",
33678 /* Invert the numbering so the linker puts us in the proper
33679 order; constructors are run from right to left, and the
33680 linker sorts in increasing order. */
33681 MAX_INIT_PRIORITY
- priority
);
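  /* Example (illustrative, not part of the original source): with
     MAX_INIT_PRIORITY == 65535, a constructor of priority 100 is placed
     in section ".ctors.65435"; the inverted suffix plus the linker's
     increasing sort yields the required right-to-left execution
     order.  */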
33685 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33686 assemble_align (POINTER_SIZE
);
33688 if (DEFAULT_ABI
== ABI_V4
33689 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33691 fputs ("\t.long (", asm_out_file
);
33692 output_addr_const (asm_out_file
, symbol
);
33693 fputs (")@fixup\n", asm_out_file
);
33696 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33699 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
33701 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
33703 const char *section
= ".dtors";
33706 if (priority
!= DEFAULT_INIT_PRIORITY
)
33708 sprintf (buf
, ".dtors.%.5u",
33709 /* Invert the numbering so the linker puts us in the proper
33710 order; constructors are run from right to left, and the
33711 linker sorts in increasing order. */
33712 MAX_INIT_PRIORITY
- priority
);
33716 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33717 assemble_align (POINTER_SIZE
);
33719 if (DEFAULT_ABI
== ABI_V4
33720 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33722 fputs ("\t.long (", asm_out_file
);
33723 output_addr_const (asm_out_file
, symbol
);
33724 fputs (")@fixup\n", asm_out_file
);
33727 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33731 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
33733 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
33735 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
33736 ASM_OUTPUT_LABEL (file
, name
);
33737 fputs (DOUBLE_INT_ASM_OP
, file
);
33738 rs6000_output_function_entry (file
, name
);
33739 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
33742 fputs ("\t.size\t", file
);
33743 assemble_name (file
, name
);
33744 fputs (",24\n\t.type\t.", file
);
33745 assemble_name (file
, name
);
33746 fputs (",@function\n", file
);
33747 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
33749 fputs ("\t.globl\t.", file
);
33750 assemble_name (file
, name
);
33755 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33756 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33757 rs6000_output_function_entry (file
, name
);
33758 fputs (":\n", file
);
33763 if (DEFAULT_ABI
== ABI_V4
33764 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
33765 && !TARGET_SECURE_PLT
33766 && (!constant_pool_empty_p () || crtl
->profile
)
33767 && (uses_toc
= uses_TOC ()))
33772 switch_to_other_text_partition ();
33773 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33775 fprintf (file
, "\t.long ");
33776 assemble_name (file
, toc_label_name
);
33779 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33780 assemble_name (file
, buf
);
33783 switch_to_other_text_partition ();
33786 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33787 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33789 if (TARGET_CMODEL
== CMODEL_LARGE
&& rs6000_global_entry_point_needed_p ())
33793 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33795 fprintf (file
, "\t.quad .TOC.-");
33796 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33797 assemble_name (file
, buf
);
33801 if (DEFAULT_ABI
== ABI_AIX
)
33803 const char *desc_name
, *orig_name
;
33805 orig_name
= (*targetm
.strip_name_encoding
) (name
);
33806 desc_name
= orig_name
;
33807 while (*desc_name
== '.')
33810 if (TREE_PUBLIC (decl
))
33811 fprintf (file
, "\t.globl %s\n", desc_name
);
33813 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33814 fprintf (file
, "%s:\n", desc_name
);
33815 fprintf (file
, "\t.long %s\n", orig_name
);
33816 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
33817 fputs ("\t.long 0\n", file
);
33818 fprintf (file
, "\t.previous\n");
33820 ASM_OUTPUT_LABEL (file
, name
);
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;

static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  /* ??? The value emitted depends on options active at file end.
     Assume anyone using #pragma or attributes that might change
     options knows what they are doing.  */
  if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && rs6000_passes_float)
    {
      int fp;

      if (TARGET_DF_FPR)
	fp = 1;
      else if (TARGET_SF_FPR)
	fp = 3;
      else
	fp = 2;
      if (rs6000_passes_long_double)
	{
	  if (!TARGET_LONG_DOUBLE_128)
	    fp |= 2 * 4;
	  else if (TARGET_IEEEQUAD)
	    fp |= 3 * 4;
	  else
	    fp |= 1 * 4;
	}
      fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
    }
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_vector)
	fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
		 (TARGET_ALTIVEC_ABI ? 2 : 1));
      if (rs6000_returns_struct)
	fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
		 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
    file_end_indicate_exec_stack ();
#endif

  if (flag_split_stack)
    file_end_indicate_split_stack ();

  if (cpu_builtin_p)
    {
      /* We have expanded a CPU builtin, so we need to emit a reference to
	 the special symbol that LIBC uses to declare it supports the
	 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature.  */
      switch_to_section (data_section);
      fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
      fprintf (asm_out_file, "\t%s %s\n",
	       TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
    }
}
#if TARGET_XCOFF

#ifndef HAVE_XCOFF_DWARF_EXTRAS
#define HAVE_XCOFF_DWARF_EXTRAS 0
#endif

static enum unwind_info_type
rs6000_xcoff_debug_unwind_info (void)
{
  return UI_NONE;
}

static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  fprintf (asm_out_file, "%s", SET_ASM_OP);
  RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
  fprintf (asm_out_file, ",");
  RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
  fprintf (asm_out_file, "\n");
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
			   rs6000_xcoff_output_tls_section_asm_op,
			   &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
			   rs6000_xcoff_output_tls_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };

  if (flags & SECTION_EXCLUDE)
    smclass = 4;
  else if (flags & SECTION_DEBUG)
    {
      fprintf (asm_out_file, "\t.dwsect %s\n", name);
      return;
    }
  else if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
#define IN_NAMED_SECTION(DECL) \
  ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
   && DECL_SECTION_NAME (DECL) != NULL)

static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align)
{
  /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
     a named section.  */
  if (align > BIGGEST_ALIGNMENT)
    {
      resolve_unique_section (decl, reloc, true);
      if (IN_NAMED_SECTION (decl))
	return get_named_section (decl, NULL, reloc);
    }

  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
#if HAVE_AS_TLS
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
	{
	  if (TREE_PUBLIC (decl))
	    return tls_data_section;
	  else if (bss_initializer_p (decl))
	    {
	      /* Convert to COMMON to emit in BSS.  */
	      DECL_COMMON (decl) = 1;
	      return tls_comm_section;
	    }
	  else
	    return tls_private_data_section;
	}
      else
#endif
	if (TREE_PUBLIC (decl))
	  return data_section;
	else
	  return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private data and uninitialized data with
     alignment <= BIGGEST_ALIGNMENT.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || (DECL_INITIAL (decl) == NULL_TREE
	  && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  set_decl_section_name (decl, name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
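
/* For example, "getenv[DS]" comes back as "getenv".  The fixed len - 4
   assumes the trailing mapping class is always two characters inside
   brackets, as with the [DS] and [UA] suffixes that
   rs6000_xcoff_encode_section_info appends to extern decls.  */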
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
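
/* The alignment travels in the SECTION_ENTSIZE bits of the flags as a
   log2: a 16-byte-aligned decl yields exact_log2 (16) = 4, which
   rs6000_xcoff_asm_named_section later prints as the ",%u" alignment
   operand of the .csect directive.  */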
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
			   main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
			   main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (toc_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
struct declare_alias_data
{
  FILE *file;
  bool function_descriptor;
};

/* Declare alias N.  A helper function for for_node_and_aliases.  */

static bool
rs6000_declare_alias (struct symtab_node *n, void *d)
{
  struct declare_alias_data *data = (struct declare_alias_data *)d;
  /* Main symbol is output specially, because varasm machinery does part of
     the job for us - we do not need to declare .globl/lglobs and such.  */
  if (!n->alias || n->weakref)
    return false;

  if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
    return false;

  /* Prevent assemble_alias from trying to use .set pseudo operation
     that does not behave as expected by the middle-end.  */
  TREE_ASM_WRITTEN (n->decl) = true;

  const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
  char *buffer = (char *) alloca (strlen (name) + 2);
  char *p;
  int dollar_inside = 0;

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (n->decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (n->decl))
	{
	  if (dollar_inside) {
	      if (data->function_descriptor)
		fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
	  if (data->function_descriptor)
	    {
	      fputs ("\t.globl .", data->file);
	      RS6000_OUTPUT_BASENAME (data->file, buffer);
	      putc ('\n', data->file);
	    }
	  fputs ("\t.globl ", data->file);
	  RS6000_OUTPUT_BASENAME (data->file, buffer);
	  putc ('\n', data->file);
	}
#ifdef ASM_WEAKEN_DECL
      else if (DECL_WEAK (n->decl) && !data->function_descriptor)
	ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
#endif
    }
  else
    {
      if (dollar_inside)
	{
	  if (data->function_descriptor)
	    fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
	}
      if (data->function_descriptor)
	{
	  fputs ("\t.lglobl .", data->file);
	  RS6000_OUTPUT_BASENAME (data->file, buffer);
	  putc ('\n', data->file);
	}
      fputs ("\t.lglobl ", data->file);
      RS6000_OUTPUT_BASENAME (data->file, buffer);
      putc ('\n', data->file);
    }
  if (data->function_descriptor)
    fputs (".", data->file);
  RS6000_OUTPUT_BASENAME (data->file, buffer);
  fputs (":\n", data->file);
  return false;
}
#ifdef HAVE_GAS_HIDDEN
/* Helper function to calculate visibility of a DECL
   and return the value as a const string.  */

static const char *
rs6000_xcoff_visibility (tree decl)
{
  static const char * const visibility_types[] = {
    "", ",protected", ",hidden", ",internal"
  };

  enum symbol_visibility vis = DECL_VISIBILITY (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && cgraph_node::get (decl)
      && cgraph_node::get (decl)->instrumentation_clone
      && cgraph_node::get (decl)->instrumented_version)
    vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);

  return visibility_types[vis];
}
#endif
/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
{
  char *buffer = (char *) alloca (strlen (name) + 1);
  char *p;
  int dollar_inside = 0;
  struct declare_alias_data data = {file, false};

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (decl))
	{
	  if (dollar_inside) {
	      fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
	      fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
	    }
	  fputs ("\t.globl .", file);
	  RS6000_OUTPUT_BASENAME (file, buffer);
#ifdef HAVE_GAS_HIDDEN
	  fputs (rs6000_xcoff_visibility (decl), file);
#endif
	  putc ('\n', file);
	}
    }
  else
    {
      if (dollar_inside) {
	  fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
	  fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
	}
      fputs ("\t.lglobl .", file);
      RS6000_OUTPUT_BASENAME (file, buffer);
      putc ('\n', file);
    }
  fputs ("\t.csect ", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							&data, true);
  fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (", TOC[tc0], 0\n", file);

  in_section = NULL;
  switch_to_section (function_section (decl));
  putc ('.', file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  data.function_descriptor = true;
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							&data, true);
  if (!DECL_IGNORED_P (decl))
    {
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	xcoffout_declare_function (file, decl, buffer);
      else if (write_symbols == DWARF2_DEBUG)
	{
	  name = (*targetm.strip_name_encoding) (name);
	  fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
	}
    }
  return;
}
/* Output assembly language to globalize a symbol from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
#ifdef HAVE_GAS_HIDDEN
  fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* Output assembly language to define a symbol as COMMON from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
					     tree decl ATTRIBUTE_UNUSED,
					     const char *name,
					     unsigned HOST_WIDE_INT size,
					     unsigned HOST_WIDE_INT align)
{
  unsigned HOST_WIDE_INT align2 = 2;

  if (align > 32)
    align2 = floor_log2 (align / BITS_PER_UNIT);
  else if (size > 4)
    align2 = 3;

  fputs (COMMON_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);

  fprintf (stream,
	   "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
	   size, align2);

#ifdef HAVE_GAS_HIDDEN
  if (decl != NULL)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* This macro produces the initial definition of an object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
{
  struct declare_alias_data data = {file, false};
  RS6000_OUTPUT_BASENAME (file, name);
  fputs (":\n", file);
  symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
							       &data, true);
}
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */

void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}
/* Output a symbol offset relative to the dbase for the current object.
   We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
   signed offsets.

   __gcc_unwind_dbase is embedded in all executables/libraries through
   libgcc/config/rs6000/crtdbase.S.  */

void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;
  const char *symname;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;

  /* Append mapping class to extern decls.  */
  symname = XSTR (symbol, 0);
  if (decl /* sync condition with assemble_external () */
      && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
      && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && symname[strlen (symname) - 1] != ']')
    {
      char *newname = (char *) alloca (strlen (symname) + 5);
      strcpy (newname, symname);
      strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
			? "[DS]" : "[UA]"));
      XSTR (symbol, 0) = ggc_strdup (newname);
    }
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */
void
rs6000_asm_weaken_decl (FILE *stream, tree decl,
			const char *name, const char *val)
{
  fputs ("\t.weak\t", stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  if (decl && TREE_CODE (decl) == FUNCTION_DECL
      && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
    {
      if (TARGET_XCOFF)
	fputs ("[DS]", stream);
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
      if (decl)
	fputs (rs6000_xcoff_visibility (decl), stream);
#endif
      fputs ("\n\t.weak\t.", stream);
      RS6000_OUTPUT_BASENAME (stream, name);
    }
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
  if (decl)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  fputc ('\n', stream);
  if (val)
    {
#ifdef ASM_OUTPUT_DEF
      ASM_OUTPUT_DEF (stream, name, val);
#endif
      if (decl && TREE_CODE (decl) == FUNCTION_DECL
	  && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
	{
	  fputs ("\t.set\t.", stream);
	  RS6000_OUTPUT_BASENAME (stream, name);
	  fputs (",.", stream);
	  RS6000_OUTPUT_BASENAME (stream, val);
	  fputc ('\n', stream);
	}
    }
}
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
		  int opno ATTRIBUTE_UNUSED, int *total, bool speed)
{
  int code = GET_CODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
    case CONST:
    case HIGH:
    case SYMBOL_REF:
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
	*total += COSTS_N_INSNS (100);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze.  */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift.  */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD unless we have a mod instruction. */
      if (!TARGET_MODULO && (code == MOD || code == UMOD))
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
      *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
      return false;

    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case AND:
      if (CONST_INT_P (XEXP (x, 1)))
	{
	  rtx left = XEXP (x, 0);
	  rtx_code left_code = GET_CODE (left);

	  /* rotate-and-mask: 1 insn.  */
	  if ((left_code == ROTATE
	       || left_code == ASHIFT
	       || left_code == LSHIFTRT)
	      && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
	    {
	      *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
	      if (!CONST_INT_P (XEXP (left, 1)))
		*total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* rotate-and-mask (no rotate), andi., andis.: 1 insn.  */
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
	  if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
	      || (val & 0xffff) == val
	      || (val & 0xffff0000) == val
	      || ((val & 0xffff) == 0 && mode == SImode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* 2 insns.  */
	  if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (2);
	      return true;
	    }
	}

      *total = COSTS_N_INSNS (1);
      return false;

    case IOR:
      *total = COSTS_N_INSNS (1);
      return true;

    case CLZ:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
      /* The EXTSWSLI instruction is a combined instruction.  Don't count both
	 the sign extend and shift separately within the insn.  */
      if (TARGET_EXTSWSLI && mode == DImode
	  && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = rs6000_cost->sfdf_convert;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case NE:
    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      *total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
			int opno, int *total, bool speed)
{
  bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_MODE_NAME (mode),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
static int
rs6000_insn_cost (rtx_insn *insn, bool speed)
{
  if (recog_memoized (insn) < 0)
    return 0;

  if (!speed)
    return get_attr_length (insn);

  int cost = get_attr_cost (insn);
  if (cost > 0)
    return cost;

  int n = get_attr_length (insn) / 4;
  enum attr_type type = get_attr_type (insn);

  switch (type)
    {
    case TYPE_LOAD:
    case TYPE_FPLOAD:
    case TYPE_VECLOAD:
      cost = COSTS_N_INSNS (n + 1);
      break;

    case TYPE_MUL:
      switch (get_attr_size (insn))
	{
	case SIZE_8:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
	  break;
	case SIZE_16:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
	  break;
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case TYPE_DIV:
      switch (get_attr_size (insn))
	{
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case TYPE_FP:
      cost = n * rs6000_cost->fp;
      break;
    case TYPE_DMUL:
      cost = n * rs6000_cost->dmul;
      break;
    case TYPE_SDIV:
      cost = n * rs6000_cost->sdiv;
      break;
    case TYPE_DDIV:
      cost = n * rs6000_cost->ddiv;
      break;

    case TYPE_SYNC:
    case TYPE_LOAD_L:
    case TYPE_MFCR:
    case TYPE_MFCRF:
      cost = COSTS_N_INSNS (n + 2);
      break;

    default:
      cost = COSTS_N_INSNS (n);
    }

  return cost;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, machine_mode mode,
			   addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_tune == PROCESSOR_POWER6
		|| rs6000_tune == PROCESSOR_POWER7
		|| rs6000_tune == PROCESSOR_POWER8
		|| rs6000_tune == PROCESSOR_POWER9)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs (0, mode);

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs (0, mode);
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs (0, mode);
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
	    || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs (32, mode);
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
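
/* For example, moving a DFmode value between GPRs and memory on a
   32-bit target costs 4 * hard_regno_nregs (0, DFmode) = 8, since the
   value occupies two GPRs there; the same move on a 64-bit target
   costs 4.  */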
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (tree fndecl)
{
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case VSX_BUILTIN_XVSQRTDP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

    case VSX_BUILTIN_XVSQRTSP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

    default:
      return NULL_TREE;
    }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = const_double_from_real_value (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = const_double_from_real_value (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = const_double_from_real_value (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
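
/* The signed-zero difference mentioned above: when the exact product
   m1*m2 equals a, fma(-m1, m2, a) rounds a - a to +0.0, while the
   form expanded here, -fma(m1, m2, -a), yields -(a - a) = -0.0.
   Everywhere else the two expressions agree.  */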
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					      UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1) {

    /* e0 = 1. - d * x0  */
    e0 = gen_reg_rtx (mode);
    rs6000_emit_nmsub (e0, d, x0, one);

    /* x1 = x0 + e0 * x0  */
    x1 = gen_reg_rtx (mode);
    rs6000_emit_madd (x1, e0, x0, x0);

    for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
	 ++i, xprev = xnext, eprev = enext) {

      /* enext = eprev * eprev  */
      enext = gen_reg_rtx (mode);
      emit_insn (gen_mul (enext, eprev, eprev));

      /* xnext = xprev + enext * xprev  */
      xnext = gen_reg_rtx (mode);
      rs6000_emit_madd (xnext, enext, xprev, xprev);
    }

  } else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
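
/* A worked check of the pass count above: a low-precision fres/fre
   estimate starts at 5 bits and each pass at least doubles it, so
   5 -> 10 -> 20 -> 40 covers SFmode's 23 bits in 3 passes, and one
   more pass (40 -> 80) covers DFmode's 52 bits.  With
   -mrecip-precision the estimate starts at 14 bits: 14 -> 28 reaches
   SFmode accuracy in 1 pass and 28 -> 56 reaches DFmode in 2.  */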
/* Goldschmidt's Algorithm for single/double-precision floating point
   sqrt and rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
{
  machine_mode mode = GET_MODE (src);
  rtx e = gen_reg_rtx (mode);
  rtx g = gen_reg_rtx (mode);
  rtx h = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  int i;
  rtx mhalf;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);

  /* e = rsqrt estimate */
  emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					     UNSPEC_RSQRT)));

  /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0).  */
  if (!recip)
    {
      rtx zero = force_reg (mode, CONST0_RTX (mode));

      if (mode == SFmode)
	{
	  rtx target = emit_conditional_move (e, GT, src, zero, mode,
					      e, zero, mode, 0);
	  if (target != e)
	    emit_move_insn (e, target);
	}
      else
	{
	  rtx cond = gen_rtx_GT (VOIDmode, e, zero);
	  rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
	}
    }

  /* g = sqrt estimate.  */
  emit_insn (gen_mul (g, e, src));
  /* h = 1/(2*sqrt) estimate.  */
  emit_insn (gen_mul (h, e, mhalf));

  if (recip)
    {
      if (passes == 1)
	{
	  rtx t = gen_reg_rtx (mode);
	  rs6000_emit_nmsub (t, g, h, mhalf);
	  /* Apply correction directly to 1/rsqrt estimate.  */
	  rs6000_emit_madd (dst, e, t, e);
	}
      else
	{
	  for (i = 0; i < passes; i++)
	    {
	      rtx t1 = gen_reg_rtx (mode);
	      rtx g1 = gen_reg_rtx (mode);
	      rtx h1 = gen_reg_rtx (mode);

	      rs6000_emit_nmsub (t1, g, h, mhalf);
	      rs6000_emit_madd (g1, g, t1, g);
	      rs6000_emit_madd (h1, h, t1, h);

	      g = g1;
	      h = h1;
	    }
	  /* Multiply by 2 for 1/rsqrt.  */
	  emit_insn (gen_add3_insn (dst, h, h));
	}
    }
  else
    {
      rtx t = gen_reg_rtx (mode);
      rs6000_emit_nmsub (t, g, h, mhalf);
      rs6000_emit_madd (dst, g, t, g);
    }

  return;
}
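
/* The identities behind the code above: with g ~= sqrt(src) and
   h ~= 1/(2*sqrt(src)), the residual t = 1/2 - g*h vanishes for exact
   values, and the Goldschmidt updates g' = g + g*t and h' = h + h*t
   roughly double the number of correct bits per pass.  The final
   reciprocal square root is 2*h, which is why the recip path finishes
   with the h + h addition.  */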
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
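
/* A worked example of the SImode fallback: popcntb leaves each byte's
   population count in the corresponding byte of tmp1, the multiply by
   0x01010101 accumulates all four counts into the most significant
   byte (no carries are possible since the total is at most 32), and
   the shift by 24 extracts it.  E.g. src = 0x000000ff gives
   tmp1 = 0x00000008, tmp2 = 0x08080808, dst = 8.  */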
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
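
/* The xor-folding alternative works because parity is the low bit of
   the popcount: after popcntb, xor-ing the upper half of the per-byte
   counts onto the lower half (shift by 16 then 8 for SImode) leaves
   the xor of all byte counts in the low byte, and the final AND with
   1 extracts the parity bit.  */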
/* Expand an Altivec constant permutation for little endian mode.
   OP0 and OP1 are the input vectors and TARGET is the output vector.
   SEL specifies the constant permutation vector.

   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode: it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9  = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and swap the input operands, then the desired result is

     vr9 = 00000006 00000004 00000002 00000000.  */

static void
altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
				  const vec_perm_indices &sel)
{
  unsigned int i;
  rtx perm[16];
  rtx constv, unspec;

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
    {
      unsigned int elt = 31 - (sel[i] & 31);
      perm[i] = GEN_INT (elt);
    }

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
			   UNSPEC_VPERM);
  if (!REG_P (target))
    {
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector NAND or NOR to do the adjustment.  */

void
altivec_expand_vec_perm_le (rtx operands[4])
{
  rtx notx, iorx, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];
  rtx tmp = target;
  rtx norreg = gen_reg_rtx (V16QImode);
  machine_mode mode = GET_MODE (target);

  /* Get everything in regs so the pattern matches.  */
  if (!REG_P (op0))
    op0 = force_reg (mode, op0);
  if (!REG_P (op1))
    op1 = force_reg (mode, op1);
  if (!REG_P (sel))
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (mode);

  if (TARGET_P9_VECTOR)
    {
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
			       UNSPEC_VPERMR);
    }
  else
    {
      /* Invert the selector with a VNAND if available, else a VNOR.
	 The VNAND is preferred for future fusion opportunities.  */
      notx = gen_rtx_NOT (V16QImode, sel);
      iorx = (TARGET_P8_VECTOR
	      ? gen_rtx_IOR (V16QImode, notx, notx)
	      : gen_rtx_AND (V16QImode, notx, notx));
      emit_insn (gen_rtx_SET (norreg, iorx));

      /* Permute with operands reversed and adjusted selector.  */
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
			       UNSPEC_VPERM);
    }

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
    {
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.

   OP0 and OP1 are the input vectors and TARGET is the output vector.
   SEL specifies the constant permutation vector.  */

static bool
altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
			       const vec_perm_indices &sel)
{
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
      { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
       : CODE_FOR_altivec_vmrglb_direct),
      { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
       : CODE_FOR_altivec_vmrglh_direct),
      { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
       : CODE_FOR_altivec_vmrglw_direct),
      { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
       : CODE_FOR_altivec_vmrghb_direct),
      { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
       : CODE_FOR_altivec_vmrghh_direct),
      { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
       : CODE_FOR_altivec_vmrghw_direct),
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
       : CODE_FOR_p8_vmrgow_v4sf_direct),
      { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
       : CODE_FOR_p8_vmrgew_v4sf_direct),
      { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx x;
  bool one_vec;

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      elt = sel[i] & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
	break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
	perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
	if (perm[i] != elt)
	  break;
      if (i == 16)
	{
	  if (!BYTES_BIG_ENDIAN)
	    elt = 15 - elt;
	  emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
	  return true;
	}

      if (elt % 2 == 0)
	{
	  for (i = 0; i < 16; i += 2)
	    if (perm[i] != elt || perm[i + 1] != elt + 1)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
	      x = gen_reg_rtx (V8HImode);
	      emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}

      if (elt % 4 == 0)
	{
	  for (i = 0; i < 16; i += 4)
	    if (perm[i] != elt
		|| perm[i + 1] != elt + 1
		|| perm[i + 2] != elt + 2
		|| perm[i + 3] != elt + 3)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
	      x = gen_reg_rtx (V4SImode);
	      emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      if ((patterns[j].mask & rs6000_isa_flags) == 0)
	continue;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
	swapped = false;
      else if (perm[0] == elt + 16)
	swapped = true;
      else
	continue;
      for (i = 1; i < 16; ++i)
	{
	  elt = patterns[j].perm[i];
	  if (swapped)
	    elt = (elt >= 16 ? elt - 16 : elt + 16);
	  else if (one_vec && elt >= 16)
	    elt -= 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  enum insn_code icode = patterns[j].impl;
	  machine_mode omode = insn_data[icode].operand[0].mode;
	  machine_mode imode = insn_data[icode].operand[1].mode;

	  /* For little-endian, don't use vpkuwum and vpkuhum if the
	     underlying vector type is not V4SI and V8HI, respectively.
	     For example, using vpkuwum with a V8HI picks up the even
	     halfwords (BE numbering) when the even halfwords (LE
	     numbering) are what we need.  */
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuwum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V4SImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
	    continue;
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuhum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V8HImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V8HImode)))
	    continue;

	  /* For little-endian, the two input operands must be swapped
	     (or swapped back) to ensure proper right-to-left numbering
	     in terms of the underlying vector elements.  */
	  if (swapped ^ !BYTES_BIG_ENDIAN)
	    std::swap (op0, op1);
	  if (imode != V16QImode)
	    {
	      op0 = gen_lowpart (imode, op0);
	      op1 = gen_lowpart (imode, op1);
	    }
	  if (omode == V16QImode)
	    x = target;
	  else
	    x = gen_reg_rtx (omode);
	  emit_insn (GEN_FCN (icode) (x, op0, op1));
	  if (omode != V16QImode)
	    emit_move_insn (target, gen_lowpart (V16QImode, x));
	  return true;
	}
    }

  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_const_le (target, op0, op1, sel);
      return true;
    }

  return false;
}
/* Expand a Paired Single or VSX Permute Doubleword constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
				unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
	op0 = op1;
      else
	op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  else if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
	return false;
      perm0 -= 2;
      perm1 += 2;
      std::swap (op0, op1);
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success! */
  if (target != NULL)
    {
      machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (target, x));
    }
  return true;
}
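
/* In the two-element selectors above, bit 1 of PERM0/PERM1 picks the
   operand and bit 0 the element within it: e.g. perm0 = 0, perm1 = 3
   selects element 0 of OP0 and element 1 of OP1, the identity
   concatenation that a single xxpermdi can produce.  */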
/* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */

static bool
rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
				 rtx op1, const vec_perm_indices &sel)
{
  bool testing_p = !target;

  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC && testing_p)
    return true;

  /* Check for ps_merge* or xxpermdi insns.  */
  if ((vmode == V2SFmode && TARGET_PAIRED_FLOAT)
      || ((vmode == V2DFmode || vmode == V2DImode)
	  && VECTOR_MEM_VSX_P (vmode)))
    {
      if (testing_p)
	{
	  op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
	  op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
	}
      if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
	return true;
    }

  if (TARGET_ALTIVEC)
    {
      /* Force the target-independent code to lower to V16QImode.  */
      if (vmode != V16QImode)
	return false;
      if (altivec_expand_vec_perm_const (target, op0, op1, sel))
	return true;
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
   OP0 and OP1 are the input vectors and TARGET is the output vector.
   PERM specifies the constant permutation vector.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
			   machine_mode vmode, const vec_perm_builder &perm)
{
  rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  for (i = 0; i < nelt; i++)
    perm.quick_push (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm.quick_push (i + high);
      perm.quick_push (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}
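
/* For example, with a V4SI target (nelt = 4), HIGHP builds the
   selector {0, 4, 1, 5}, interleaving the high halves of OP0 and OP1,
   while !HIGHP builds {2, 6, 3, 7} for the low halves.  */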
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */

void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
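
/* Usage example: rs6000_scale_v2df (tgt, src, 3) splats 2.0**3 = 8.0
   into both lanes of a scratch V2DF register and emits a single V2DF
   multiply (xvmuldp on VSX targets), scaling both elements of SRC by
   8.0 at once.  */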
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);

  if (TARGET_FLOAT128_TYPE
      && (mode == KCmode
	  || (mode == TCmode && TARGET_IEEEQUAD)))
    regno = ALTIVEC_ARG_RETURN;
  else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Return an rtx describing a return value of MODE as a PARALLEL
   in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
   stride REG_STRIDE.  */

static rtx
rs6000_parallel_return (machine_mode mode,
			int n_elts, machine_mode elt_mode,
			unsigned int regno, unsigned int reg_stride)
{
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i;

  for (i = 0; i < n_elts; i++)
    {
      rtx r = gen_rtx_REG (elt_mode, regno);
      rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
      XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
      regno += reg_stride;
    }

  return par;
}
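
/* For example, rs6000_libcall_value below uses
   rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1) to
   describe a 64-bit value returned in r3/r4 on the 32-bit ABI: two
   SImode registers at byte offsets 0 and 4 inside one PARALLEL.  */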
/* Target hook for TARGET_FUNCTION_VALUE.

   An integer value is in r3 and a floating-point value is in fp1,
   unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
		       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		       bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  unsigned int regno;
  machine_mode elt_mode;
  int n_elts;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  mode = TYPE_MODE (valtype);

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
    {
      int first_reg, n_regs;

      if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
	{
	  /* _Decimal128 must use even/odd register pairs.  */
	  first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
	  n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	}
      else
	{
	  first_reg = ALTIVEC_ARG_RETURN;
	  n_regs = 1;
	}

      return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
    }

  /* Some return value types need to be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64)
    switch (mode)
      {
      default:
	break;
      case E_DImode:
      case E_SCmode:
      case E_DCmode:
      case E_TCmode:
	int count = GET_MODE_SIZE (mode) / 4;
	return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
      }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
	   && !FLOAT128_VECTOR_P (mode)
	   && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
static rtx
rs6000_libcall_value (machine_mode mode)
{
  unsigned int regno;

  /* Long long return values need to be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
           && TARGET_HARD_FLOAT
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */
static int
rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
{
  int n;

  n = 0;
  pressure_classes[n++] = GENERAL_REGS;
  if (TARGET_VSX)
    pressure_classes[n++] = VSX_REGS;
  else
    {
      if (TARGET_ALTIVEC)
        pressure_classes[n++] = ALTIVEC_REGS;
      if (TARGET_HARD_FLOAT)
        pressure_classes[n++] = FLOAT_REGS;
    }
  pressure_classes[n++] = CR_REGS;
  pressure_classes[n++] = SPECIAL_REGS;

  return n;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
          ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
            || constant_pool_empty_p ()
          : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
/* Fill in sizes of registers used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
         The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN
                (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
          HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
    return regno;

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
  if (regno <= 63)
    return regno;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
     translated any combination of CR2, CR3, CR4 saves to a save of CR2.
     The actual code emitted saves the whole of CR, so we map CR2_REGNO
     to the DWARF reg for CR.  */
  if (format == 1 && regno == CR2_REGNO)
    return 64;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
#endif
  return regno;
}
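
/* Illustrative mapping (not in the original source; numbers assume
   RS6000_USE_DWARF_NUMBERING): GPRs and FPRs map to themselves (0-63),
   the link register maps to 108, the count register to 109, CR fields to
   86 + N, and AltiVec registers to 1124 + N for .debug_info/.debug_frame,
   while .eh_frame (format 2) always keeps the internal numbers.  */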
/* target hook eh_return_filter_mode */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (scalar_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  /* There is no vector form for IEEE 128-bit.  If we return true for IEEE
     128-bit, the compiler might try to widen IEEE 128-bit to IBM
     double-double.  */
  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
    return true;

  else
    return false;
}
/* Target hook for floatn_mode.  */
static opt_scalar_float_mode
rs6000_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      switch (n)
        {
        case 32:
          return DFmode;

        case 64:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();

        case 128:
          return opt_scalar_float_mode ();

        default:
          /* Those are the only valid _FloatNx types.  */
          gcc_unreachable ();
        }
    }
  else
    {
      switch (n)
        {
        case 32:
          return SFmode;

        case 64:
          return DFmode;

        case 128:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();

        default:
          return opt_scalar_float_mode ();
        }
    }
}
/* Target hook for c_mode_for_suffix.  */
static machine_mode
rs6000_c_mode_for_suffix (char suffix)
{
  if (TARGET_FLOAT128_TYPE)
    {
      if (suffix == 'q' || suffix == 'Q')
        return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;

      /* At the moment, we are not defining a suffix for IBM extended double.
         If/when the default for -mabi=ieeelongdouble is changed, and we want
         to support __ibm128 constants in legacy library code, we may need to
         re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
         'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
         __float80 constants.  */
    }

  return VOIDmode;
}
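
/* Illustrative sketch (not in the original source): with -mfloat128 this
   hook is what lets the C front end accept a 'q' suffix on a literal:

     __float128 x = 1.0q;   /+ parsed in KFmode, or TFmode if long double
                               is already IEEE 128-bit +/
*/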
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
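
/* Illustrative sketch (not in the original source): AddressSanitizer forms
   a shadow address as (addr >> 3) + shadow_offset, so on 64-bit PowerPC
   targets the shadow byte for ADDR lives at

     shadow = (addr >> 3) + ((unsigned HOST_WIDE_INT) 1 << 41);
*/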
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;             /* option name */
  HOST_WIDE_INT mask;           /* mask to set */
  bool invert;                  /* invert sense of mask */
  bool valid_target;            /* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",                  OPTION_MASK_ALTIVEC,            false, true  },
  { "cmpb",                     OPTION_MASK_CMPB,               false, true  },
  { "crypto",                   OPTION_MASK_CRYPTO,             false, true  },
  { "direct-move",              OPTION_MASK_DIRECT_MOVE,        false, true  },
  { "dlmzb",                    OPTION_MASK_DLMZB,              false, true  },
  { "efficient-unaligned-vsx",  OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
                                                                false, true  },
  { "float128",                 OPTION_MASK_FLOAT128_KEYWORD,   false, true  },
  { "float128-hardware",        OPTION_MASK_FLOAT128_HW,        false, true  },
  { "fprnd",                    OPTION_MASK_FPRND,              false, true  },
  { "hard-dfp",                 OPTION_MASK_DFP,                false, true  },
  { "htm",                      OPTION_MASK_HTM,                false, true  },
  { "isel",                     OPTION_MASK_ISEL,               false, true  },
  { "mfcrf",                    OPTION_MASK_MFCRF,              false, true  },
  { "mfpgpr",                   OPTION_MASK_MFPGPR,             false, true  },
  { "modulo",                   OPTION_MASK_MODULO,             false, true  },
  { "mulhw",                    OPTION_MASK_MULHW,              false, true  },
  { "multiple",                 OPTION_MASK_MULTIPLE,           false, true  },
  { "popcntb",                  OPTION_MASK_POPCNTB,            false, true  },
  { "popcntd",                  OPTION_MASK_POPCNTD,            false, true  },
  { "power8-fusion",            OPTION_MASK_P8_FUSION,          false, true  },
  { "power8-fusion-sign",       OPTION_MASK_P8_FUSION_SIGN,     false, true  },
  { "power8-vector",            OPTION_MASK_P8_VECTOR,          false, true  },
  { "power9-fusion",            OPTION_MASK_P9_FUSION,          false, true  },
  { "power9-minmax",            OPTION_MASK_P9_MINMAX,          false, true  },
  { "power9-misc",              OPTION_MASK_P9_MISC,            false, true  },
  { "power9-vector",            OPTION_MASK_P9_VECTOR,          false, true  },
  { "powerpc-gfxopt",           OPTION_MASK_PPC_GFXOPT,         false, true  },
  { "powerpc-gpopt",            OPTION_MASK_PPC_GPOPT,          false, true  },
  { "quad-memory",              OPTION_MASK_QUAD_MEMORY,        false, true  },
  { "quad-memory-atomic",       OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true  },
  { "recip-precision",          OPTION_MASK_RECIP_PRECISION,    false, true  },
  { "save-toc-indirect",        OPTION_MASK_SAVE_TOC_INDIRECT,  false, true  },
  { "string",                   OPTION_MASK_STRING,             false, true  },
  { "toc-fusion",               OPTION_MASK_TOC_FUSION,         false, true  },
  { "update",                   OPTION_MASK_NO_UPDATE,          true , true  },
  { "vsx",                      OPTION_MASK_VSX,                false, true  },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",                    OPTION_MASK_64BIT,              false, false },
  { "aix32",                    OPTION_MASK_64BIT,              true,  false },
#else
  { "64",                       OPTION_MASK_64BIT,              false, false },
  { "32",                       OPTION_MASK_64BIT,              true,  false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi",                     OPTION_MASK_EABI,               false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",                   OPTION_MASK_LITTLE_ENDIAN,      false, false },
  { "big",                      OPTION_MASK_LITTLE_ENDIAN,      true,  false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",              OPTION_MASK_RELOCATABLE,        false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",             OPTION_MASK_STRICT_ALIGN,       false, false },
#endif
  { "soft-float",               OPTION_MASK_SOFT_FLOAT,         false, false },
  { "string",                   OPTION_MASK_STRING,             false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",           RS6000_BTM_ALTIVEC,    false, false },
  { "vsx",               RS6000_BTM_VSX,        false, false },
  { "paired",            RS6000_BTM_PAIRED,     false, false },
  { "fre",               RS6000_BTM_FRE,        false, false },
  { "fres",              RS6000_BTM_FRES,       false, false },
  { "frsqrte",           RS6000_BTM_FRSQRTE,    false, false },
  { "frsqrtes",          RS6000_BTM_FRSQRTES,   false, false },
  { "popcntd",           RS6000_BTM_POPCNTD,    false, false },
  { "cell",              RS6000_BTM_CELL,       false, false },
  { "power8-vector",     RS6000_BTM_P8_VECTOR,  false, false },
  { "power9-vector",     RS6000_BTM_P9_VECTOR,  false, false },
  { "power9-misc",       RS6000_BTM_P9_MISC,    false, false },
  { "crypto",            RS6000_BTM_CRYPTO,     false, false },
  { "htm",               RS6000_BTM_HTM,        false, false },
  { "hard-dfp",          RS6000_BTM_DFP,        false, false },
  { "hard-float",        RS6000_BTM_HARD_FLOAT, false, false },
  { "long-double-128",   RS6000_BTM_LDBL128,    false, false },
  { "float128",          RS6000_BTM_FLOAT128,   false, false },
  { "float128-hw",       RS6000_BTM_FLOAT128_HW, false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;             /* option name */
  size_t global_offset;         /* offset of the option in global_options.  */
  size_t target_offset;         /* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
  { "optimize-swaps",
    offsetof (struct gcc_options, x_rs6000_optimize_swaps),
    offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
  { "allow-movmisalign",
    offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
    offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
  { "sched-groups",
    offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
    offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
  { "always-hint",
    offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
    offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
  { "align-branch-targets",
    offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
    offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
  { "tls-markers",
    offsetof (struct gcc_options, x_tls_markers),
    offsetof (struct cl_target_option, x_tls_markers), },
  { "sched-prolog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "sched-epilog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
};
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        rs6000_isa_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec and disables -mavoid-indexed-addresses.  */
                        if (!invert)
                          {
                            if (mask == OPTION_MASK_VSX)
                              {
                                mask |= OPTION_MASK_ALTIVEC;
                                TARGET_AVOID_XFORM = 0;
                              }
                          }

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          rs6000_isa_flags &= ~mask;
                        else
                          rs6000_isa_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        not_valid_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s%qs%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s%qs%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  return ret;
}
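
/* Illustrative sketch (not in the original source): the strings parsed
   here come from uses such as

     #pragma GCC target ("vsx,no-crypto")
     void f (void) __attribute__ ((__target__ ("cpu=power9,htm")));

   Each comma-separated token is matched first against rs6000_opt_masks,
   then against rs6000_opt_vars, with a "no-" prefix inverting the flag.  */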
/* Print out the target options as a list for -mdebug=target.  */
static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize;
  tree new_target, new_optimize;
  tree func_optimize;

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node (&global_options);

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
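
/* Illustrative sketch (not in the original source): this hook runs when a
   function carries a target attribute, e.g.

     __attribute__ ((target ("cpu=power9,power9-vector")))
     int sum (int *p, int n);

   An unknown token such as target("frobnicate") is diagnosed here through
   rs6000_inner_target_options, and the function then keeps its own
   DECL_FUNCTION_SPECIFIC_TARGET node.  */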
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node (&global_options);
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node (&global_options))
             == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;
  rs6000_activate_target_options (target_option_current_node);

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_rs6000_isa_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Restore target's globals from NEW_TREE and invalidate the
   rs6000_previous_fndecl cache.  */

void
rs6000_activate_target_options (tree new_tree)
{
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  rs6000_previous_fndecl = NULL_TREE;
}
/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == rs6000_previous_fndecl)
    return;

  tree old_tree;
  if (rs6000_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
  else
    old_tree = target_option_default_node;

  tree new_tree;
  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
        new_tree = target_option_current_node;
      else
        new_tree = NULL_TREE;
    }
  else
    {
      new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
      if (new_tree == NULL_TREE)
        new_tree = target_option_default_node;
    }

  if (TARGET_DEBUG_TARGET)
    {
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
        }

      if (old_tree != NULL_TREE || new_tree != NULL_TREE)
        fprintf (stderr, "--------------------\n");
    }

  if (new_tree && old_tree != new_tree)
    rs6000_activate_target_options (new_tree);

  if (fndecl)
    rs6000_previous_fndecl = fndecl;
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
                               struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
                                  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
                            ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
                            ptr->x_rs6000_isa_flags_explicit);
}
/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
                               int indent,
                               const char *string,
                               HOST_WIDE_INT flags,
                               const char *prefix,
                               const struct rs6000_opt_mask *opts,
                               size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 120;
  size_t prefix_len = strlen (prefix);
  size_t comma_len = 0;
  const char *comma = "";

  if (indent)
    start_column += fprintf (file, "%*s", indent, "");

  if (!flags)
    {
      fprintf (stderr, DEBUG_FMT_S, string, "<none>");
      return;
    }

  start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);

  /* Print the various mask options.  */
  cur_column = start_column;
  for (i = 0; i < num_elements; i++)
    {
      bool invert = opts[i].invert;
      const char *name = opts[i].name;
      const char *no_str = "";
      HOST_WIDE_INT mask = opts[i].mask;
      size_t len = comma_len + prefix_len + strlen (name);

      if (!invert)
        {
          if ((flags & mask) == 0)
            {
              no_str = "no-";
              len += sizeof ("no-") - 1;
            }

          flags &= ~mask;
        }

      else
        {
          if ((flags & mask) != 0)
            {
              no_str = "no-";
              len += sizeof ("no-") - 1;
            }

          flags |= mask;
        }

      cur_column += len;
      if (cur_column > max_column)
        {
          fprintf (stderr, ", \\\n%*s", (int)start_column, "");
          cur_column = start_column + len;
          comma = "";
        }

      fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
      comma = ", ";
      comma_len = sizeof (", ") - 1;
    }

  fputs ("\n", file);
}
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
                          HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
                                 &rs6000_opt_masks[0],
                                 ARRAY_SIZE (rs6000_opt_masks));
}

/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
                              HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
                                 &rs6000_builtin_mask_names[0],
                                 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* If the user used -mno-vsx, we need turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */

static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;        /* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;      /* flags that depend on this option.  */
    const char *const name;             /* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR,    OTHER_P9_VECTOR_MASKS,  "power9-vector" },
    { OPTION_MASK_P8_VECTOR,    OTHER_P8_VECTOR_MASKS,  "power8-vector" },
    { OPTION_MASK_VSX,          OTHER_VSX_VECTOR_MASKS, "vsx" },
  };

  for (i = 0; i < ARRAY_SIZE (flags); i++)
    {
      HOST_WIDE_INT no_flag = flags[i].no_flag;

      if ((rs6000_isa_flags & no_flag) == 0
          && (rs6000_isa_flags_explicit & no_flag) != 0)
        {
          HOST_WIDE_INT dep_flags = flags[i].dep_flags;
          HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
                                     & rs6000_isa_flags
                                     & dep_flags);

          if (set_flags)
            {
              for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
                if ((set_flags & rs6000_opt_masks[j].mask) != 0)
                  {
                    set_flags &= ~rs6000_opt_masks[j].mask;
                    error ("%<-mno-%s%> turns off %<-m%s%>",
                           flags[i].name,
                           rs6000_opt_masks[j].name);
                  }

              gcc_assert (!set_flags);
            }

          rs6000_isa_flags &= ~dep_flags;
          ignore_masks |= no_flag | dep_flags;
        }
    }

  return ignore_masks;
}
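
/* Illustrative sketch (not in the original source): compiling with
   "-mno-vsx -mpower9-vector" trips the loop above, because power9-vector
   is one of the implicit VSX-dependent flags and was also set explicitly,
   producing

     error: '-mno-vsx' turns off '-mpower9-vector'
*/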
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
/* Return the clone id of the target we are compiling code for in a target
   clone.  The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
   the priority list for the target clones (ordered from lowest to
   highest).  */

static int
rs6000_clone_priority (tree fndecl)
{
  tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  HOST_WIDE_INT isa_masks;
  int ret = CLONE_DEFAULT;
  tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
  const char *attrs_str = NULL;

  attrs = TREE_VALUE (TREE_VALUE (attrs));
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Return priority zero for default function.  Return the ISA needed for the
     function if it is not the default.  */
  if (strcmp (attrs_str, "default") != 0)
    {
      if (fn_opts == NULL_TREE)
        fn_opts = target_option_default_node;

      if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
        isa_masks = rs6000_isa_flags;
      else
        isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;

      for (ret = CLONE_MAX - 1; ret != 0; ret--)
        if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
          break;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
             get_decl_name (fndecl), ret);

  return ret;
}
/* This compares the priority of target features in function DECL1 and DECL2.
   It returns positive value if DECL1 is higher priority, negative value if
   DECL2 is higher priority and 0 if they are the same.  Note, priorities are
   ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0).  */

static int
rs6000_compare_version_priority (tree decl1, tree decl2)
{
  int priority1 = rs6000_clone_priority (decl1);
  int priority2 = rs6000_clone_priority (decl2);
  int ret = priority1 - priority2;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
             get_decl_name (decl1), get_decl_name (decl2), ret);

  return ret;
}
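
/* Illustrative sketch (not in the original source): these priorities drive
   function multi-versioning, e.g.

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     long mod3 (long a, long b) { return a % b; }

   Each clone gets a priority from rs6000_clone_priority, and the ifunc
   resolver built below tests the highest-priority clone first.  */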
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
rs6000_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

  struct cgraph_function_version_info *default_version_info = NULL;
  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
             get_decl_name (fn));

  node = cgraph_node::get (fn);
  gcc_assert (node != NULL);

  node_v = node->function_version ();
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;

  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      const tree decl2 = default_version_info->this_node->decl;
      if (is_function_default_version (decl2))
        break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
        default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  error_at (DECL_SOURCE_LOCATION (default_node->decl),
            "target_clones attribute needs GLIBC (2.23 and newer) that "
            "exports hardware capability bits");
#else

  if (targetm.has_ifunc_p ())
    {
      struct cgraph_function_version_info *it_v = NULL;
      struct cgraph_node *dispatcher_node = NULL;
      struct cgraph_function_version_info *dispatcher_version_info = NULL;

      /* Right now, the dispatching is done via ifunc.  */
      dispatch_decl = make_dispatcher_decl (default_node->decl);

      dispatcher_node = cgraph_node::get_create (dispatch_decl);
      gcc_assert (dispatcher_node != NULL);
      dispatcher_node->dispatcher_function = 1;
      dispatcher_version_info
        = dispatcher_node->insert_new_function_version ();
      dispatcher_version_info->next = default_version_info;
      dispatcher_node->definition = 1;

      /* Set the dispatcher for all the versions.  */
      it_v = default_version_info;
      while (it_v != NULL)
        {
          it_v->dispatcher_resolver = dispatch_decl;
          it_v = it_v->next;
        }
    }
  else
    {
      error_at (DECL_SOURCE_LOCATION (default_node->decl),
                "multiversioning needs ifunc which is not supported "
                "on this target");
    }
#endif

  return dispatch_decl;
}
/* Make the resolver function decl to dispatch the versions of a multi-
   versioned function, DEFAULT_DECL.  Create an empty basic block in the
   resolver and store the pointer in EMPTY_BB.  Return the decl of the resolver
   function.  */

static tree
make_resolver_func (const tree default_decl,
                    const tree dispatch_decl,
                    basic_block *empty_bb)
{
  /* Make the resolver function static.  The resolver function returns
     void *.  */
  tree decl_name = clone_function_name (default_decl, "resolver");
  const char *resolver_name = IDENTIFIER_POINTER (decl_name);
  tree type = build_function_type_list (ptr_type_node, NULL_TREE);
  tree decl = build_fn_decl (resolver_name, type);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  /* Build result decl and add to function_decl.  */
  tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false,
                                           profile_count::uninitialized ());

  cgraph_node::add_new_function (decl, true);
  symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));

  pop_cfun ();

  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  cgraph_node::create_same_body_alias (dispatch_decl, decl);

  return decl;
}
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
   return a pointer to VERSION_DECL if we are running on a machine that
   supports the index CLONE_ISA hardware architecture bits.  This function will
   be called during version dispatch to decide which function version to
   execute.  It returns the basic block at the end, to which more conditions
   can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
                     int clone_isa, basic_block new_bb)
{
  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gimple_seq gseq = bb_seq (new_bb);

  tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
                              build_fold_addr_expr (version_decl));
  tree result_var = create_tmp_var (ptr_type_node);
  gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
  gimple *return_stmt = gimple_build_return (result_var);

  if (clone_isa == CLONE_DEFAULT)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  tree bool_zero = build_int_cst (bool_int_type_node, 0);
  tree cond_var = create_tmp_var (bool_int_type_node);
  tree predicate_decl = rs6000_builtin_decls[(int) RS6000_BUILTIN_CPU_SUPPORTS];
  const char *arg_str = rs6000_clone_map[clone_isa].name;
  tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
  gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
  gimple_call_set_lhs (call_cond_stmt, cond_var);

  gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (call_cond_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, call_cond_stmt);

  gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
                                            NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  basic_block bb1 = new_bb;
  edge e12 = split_block (bb1, if_else_stmt);
  basic_block bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  edge e23 = split_block (bb2, return_stmt);
  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  basic_block bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);

  pop_cfun ();
  return bb3;
}
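
/* Illustrative sketch (not in the original source): for a non-default
   clone the block built above behaves like

     if (__builtin_cpu_supports ("arch_3_00"))   /+ rs6000_clone_map name +/
       return (void *) &foo_power9;
     /+ otherwise fall through to the next condition in bb3 +/

   with the default clone emitted last as an unconditional return.  */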
/* This function generates the dispatch function for multi-versioned functions.
   DISPATCH_DECL is the function which will contain the dispatch logic.
   FNDECLS are the function choices for dispatch, and is a tree chain.
   EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
   code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
                            void *fndecls_p,
                            basic_block *empty_bb)
{
  int ix;
  tree ele;
  vec<tree> *fndecls;
  tree clones[CLONE_MAX];

  if (TARGET_DEBUG_TARGET)
    fputs ("dispatch_function_versions, top\n", stderr);

  gcc_assert (dispatch_decl != NULL
              && fndecls_p != NULL
              && empty_bb != NULL);

  /* fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  gcc_assert (fndecls->length () >= 2);

  /* The first version in the vector is the default decl.  */
  memset ((void *) clones, '\0', sizeof (clones));
  clones[CLONE_DEFAULT] = (*fndecls)[0];

  /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
     on the PowerPC (on the x86_64, it is not a NOP).  The builtin function
     __builtin_cpu_supports ensures that the TOC fields are set up by requiring
     a recent glibc.  If we ever need to call __builtin_cpu_init, we would need
     to insert the code here to do the call.  */

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      int priority = rs6000_clone_priority (ele);
      if (!clones[priority])
        clones[priority] = ele;
    }

  for (ix = CLONE_MAX - 1; ix >= 0; ix--)
    if (clones[ix])
      {
        if (TARGET_DEBUG_TARGET)
          fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
                   ix, get_decl_name (clones[ix]));

        *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
                                         *empty_bb);
      }

  return 0;
}
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
rs6000_generate_version_dispatcher_body (void *node_p)
{
  tree resolver;
  basic_block empty_bb;
  struct cgraph_node *node = (cgraph_node *) node_p;
  struct cgraph_function_version_info *ninfo = node->function_version ();

  if (ninfo->dispatcher_resolver)
    return ninfo->dispatcher_resolver;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->definition = false;

  /* The first version in the chain corresponds to the default version.  */
  ninfo->dispatcher_resolver = resolver
    = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
             get_decl_name (resolver));

  push_cfun (DECL_STRUCT_FUNCTION (resolver));
  auto_vec<tree, 2> fn_ver_vec;

  for (struct cgraph_function_version_info *vinfo = ninfo->next;
       vinfo;
       vinfo = vinfo->next)
    {
      struct cgraph_node *version = vinfo->this_node;
      /* Check for virtual functions here again, as by this time it should
         have been determined if this function needs a vtable index or
         not.  This happens for methods in derived classes that override
         virtual methods in base classes but are not explicitly marked as
         virtual.  */
      if (DECL_VINDEX (version->decl))
        sorry ("Virtual function multiversioning not supported");

      fn_ver_vec.safe_push (version->decl);
    }

  dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
  cgraph_edge::rebuild_edges ();
  pop_cfun ();
  return resolver;
}

/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
         function can inline an altivec function but a non-vsx function
         can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
          == callee_opts->x_rs6000_isa_flags)
        ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
             get_decl_name (caller), get_decl_name (callee),
             (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = reload_completed;

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (can_create_pseudo_p ()
      && ! legitimate_indirect_address_p (addr, reload_completed)
      && ! legitimate_indexed_address_p (addr, reload_completed))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);

      if (!legitimate_indexed_address_p (addr, reload_completed)
          && !legitimate_indirect_address_p (addr, reload_completed))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
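
/* Illustrative sketch (not in the original source): the lvx/stvx
   instructions ignore the low four address bits, so the explicit AND with
   -16 makes that truncation visible in the RTL.  A pattern expander might
   legitimize an arbitrary MEM before emitting an AltiVec access, roughly:

     rtx mem = rs6000_address_for_altivec (operands[0]);
     emit_move_insn (mem, operands[1]);   /+ now matches the lvx/stvx form +/
*/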
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
        {
          rtx patt = PATTERN (last);

          if (GET_CODE (patt) == SET)
            {
              rtx lhs = XEXP (patt, 0);

              if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
                return true;
            }
        }
    }
  return false;
}
/* Expand code to perform a call under the AIX or ELFv2 ABI.  */

void
rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  const bool direct_call_p
    = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
  rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
  rtx toc_load = NULL_RTX;
  rtx toc_restore = NULL_RTX;
  rtx func_addr;
  rtx abi_reg = NULL_RTX;
  rtx call[4];
  int n_call;
  rtx insn;

  /* Handle longcall attributes.  */
  if (INTVAL (cookie) & CALL_LONG)
    func_desc = rs6000_longcall_ref (func_desc);

  /* Handle indirect calls.  */
  if (GET_CODE (func_desc) != SYMBOL_REF
      || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
    {
      /* Save the TOC into its reserved slot before the call,
	 and prepare to restore it after the call.  */
      rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
      rtx stack_toc_mem = gen_frame_mem (Pmode,
					 gen_rtx_PLUS (Pmode, stack_ptr,
						       stack_toc_offset));
      rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
					     gen_rtvec (1, stack_toc_offset),
					     UNSPEC_TOCSLOT);
      toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);

      /* Can we optimize saving the TOC in the prologue or
	 do we need to do it at every call?  */
      if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
	cfun->machine->save_toc_in_prologue = true;
      else
	{
	  MEM_VOLATILE_P (stack_toc_mem) = 1;
	  emit_move_insn (stack_toc_mem, toc_reg);
	}

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* A function pointer in the ELFv2 ABI is just a plain address, but
	     the ABI requires it to be loaded into r12 before the call.  */
	  func_addr = gen_rtx_REG (Pmode, 12);
	  emit_move_insn (func_addr, func_desc);
	  abi_reg = func_addr;
	}
      else
	{
	  /* A function pointer under AIX is a pointer to a data area whose
	     first word contains the actual address of the function, whose
	     second word contains a pointer to its TOC, and whose third word
	     contains a value to place in the static chain register (r11).
	     Note that if we load the static chain, our "trampoline" need
	     not have any executable code.  */

	  /* Load up address of the actual function.  */
	  func_desc = force_reg (Pmode, func_desc);
	  func_addr = gen_reg_rtx (Pmode);
	  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

	  /* Prepare to load the TOC of the called function.  Note that the
	     TOC load must happen immediately before the actual call so
	     that unwinding the TOC registers works correctly.  See the
	     comment in frob_update_context.  */
	  rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
	  rtx func_toc_mem = gen_rtx_MEM (Pmode,
					  gen_rtx_PLUS (Pmode, func_desc,
							func_toc_offset));
	  toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);

	  /* If we have a static chain, load it up.  But, if the call was
	     originally direct, the 3rd word has not been written since no
	     trampoline has been built, so we ought not to load it, lest we
	     override a static chain value.  */
	  if (!direct_call_p
	      && TARGET_POINTERS_TO_NESTED_FUNCTIONS
	      && !chain_already_loaded (get_current_sequence ()->next->last))
	    {
	      rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
	      rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
	      rtx func_sc_mem = gen_rtx_MEM (Pmode,
					     gen_rtx_PLUS (Pmode, func_desc,
							   func_sc_offset));
	      emit_move_insn (sc_reg, func_sc_mem);
	      abi_reg = sc_reg;
	    }
	}
    }
  else
    {
      /* Direct calls use the TOC: for local calls, the callee will
	 assume the TOC register is set; for non-local calls, the
	 PLT stub needs the TOC register.  */
      abi_reg = toc_reg;
      func_addr = func_desc;
    }

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);
  n_call = 1;

  if (toc_load)
    call[n_call++] = toc_load;
  if (toc_restore)
    call[n_call++] = toc_restore;

  call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
  insn = emit_call_insn (insn);

  /* Mention all registers defined by the ABI to hold information
     as uses in CALL_INSN_FUNCTION_USAGE.  */
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}
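
/* Illustrative sketch, not part of GCC: the AIX/ELFv1 function descriptor
   layout that rs6000_call_aix walks above, written as a plain C struct.
   The field names are hypothetical; the ABI only fixes the order of the
   three pointer-sized words.  */
#if 0
struct aix_function_descriptor
{
  void *entry;		/* word 0: address of the function's code.  */
  void *toc;		/* word 1: TOC base to load into r2.  */
  void *static_chain;	/* word 2: value to load into r11.  */
};
#endif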
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
    {
      cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);

  set_cfun (NULL);
  current_function_decl = NULL;
}
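
/* For reference (illustration, not part of GCC): the thunk body emitted
   above is a single "blr".  A caller can then materialize the current PC
   without unbalancing the 476 link stack, roughly:

	bl __ppc476.get_thunk	# LR = address of the following insn
	mflr 30

   since the bl/blr pair keeps the hardware link stack balanced.  */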
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);

  /* Make sure the hard reg set doesn't include r2, which was possibly added
     via PIC_OFFSET_TABLE_REGNUM.  */
  if (TARGET_TOC)
    remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;

  /* Optimize AND of 0/0xffffffff and IOR/XOR of 0.  */
  if (op2 && GET_CODE (op2) == CONST_INT
      && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      HOST_WIDE_INT value = INTVAL (op2) & mask;

      /* Optimize AND of 0 to just set 0.  Optimize AND of -1 to be a move.  */
      if (code == AND)
	{
	  if (value == 0)
	    {
	      emit_insn (gen_rtx_SET (dest, const0_rtx));
	      return;
	    }

	  else if (value == mask)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}

      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
      else if (code == IOR || code == XOR)
	{
	  if (value == 0)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}
    }

  if (code == AND && mode == SImode
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      emit_insn (gen_andsi3 (dest, op1, op2));
      return;
    }

  if (complement_op1_p)
    op1 = gen_rtx_NOT (mode, op1);

  if (complement_op2_p)
    op2 = gen_rtx_NOT (mode, op2);

  /* For canonical RTL, if only one arm is inverted it is the first.  */
  if (!complement_op1_p && complement_op2_p)
    std::swap (op1, op2);

  bool_rtx = ((code == NOT)
	      ? gen_rtx_NOT (mode, op1)
	      : gen_rtx_fmt_ee (code, mode, op1, op2));

  if (complement_final_p)
    bool_rtx = gen_rtx_NOT (mode, bool_rtx);

  emit_insn (gen_rtx_SET (dest, bool_rtx));
}
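
/* Illustration, not part of GCC: the ORI/ORIS split mentioned above.
   An IOR with a constant too wide for a single instruction, e.g.
   r3 |= 0x12345678, is emitted as two instructions:

	oris 3,3,0x1234
	ori  3,3,0x5678  */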
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
   CLOBBER_REG is either NULL or a scratch register of type CC to allow
   formation of the AND instructions.  */

static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;

  op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
  op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
  op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
  op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);

  if (code == NOT)
    op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
  else
    {
      if (GET_CODE (operands[2]) != CONST_INT)
	{
	  op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
	  op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
	}
      else
	{
	  HOST_WIDE_INT value = INTVAL (operands[2]);
	  HOST_WIDE_INT value_hi_lo[2];

	  gcc_assert (!complement_final_p);
	  gcc_assert (!complement_op1_p);
	  gcc_assert (!complement_op2_p);

	  value_hi_lo[hi] = value >> 32;
	  value_hi_lo[lo] = value & lower_32bits;

	  for (i = 0; i < 2; i++)
	    {
	      HOST_WIDE_INT sub_value = value_hi_lo[i];

	      if (sub_value & sign_bit)
		sub_value |= upper_32bits;

	      op2_hi_lo[i] = GEN_INT (sub_value);

	      /* If this is an AND instruction, check to see if we need to load
		 the value in a register.  */
	      if (code == AND && sub_value != -1 && sub_value != 0
		  && !and_operand (op2_hi_lo[i], SImode))
		op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
	    }
	}
    }

  for (i = 0; i < 2; i++)
    {
      /* Split large IOR/XOR operations.  */
      if ((code == IOR || code == XOR)
	  && GET_CODE (op2_hi_lo[i]) == CONST_INT
	  && !complement_final_p
	  && !complement_op1_p
	  && !complement_op2_p
	  && !logical_const_operand (op2_hi_lo[i], SImode))
	{
	  HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
	  HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
	  HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
	  rtx tmp = gen_reg_rtx (SImode);

	  /* Make sure the constant is sign extended.  */
	  if ((hi_16bits & sign_bit) != 0)
	    hi_16bits |= upper_32bits;

	  rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
				      code, SImode, false, false, false);

	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }

  return;
}
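
/* A minimal C model of the hi/lo split performed above, assuming a 32-bit
   target where a 64-bit value lives in two 32-bit halves (hi = index 0).
   Illustration only; not part of GCC.  */
#if 0
static void
split_logical_and_model (unsigned int dst[2], const unsigned int src[2],
			 unsigned long long mask)
{
  dst[0] = src[0] & (unsigned int) (mask >> 32);	/* high word */
  dst[1] = src[1] & (unsigned int) mask;		/* low word */
}
#endif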
/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

void
rs6000_split_logical (rtx operands[3],
		      enum rtx_code code,
		      bool complement_final_p,
		      bool complement_op1_p,
		      bool complement_op2_p)
{
  machine_mode mode = GET_MODE (operands[0]);
  machine_mode sub_mode;
  rtx op0, op1, op2;
  int sub_size, regno0, regno1, nregs, i;

  /* If this is DImode, use the specialized version that can run before
     register allocation.  */
  if (mode == DImode && !TARGET_POWERPC64)
    {
      rs6000_split_logical_di (operands, code, complement_final_p,
			       complement_op1_p, complement_op2_p);
      return;
    }

  op0 = operands[0];
  op1 = operands[1];
  op2 = (code == NOT) ? NULL_RTX : operands[2];
  sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
  sub_size = GET_MODE_SIZE (sub_mode);
  regno0 = REGNO (op0);
  regno1 = REGNO (op1);

  gcc_assert (reload_completed);
  gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
  gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));

  nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
  gcc_assert (nregs > 1);

  if (op2 && REG_P (op2))
    gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));

  for (i = 0; i < nregs; i++)
    {
      int offset = i * sub_size;
      rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
      rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
      rtx sub_op2 = ((code == NOT)
		     ? NULL_RTX
		     : simplify_subreg (sub_mode, op2, mode, offset));

      rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
				  complement_final_p, complement_op1_p,
				  complement_op2_p);
    }

  return;
}
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!base_reg_operand (target, GET_MODE (target)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND
      || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
    mem = XEXP (mem, 0);

  if (!MEM_P (mem))
    return false;

  if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    return false;

  /* Validate that the register used to load the high value is either the
     register being loaded, or we can safely replace its use.

     This function is only called from the peephole2 pass and we assume that
     there are 2 instructions in the peephole (addis and load), so we want to
     check if the target register was not used in the memory address and the
     register to hold the addis result is dead after the peephole.  */
  if (REGNO (addis_reg) != REGNO (target))
    {
      if (reg_mentioned_p (target, mem))
	return false;

      if (!peep2_reg_dead_p (2, addis_reg))
	return false;

      /* If the target register being loaded is the stack pointer, we must
	 avoid loading any other value into it, even temporarily.  */
      if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
	return false;
    }

  base_reg = XEXP (addr, 0);
  return REGNO (addis_reg) == REGNO (base_reg);
}
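
/* Example of the instruction pair this predicate accepts (illustration,
   not part of GCC).  Both instructions target the same register, which
   lets power8 fuse them in the front end:

	addis 10,2,var@toc@ha
	lwz   10,var@toc@l(10)  */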
/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */

void
expand_fusion_gpr_load (rtx *operands)
{
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == ZERO_EXTEND
      || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_GPR);
  emit_insn (gen_rtx_SET (target, new_mem));

  if (extend == SIGN_EXTEND)
    {
      int sub_off = ((BYTES_BIG_ENDIAN)
		     ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
		     : 0);
      rtx sign_reg
	= simplify_subreg (target_mode, target, extend_mode, sub_off);

      emit_insn (gen_rtx_SET (target,
			      gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
    }

  return;
}
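
/* A minimal C model of the rewrite above, assuming a halfword load:
   power8 fusion only covers zero-extending loads, so a sign-extending
   load becomes lhz (fused with the addis) plus an explicit extsh.
   Illustration only; not part of GCC.  */
#if 0
static long
fused_sign_extending_load_model (const unsigned short *p)
{
  unsigned long zero_extended = *p;	/* lhz, fused with the addis */
  return (short) zero_extended;		/* explicit sign extension (extsh) */
}
#endif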
/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);		/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);		/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}
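
/* The @ha/@l split that the addis/load pair relies on, as a worked example
   (illustration, not part of GCC).  @ha is the high 16 bits adjusted for
   the sign of the low 16 bits, so that (ha << 16) + lo reconstructs the
   original 32-bit offset:  */
#if 0
static void
ha_lo_split_model (int offset, int *ha, int *lo)
{
  *lo = (short) offset;		/* sign-extended low half (@l) */
  *ha = (offset - *lo) >> 16;	/* equivalently (offset + 0x8000) >> 16 */
}
#endif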
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

void
emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
			const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_store_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
	(unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
/* Return a string to fuse an addis instruction with a gpr load to the same
   register that we loaded up the addis instruction.  The address that is used
   is the logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register allocation:
	(unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,		/* register set via addis.  */
	     rtx addis_value,		/* addis value.  */
	     rtx dest,			/* destination (memory or register).  */
	     rtx src)			/* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
	return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
	return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
	return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
	return CONSTANT_P (offset);
    }

  return false;
}
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   load sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
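
/* Shape of the insn emitted above (illustration, not part of GCC):

	(parallel [(set (reg:M target)
			(unspec:M [(mem (plus addis_value offset))]
				  UNSPEC_FUSION_P9))
		   (clobber (reg tmp_reg))])

   The clobbered scratch lets the later split place the addis result in a
   register other than the load target.  */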
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   store sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
	load_string = "lfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
	load_string = "lxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  load_string = "lbz";
	  break;
	case E_HImode:
	  load_string = "lhz";
	  break;
	case E_SImode:
	case E_SFmode:
	  load_string = "lwz";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  load_string = "ld";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	store_string = "stfs";
      else if (mode == DFmode)
	store_string = "stfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
	store_string = "stxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  store_string = "stb";
	  break;
	case E_HImode:
	  store_string = "sth";
	  break;
	case E_SImode:
	case E_SFmode:
	  store_string = "stw";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  store_string = "std";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi);

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
                                (*(uint64_t*)&fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
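
/* A plain C sketch of the "hold" sequence built as trees above, using the
   real __builtin_mffs/__builtin_mtfsf built-ins.  The function name is
   hypothetical; illustration only, not part of GCC.  */
#if 0
static double
fenv_hold_model (void)
{
  double fenv = __builtin_mffs ();		/* read the FPSCR */
  unsigned long long bits;
  __builtin_memcpy (&bits, &fenv, sizeof (bits));
  bits &= 0xffffffff00000007ULL;		/* keep rounding modes + non-IEEE bit */
  double masked;
  __builtin_memcpy (&masked, &bits, sizeof (masked));
  __builtin_mtfsf (0xff, masked);		/* write back with traps disabled */
  return fenv;					/* the saved environment */
}
#endif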
void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
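
/* A scalar model of the vmrgew merge used above: the even-numbered words
   of the two V4SF inputs are interleaved into the result.  Illustration
   only; not part of GCC.  */
#if 0
static void
vmrgew_v4sf_model (float dst[4], const float a[4], const float b[4])
{
  dst[0] = a[0];
  dst[1] = b[0];
  dst[2] = a[2];
  dst[3] = b[2];
}
#endif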
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"