PR target/81979
[official-gcc.git] / gcc / config / rs6000 / rs6000.c

/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "memmodel.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "cfgloop.h"
31 #include "df.h"
32 #include "tm_p.h"
33 #include "stringpool.h"
34 #include "expmed.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "ira.h"
38 #include "recog.h"
39 #include "cgraph.h"
40 #include "diagnostic-core.h"
41 #include "insn-attr.h"
42 #include "flags.h"
43 #include "alias.h"
44 #include "fold-const.h"
45 #include "attribs.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "print-tree.h"
49 #include "varasm.h"
50 #include "explow.h"
51 #include "expr.h"
52 #include "output.h"
53 #include "dbxout.h"
54 #include "common/common-target.h"
55 #include "langhooks.h"
56 #include "reload.h"
57 #include "sched-int.h"
58 #include "gimplify.h"
59 #include "gimple-fold.h"
60 #include "gimple-iterator.h"
61 #include "gimple-ssa.h"
62 #include "gimple-walk.h"
63 #include "intl.h"
64 #include "params.h"
65 #include "tm-constrs.h"
66 #include "tree-vectorizer.h"
67 #include "target-globals.h"
68 #include "builtins.h"
69 #include "context.h"
70 #include "tree-pass.h"
71 #include "except.h"
72 #if TARGET_XCOFF
73 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
74 #endif
75 #if TARGET_MACHO
76 #include "gstab.h" /* for N_SLINE */
77 #endif
78 #include "case-cfn-macros.h"
79 #include "ppc-auxv.h"
80 #include "tree-ssa-propagate.h"
82 /* This file should be included last. */
83 #include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
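
/* Note: as ordinary macros, min and max evaluate their arguments more than
   once, so they must not be used with side-effecting expressions.
   Illustrative sketch (not part of the build):

     int a = 3, b = 5;
     int lo = min (a, b);      // 3
     int bad = min (a++, b);   // expands to ((a++) < (b) ? (a++) : (b)),
                               // incrementing a twice when a < b.  */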

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use a variant of the AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value that this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name.  */
  unsigned int mask;		/* mask bits to set.  */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};

/* On PowerPC, we care about only a limited number of target clones, so we
   can use an array to hold the options rather than more elaborate data
   structures to identify each possible variation.  Order the clones from
   the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask.  */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
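
/* Illustrative usage sketch (user code, not part of the build; the exact
   attribute spelling may vary by GCC version): function multi-versioning
   dispatches at run time to the highest clone whose HWCAP name above is
   supported:

     __attribute__ ((target_clones ("cpu=power9", "default")))
     int dot (const int *a, const int *b, int n);   // hypothetical function  */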

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the three
   register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the three register classes
   mapping into real registers, and skip the ANY class, which is just an OR
   of the bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which modes can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */
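
/* Illustrative sketch (not part of the build): an addr_mask_type value is a
   bitwise OR of the flags above, one value per reload register class.  A
   mode that is valid in a class with both indexed and offset addressing
   would carry:

     addr_mask_type m = RELOAD_REG_VALID | RELOAD_REG_INDEXED
			| RELOAD_REG_OFFSET;
     bool has_offset = (m & RELOAD_REG_OFFSET) != 0;   // true  */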

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
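
/* Illustrative sketch (machine-description snippet, not part of this file):
   predicates like this are typically used as the guard expression of a
   define_bypass in the scheduler descriptions, e.g.

     (define_bypass 2 "some_producer" "some_store"
		    "rs6000_store_data_bypass_p")

   where "some_producer" and "some_store" are hypothetical insn reservation
   names.  */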

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
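
/* For reference: the ISA 3.0 lxv/stxv instructions use a DQ-form encoding,
   so the displacement must be a multiple of 16.  A sketch of the alignment
   check that the quad-offset flag gates (illustrative, not part of the
   build):

     bool ok = (offset % 16) == 0;   // quad-offset (DQ-form) constraint  */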

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
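
/* For reference: COSTS_N_INSNS (N) scales N instructions into the cost
   units used by the RTL cost hooks (N * 4 in GCC's rtl.h), so the tables
   below are expressed relative to a single add.  Illustrative check (not
   part of the build):

     int c = COSTS_N_INSNS (3);   // 12 cost units, i.e. three adds  */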

/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr","ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
 /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
   "mq",   "lr",  "ctr",   "ap",
 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
 /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
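
/* Illustrative check (not part of the build): the first AltiVec register
   maps to the most significant bit of the 32-bit VRSAVE mask, and each
   following register shifts one bit to the right:

     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)      == 0x80000000
     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001  */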

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1705 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1706 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1707 rs6000_builtin_vectorization_cost
1708 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1709 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1710 rs6000_preferred_simd_mode
1711 #undef TARGET_VECTORIZE_INIT_COST
1712 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1713 #undef TARGET_VECTORIZE_ADD_STMT_COST
1714 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1715 #undef TARGET_VECTORIZE_FINISH_COST
1716 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1717 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1718 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1720 #undef TARGET_INIT_BUILTINS
1721 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1722 #undef TARGET_BUILTIN_DECL
1723 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1725 #undef TARGET_FOLD_BUILTIN
1726 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1727 #undef TARGET_GIMPLE_FOLD_BUILTIN
1728 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1730 #undef TARGET_EXPAND_BUILTIN
1731 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1733 #undef TARGET_MANGLE_TYPE
1734 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1736 #undef TARGET_INIT_LIBFUNCS
1737 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1739 #if TARGET_MACHO
1740 #undef TARGET_BINDS_LOCAL_P
1741 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1742 #endif
1744 #undef TARGET_MS_BITFIELD_LAYOUT_P
1745 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1747 #undef TARGET_ASM_OUTPUT_MI_THUNK
1748 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1750 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1751 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1753 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1754 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1756 #undef TARGET_REGISTER_MOVE_COST
1757 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1758 #undef TARGET_MEMORY_MOVE_COST
1759 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1760 #undef TARGET_CANNOT_COPY_INSN_P
1761 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1762 #undef TARGET_RTX_COSTS
1763 #define TARGET_RTX_COSTS rs6000_rtx_costs
1764 #undef TARGET_ADDRESS_COST
1765 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1767 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1768 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1770 #undef TARGET_PROMOTE_FUNCTION_MODE
1771 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1773 #undef TARGET_RETURN_IN_MEMORY
1774 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1776 #undef TARGET_RETURN_IN_MSB
1777 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1779 #undef TARGET_SETUP_INCOMING_VARARGS
1780 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1782 /* Always strict argument naming on rs6000. */
1783 #undef TARGET_STRICT_ARGUMENT_NAMING
1784 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1785 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1786 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1787 #undef TARGET_SPLIT_COMPLEX_ARG
1788 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1789 #undef TARGET_MUST_PASS_IN_STACK
1790 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1791 #undef TARGET_PASS_BY_REFERENCE
1792 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1793 #undef TARGET_ARG_PARTIAL_BYTES
1794 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1795 #undef TARGET_FUNCTION_ARG_ADVANCE
1796 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1797 #undef TARGET_FUNCTION_ARG
1798 #define TARGET_FUNCTION_ARG rs6000_function_arg
1799 #undef TARGET_FUNCTION_ARG_PADDING
1800 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1801 #undef TARGET_FUNCTION_ARG_BOUNDARY
1802 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1804 #undef TARGET_BUILD_BUILTIN_VA_LIST
1805 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1807 #undef TARGET_EXPAND_BUILTIN_VA_START
1808 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1810 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1811 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1813 #undef TARGET_EH_RETURN_FILTER_MODE
1814 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1816 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1817 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1819 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1820 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1822 #undef TARGET_FLOATN_MODE
1823 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1825 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1826 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1828 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1829 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1831 #undef TARGET_MD_ASM_ADJUST
1832 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1834 #undef TARGET_OPTION_OVERRIDE
1835 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1837 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1838 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1839 rs6000_builtin_vectorized_function
1841 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1842 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1843 rs6000_builtin_md_vectorized_function
1845 #undef TARGET_STACK_PROTECT_GUARD
1846 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1848 #if !TARGET_MACHO
1849 #undef TARGET_STACK_PROTECT_FAIL
1850 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1851 #endif
1853 #ifdef HAVE_AS_TLS
1854 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1855 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1856 #endif
1858 /* Use a 32-bit anchor range. This leads to sequences like:
1860 addis tmp,anchor,high
1861 add dest,tmp,low
1863 where tmp itself acts as an anchor, and can be shared between
1864 accesses to the same 64k page. */
1865 #undef TARGET_MIN_ANCHOR_OFFSET
1866 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1867 #undef TARGET_MAX_ANCHOR_OFFSET
1868 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
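
/* A standalone sketch (not from this file) of the high/low split behind
   the addis/add sequence in the comment above.  The low 16 bits are used
   as a *signed* immediate, so the high part is rounded by 0x8000 to
   compensate when the low half is negative.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  int32_t offset = 0x1234fff0;			/* low half is "negative" */
  int32_t high = (offset + 0x8000) >> 16;	/* addis immediate */
  int16_t low = (int16_t) (offset & 0xffff);	/* add displacement */
  printf ("high=0x%x low=%d check=0x%x\n", (unsigned) high, low,
	  (unsigned) ((high << 16) + low));	/* check == offset */
  return 0;
}
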
1869 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1870 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1871 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1872 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1874 #undef TARGET_BUILTIN_RECIPROCAL
1875 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1877 #undef TARGET_SECONDARY_RELOAD
1878 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1880 #undef TARGET_LEGITIMATE_ADDRESS_P
1881 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1883 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1884 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1886 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1887 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1889 #undef TARGET_CAN_ELIMINATE
1890 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1892 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1893 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1895 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1896 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1898 #undef TARGET_TRAMPOLINE_INIT
1899 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1901 #undef TARGET_FUNCTION_VALUE
1902 #define TARGET_FUNCTION_VALUE rs6000_function_value
1904 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1905 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1907 #undef TARGET_OPTION_SAVE
1908 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1910 #undef TARGET_OPTION_RESTORE
1911 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1913 #undef TARGET_OPTION_PRINT
1914 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1916 #undef TARGET_CAN_INLINE_P
1917 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1919 #undef TARGET_SET_CURRENT_FUNCTION
1920 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1922 #undef TARGET_LEGITIMATE_CONSTANT_P
1923 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1925 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1926 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1928 #undef TARGET_CAN_USE_DOLOOP_P
1929 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1931 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1932 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1934 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1935 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1936 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1937 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1938 #undef TARGET_UNWIND_WORD_MODE
1939 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1941 #undef TARGET_OFFLOAD_OPTIONS
1942 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1944 #undef TARGET_C_MODE_FOR_SUFFIX
1945 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1947 #undef TARGET_INVALID_BINARY_OP
1948 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1950 #undef TARGET_OPTAB_SUPPORTED_P
1951 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1953 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1954 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1956 #undef TARGET_COMPARE_VERSION_PRIORITY
1957 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1959 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1960 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1961 rs6000_generate_version_dispatcher_body
1963 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1964 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1965 rs6000_get_function_versions_dispatcher
1967 #undef TARGET_OPTION_FUNCTION_VERSIONS
1968 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1970 #undef TARGET_HARD_REGNO_MODE_OK
1971 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1973 #undef TARGET_MODES_TIEABLE_P
1974 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1976 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1977 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1978 rs6000_hard_regno_call_part_clobbered
1981 /* Processor table. */
1982 struct rs6000_ptt
1984 const char *const name; /* Canonical processor name. */
1985 const enum processor_type processor; /* Processor type enum value. */
1986 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1989 static struct rs6000_ptt const processor_target_table[] =
1991 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1992 #include "rs6000-cpus.def"
1993 #undef RS6000_CPU
1996 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1997 name is invalid. */
1999 static int
2000 rs6000_cpu_name_lookup (const char *name)
2002 size_t i;
2004 if (name != NULL)
2006 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2007 if (! strcmp (name, processor_target_table[i].name))
2008 return (int)i;
2011 return -1;
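
/* A standalone sketch (not from this file) of the pattern above: an
   X-macro include expands into the table entries, and a strcmp loop
   resolves -mcpu=/-mtune= names to indexes.  The cpu names and flag
   values below are invented; the real list lives in rs6000-cpus.def.  */
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(A) (sizeof (A) / sizeof ((A)[0]))

struct ptt { const char *name; int flags; };

static const struct ptt table[] = {
#define CPU(NAME, FLAGS) { NAME, FLAGS },
  CPU ("power8", 0x100)		/* hypothetical flags */
  CPU ("power9", 0x300)
#undef CPU
};

static int
cpu_lookup (const char *name)
{
  for (size_t i = 0; i < ARRAY_SIZE (table); i++)
    if (! strcmp (name, table[i].name))
      return (int) i;
  return -1;			/* unknown name, as above */
}

int
main (void)
{
  printf ("power9 -> %d, power10 -> %d\n",
	  cpu_lookup ("power9"), cpu_lookup ("power10"));	/* 1, -1 */
  return 0;
}
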
2015 /* Return number of consecutive hard regs needed starting at reg REGNO
2016 to hold something of mode MODE.
2017 This is ordinarily the length in words of a value of mode MODE
2018 but can be less for certain modes in special long registers.
2020 POWER and PowerPC GPRs hold 32 bits worth;
2021 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2023 static int
2024 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2026 unsigned HOST_WIDE_INT reg_size;
2028 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2029 128-bit floating point that can go in vector registers, which has VSX
2030 memory addressing. */
2031 if (FP_REGNO_P (regno))
2032 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2033 ? UNITS_PER_VSX_WORD
2034 : UNITS_PER_FP_WORD);
2036 else if (ALTIVEC_REGNO_P (regno))
2037 reg_size = UNITS_PER_ALTIVEC_WORD;
2039 else
2040 reg_size = UNITS_PER_WORD;
2042 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
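
/* A standalone sketch (not from this file) of the rounding in the return
   statement above: the classic (size + regsize - 1) / regsize ceiling
   division.  The byte sizes are illustrative, not the target's real
   UNITS_PER_* values.  */
#include <stdio.h>

static int
nregs_for (int mode_size, int reg_size)
{
  return (mode_size + reg_size - 1) / reg_size;
}

int
main (void)
{
  printf ("16 bytes / 8-byte regs -> %d\n", nregs_for (16, 8)); /* 2 */
  printf ("10 bytes / 8-byte regs -> %d\n", nregs_for (10, 8)); /* 2 */
  printf (" 4 bytes / 8-byte regs -> %d\n", nregs_for (4, 8));  /* 1 */
  return 0;
}
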
2045 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2046 MODE. */
2047 static int
2048 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2050 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2052 if (COMPLEX_MODE_P (mode))
2053 mode = GET_MODE_INNER (mode);
2055 /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2056 register combinations; we use PTImode where we need to deal with quad
2057 word memory operations.  Don't allow quad words in the argument or frame
2058 pointer registers, just registers 0..31. */
2059 if (mode == PTImode)
2060 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2061 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2062 && ((regno & 1) == 0));
2064 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2065 implementations. Don't allow an item to be split between a FP register
2066 and an Altivec register. Allow TImode in all VSX registers if the user
2067 asked for it. */
2068 if (TARGET_VSX && VSX_REGNO_P (regno)
2069 && (VECTOR_MEM_VSX_P (mode)
2070 || FLOAT128_VECTOR_P (mode)
2071 || reg_addr[mode].scalar_in_vmx_p
2072 || mode == TImode
2073 || (TARGET_VADDUQM && mode == V1TImode)))
2075 if (FP_REGNO_P (regno))
2076 return FP_REGNO_P (last_regno);
2078 if (ALTIVEC_REGNO_P (regno))
2080 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2081 return 0;
2083 return ALTIVEC_REGNO_P (last_regno);
2087 /* The GPRs can hold any mode, but values bigger than one register
2088 cannot go past R31. */
2089 if (INT_REGNO_P (regno))
2090 return INT_REGNO_P (last_regno);
2092 /* The float registers (except for VSX vector modes) can only hold floating
2093 modes and DImode. */
2094 if (FP_REGNO_P (regno))
2096 if (FLOAT128_VECTOR_P (mode))
2097 return false;
2099 if (SCALAR_FLOAT_MODE_P (mode)
2100 && (mode != TDmode || (regno % 2) == 0)
2101 && FP_REGNO_P (last_regno))
2102 return 1;
2104 if (GET_MODE_CLASS (mode) == MODE_INT)
2106 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2107 return 1;
2109 if (TARGET_P8_VECTOR && (mode == SImode))
2110 return 1;
2112 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2113 return 1;
2116 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2117 && PAIRED_VECTOR_MODE (mode))
2118 return 1;
2120 return 0;
2123 /* The CR register can only hold CC modes. */
2124 if (CR_REGNO_P (regno))
2125 return GET_MODE_CLASS (mode) == MODE_CC;
2127 if (CA_REGNO_P (regno))
2128 return mode == Pmode || mode == SImode;
2130 /* AltiVec modes can only go in AltiVec registers. */
2131 if (ALTIVEC_REGNO_P (regno))
2132 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2133 || mode == V1TImode);
2135 /* We cannot put non-VSX TImode or PTImode anywhere except general
2136 registers, and the value must fit within the register set. */
2138 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2143 static bool
2144 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2146 return rs6000_hard_regno_mode_ok_p[mode][regno];
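
/* A standalone sketch (not from this file) of the caching scheme above:
   the "uncached" predicate is evaluated once for every (mode, regno)
   pair when the tables are initialized, so the target hook itself is a
   single array load.  The predicate and table sizes are invented.  */
#include <stdbool.h>
#include <stdio.h>

#define N_MODES 4
#define N_REGS  8

static bool ok_p[N_MODES][N_REGS];

static bool
ok_uncached (int regno, int mode)
{
  return (regno + mode) % 2 == 0;	/* stand-in for the real checks */
}

static void
init_tables (void)
{
  for (int m = 0; m < N_MODES; m++)
    for (int r = 0; r < N_REGS; r++)
      ok_p[m][r] = ok_uncached (r, m);
}

static bool
hard_regno_mode_ok (unsigned int regno, int mode)
{
  return ok_p[mode][regno];		/* O(1) on the hot path */
}

int
main (void)
{
  init_tables ();
  printf ("%d %d\n", hard_regno_mode_ok (1, 3), hard_regno_mode_ok (1, 2));
  return 0;				/* prints "1 0" */
}
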
2149 /* Implement TARGET_MODES_TIEABLE_P.
2151 PTImode cannot tie with other modes because PTImode is restricted to even
2152 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2153 57744).
2155 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2156 128-bit floating point on VSX systems ties with other vectors. */
2158 static bool
2159 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2161 if (mode1 == PTImode)
2162 return mode2 == PTImode;
2163 if (mode2 == PTImode)
2164 return false;
2166 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2167 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2168 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2169 return false;
2171 if (SCALAR_FLOAT_MODE_P (mode1))
2172 return SCALAR_FLOAT_MODE_P (mode2);
2173 if (SCALAR_FLOAT_MODE_P (mode2))
2174 return false;
2176 if (GET_MODE_CLASS (mode1) == MODE_CC)
2177 return GET_MODE_CLASS (mode2) == MODE_CC;
2178 if (GET_MODE_CLASS (mode2) == MODE_CC)
2179 return false;
2181 if (PAIRED_VECTOR_MODE (mode1))
2182 return PAIRED_VECTOR_MODE (mode2);
2183 if (PAIRED_VECTOR_MODE (mode2))
2184 return false;
2186 return true;
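
/* A standalone sketch (not from this file) of the cascade above: modes
   are partitioned into classes, and two modes tie only when they fall in
   the same class (or in none of the special classes).  Checking mode1
   first and then mode2 keeps the predicate symmetric.  The classes here
   are invented.  */
#include <stdbool.h>
#include <stdio.h>

enum mclass { MC_VECTOR, MC_FLOAT, MC_CC, MC_OTHER };

static bool
tieable_p (enum mclass m1, enum mclass m2)
{
  if (m1 == MC_VECTOR)
    return m2 == MC_VECTOR;
  if (m2 == MC_VECTOR)
    return false;

  if (m1 == MC_FLOAT)
    return m2 == MC_FLOAT;
  if (m2 == MC_FLOAT)
    return false;

  if (m1 == MC_CC)
    return m2 == MC_CC;
  if (m2 == MC_CC)
    return false;

  return true;				/* two ordinary modes always tie */
}

int
main (void)
{
  printf ("%d %d %d\n", tieable_p (MC_VECTOR, MC_VECTOR),
	  tieable_p (MC_VECTOR, MC_FLOAT), tieable_p (MC_OTHER, MC_OTHER));
  return 0;				/* prints "1 0 1" */
}
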
2189 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2191 static bool
2192 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2194 if (TARGET_32BIT
2195 && TARGET_POWERPC64
2196 && GET_MODE_SIZE (mode) > 4
2197 && INT_REGNO_P (regno))
2198 return true;
2200 if (TARGET_VSX
2201 && FP_REGNO_P (regno)
2202 && GET_MODE_SIZE (mode) > 8
2203 && !FLOAT128_2REG_P (mode))
2204 return true;
2206 return false;
2209 /* Print interesting facts about registers. */
2210 static void
2211 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2213 int r, m;
2215 for (r = first_regno; r <= last_regno; ++r)
2217 const char *comma = "";
2218 int len;
2220 if (first_regno == last_regno)
2221 fprintf (stderr, "%s:\t", reg_name);
2222 else
2223 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2225 len = 8;
2226 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2227 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2229 if (len > 70)
2231 fprintf (stderr, ",\n\t");
2232 len = 8;
2233 comma = "";
2236 if (rs6000_hard_regno_nregs[m][r] > 1)
2237 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2238 rs6000_hard_regno_nregs[m][r]);
2239 else
2240 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2242 comma = ", ";
2245 if (call_used_regs[r])
2247 if (len > 70)
2249 fprintf (stderr, ",\n\t");
2250 len = 8;
2251 comma = "";
2254 len += fprintf (stderr, "%s%s", comma, "call-used");
2255 comma = ", ";
2258 if (fixed_regs[r])
2260 if (len > 70)
2262 fprintf (stderr, ",\n\t");
2263 len = 8;
2264 comma = "";
2267 len += fprintf (stderr, "%s%s", comma, "fixed");
2268 comma = ", ";
2271 if (len > 70)
2273 fprintf (stderr, ",\n\t");
2274 comma = "";
2277 len += fprintf (stderr, "%sreg-class = %s", comma,
2278 reg_class_names[(int)rs6000_regno_regclass[r]]);
2279 comma = ", ";
2281 if (len > 70)
2283 fprintf (stderr, ",\n\t");
2284 comma = "";
2287 fprintf (stderr, "%sregno = %d\n", comma, r);
2291 static const char *
2292 rs6000_debug_vector_unit (enum rs6000_vector v)
2294 const char *ret;
2296 switch (v)
2298 case VECTOR_NONE: ret = "none"; break;
2299 case VECTOR_ALTIVEC: ret = "altivec"; break;
2300 case VECTOR_VSX: ret = "vsx"; break;
2301 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2302 case VECTOR_PAIRED: ret = "paired"; break;
2303 case VECTOR_OTHER: ret = "other"; break;
2304 default: ret = "unknown"; break;
2307 return ret;
2310 /* Inner function printing just the address mask for a particular reload
2311 register class. */
2312 DEBUG_FUNCTION char *
2313 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2315 static char ret[8];
2316 char *p = ret;
2318 if ((mask & RELOAD_REG_VALID) != 0)
2319 *p++ = 'v';
2320 else if (keep_spaces)
2321 *p++ = ' ';
2323 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2324 *p++ = 'm';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2328 if ((mask & RELOAD_REG_INDEXED) != 0)
2329 *p++ = 'i';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2333 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2334 *p++ = 'O';
2335 else if ((mask & RELOAD_REG_OFFSET) != 0)
2336 *p++ = 'o';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2341 *p++ = '+';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2345 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2346 *p++ = '+';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2350 if ((mask & RELOAD_REG_AND_M16) != 0)
2351 *p++ = '&';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2355 *p = '\0';
2357 return ret;
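
/* A standalone sketch (not from this file) of the flag-string builder
   above: each bit appends its letter, or a space when KEEP_SPACES asks
   for fixed-width output, and the result lives in a static buffer just
   like the original.  The three flag bits are invented.  */
#include <stdbool.h>
#include <stdio.h>

#define F_VALID   0x1
#define F_INDEXED 0x2
#define F_OFFSET  0x4

static char *
mask_str (unsigned int mask, bool keep_spaces)
{
  static char ret[4];
  char *p = ret;

  if ((mask & F_VALID) != 0)
    *p++ = 'v';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & F_INDEXED) != 0)
    *p++ = 'i';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & F_OFFSET) != 0)
    *p++ = 'o';
  else if (keep_spaces)
    *p++ = ' ';

  *p = '\0';
  return ret;
}

int
main (void)
{
  printf ("[%s]\n", mask_str (F_VALID | F_OFFSET, true));	/* [v o] */
  printf ("[%s]\n", mask_str (F_VALID | F_OFFSET, false));	/* [vo] */
  return 0;
}
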
2360 /* Print the address masks in a human readable fashion. */
2361 DEBUG_FUNCTION void
2362 rs6000_debug_print_mode (ssize_t m)
2364 ssize_t rc;
2365 int spaces = 0;
2366 bool fuse_extra_p;
2368 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2369 for (rc = 0; rc < N_RELOAD_REG; rc++)
2370 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2371 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2373 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2374 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2375 fprintf (stderr, " Reload=%c%c",
2376 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2377 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2378 else
2379 spaces += sizeof (" Reload=sl") - 1;
2381 if (reg_addr[m].scalar_in_vmx_p)
2383 fprintf (stderr, "%*s Upper=y", spaces, "");
2384 spaces = 0;
2386 else
2387 spaces += sizeof (" Upper=y") - 1;
2389 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2390 || reg_addr[m].fused_toc);
2391 if (!fuse_extra_p)
2393 for (rc = 0; rc < N_RELOAD_REG; rc++)
2395 if (rc != RELOAD_REG_ANY)
2397 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2399 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2400 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2401 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2403 fuse_extra_p = true;
2404 break;
2410 if (fuse_extra_p)
2412 fprintf (stderr, "%*s Fuse:", spaces, "");
2413 spaces = 0;
2415 for (rc = 0; rc < N_RELOAD_REG; rc++)
2417 if (rc != RELOAD_REG_ANY)
2419 char load, store;
2421 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2422 load = 'l';
2423 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2424 load = 'L';
2425 else
2426 load = '-';
2428 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2429 store = 's';
2430 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2431 store = 'S';
2432 else
2433 store = '-';
2435 if (load == '-' && store == '-')
2436 spaces += 5;
2437 else
2439 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2440 reload_reg_map[rc].name[0], load, store);
2441 spaces = 0;
2446 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2448 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2449 spaces = 0;
2451 else
2452 spaces += sizeof (" P8gpr") - 1;
2454 if (reg_addr[m].fused_toc)
2456 fprintf (stderr, "%*sToc", (spaces + 1), "");
2457 spaces = 0;
2459 else
2460 spaces += sizeof (" Toc") - 1;
2462 else
2463 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2465 if (rs6000_vector_unit[m] != VECTOR_NONE
2466 || rs6000_vector_mem[m] != VECTOR_NONE)
2468 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2469 spaces, "",
2470 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2471 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2474 fputs ("\n", stderr);
2477 #define DEBUG_FMT_ID "%-32s= "
2478 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2479 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2480 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
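
/* A standalone sketch (not from this file) of the format macros above:
   C's adjacent string-literal concatenation splices the shared
   "%-32s= " prefix into each derived format at compile time, so every
   -mdebug=reg line gets the same 32-column label field.  */
#include <stdio.h>

#define FMT_ID "%-32s= "
#define FMT_D  FMT_ID "%d\n"
#define FMT_S  FMT_ID "%s\n"

int
main (void)
{
  fprintf (stderr, FMT_D, "tls_size", 16);
  fprintf (stderr, FMT_S, "abi", "ELFv2");	/* values line up in a column */
  return 0;
}
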
2482 /* Print various interesting information with -mdebug=reg. */
2483 static void
2484 rs6000_debug_reg_global (void)
2486 static const char *const tf[2] = { "false", "true" };
2487 const char *nl = (const char *)0;
2488 int m;
2489 size_t m1, m2, v;
2490 char costly_num[20];
2491 char nop_num[20];
2492 char flags_buffer[40];
2493 const char *costly_str;
2494 const char *nop_str;
2495 const char *trace_str;
2496 const char *abi_str;
2497 const char *cmodel_str;
2498 struct cl_target_option cl_opts;
2500 /* Modes we want tieable information on. */
2501 static const machine_mode print_tieable_modes[] = {
2502 QImode,
2503 HImode,
2504 SImode,
2505 DImode,
2506 TImode,
2507 PTImode,
2508 SFmode,
2509 DFmode,
2510 TFmode,
2511 IFmode,
2512 KFmode,
2513 SDmode,
2514 DDmode,
2515 TDmode,
2516 V2SImode,
2517 V16QImode,
2518 V8HImode,
2519 V4SImode,
2520 V2DImode,
2521 V1TImode,
2522 V32QImode,
2523 V16HImode,
2524 V8SImode,
2525 V4DImode,
2526 V2TImode,
2527 V2SFmode,
2528 V4SFmode,
2529 V2DFmode,
2530 V8SFmode,
2531 V4DFmode,
2532 CCmode,
2533 CCUNSmode,
2534 CCEQmode,
2537 /* Virtual regs we are interested in. */
2538 static const struct {
2539 int regno; /* register number. */
2540 const char *name; /* register name. */
2541 } virtual_regs[] = {
2542 { STACK_POINTER_REGNUM, "stack pointer:" },
2543 { TOC_REGNUM, "toc: " },
2544 { STATIC_CHAIN_REGNUM, "static chain: " },
2545 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2546 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2547 { ARG_POINTER_REGNUM, "arg pointer: " },
2548 { FRAME_POINTER_REGNUM, "frame pointer:" },
2549 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2550 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2551 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2552 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2553 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2554 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2555 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2556 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2557 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2560 fputs ("\nHard register information:\n", stderr);
2561 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2562 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2563 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2564 LAST_ALTIVEC_REGNO,
2565 "vs");
2566 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2567 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2568 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2569 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2570 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2571 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2573 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2574 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2575 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2577 fprintf (stderr,
2578 "\n"
2579 "d reg_class = %s\n"
2580 "f reg_class = %s\n"
2581 "v reg_class = %s\n"
2582 "wa reg_class = %s\n"
2583 "wb reg_class = %s\n"
2584 "wd reg_class = %s\n"
2585 "we reg_class = %s\n"
2586 "wf reg_class = %s\n"
2587 "wg reg_class = %s\n"
2588 "wh reg_class = %s\n"
2589 "wi reg_class = %s\n"
2590 "wj reg_class = %s\n"
2591 "wk reg_class = %s\n"
2592 "wl reg_class = %s\n"
2593 "wm reg_class = %s\n"
2594 "wo reg_class = %s\n"
2595 "wp reg_class = %s\n"
2596 "wq reg_class = %s\n"
2597 "wr reg_class = %s\n"
2598 "ws reg_class = %s\n"
2599 "wt reg_class = %s\n"
2600 "wu reg_class = %s\n"
2601 "wv reg_class = %s\n"
2602 "ww reg_class = %s\n"
2603 "wx reg_class = %s\n"
2604 "wy reg_class = %s\n"
2605 "wz reg_class = %s\n"
2606 "wA reg_class = %s\n"
2607 "wH reg_class = %s\n"
2608 "wI reg_class = %s\n"
2609 "wJ reg_class = %s\n"
2610 "wK reg_class = %s\n"
2611 "\n",
2612 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2613 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2614 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2615 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2616 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2617 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2618 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2645 nl = "\n";
2646 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2647 rs6000_debug_print_mode (m);
2649 fputs ("\n", stderr);
2651 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2653 machine_mode mode1 = print_tieable_modes[m1];
2654 bool first_time = true;
2656 nl = (const char *)0;
2657 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2659 machine_mode mode2 = print_tieable_modes[m2];
2660 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2662 if (first_time)
2664 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2665 nl = "\n";
2666 first_time = false;
2669 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2673 if (!first_time)
2674 fputs ("\n", stderr);
2677 if (nl)
2678 fputs (nl, stderr);
2680 if (rs6000_recip_control)
2682 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2684 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2685 if (rs6000_recip_bits[m])
2687 fprintf (stderr,
2688 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2689 GET_MODE_NAME (m),
2690 (RS6000_RECIP_AUTO_RE_P (m)
2691 ? "auto"
2692 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2693 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2694 ? "auto"
2695 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2698 fputs ("\n", stderr);
2701 if (rs6000_cpu_index >= 0)
2703 const char *name = processor_target_table[rs6000_cpu_index].name;
2704 HOST_WIDE_INT flags
2705 = processor_target_table[rs6000_cpu_index].target_enable;
2707 sprintf (flags_buffer, "-mcpu=%s flags", name);
2708 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2710 else
2711 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2713 if (rs6000_tune_index >= 0)
2715 const char *name = processor_target_table[rs6000_tune_index].name;
2716 HOST_WIDE_INT flags
2717 = processor_target_table[rs6000_tune_index].target_enable;
2719 sprintf (flags_buffer, "-mtune=%s flags", name);
2720 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2722 else
2723 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2725 cl_target_option_save (&cl_opts, &global_options);
2726 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2727 rs6000_isa_flags);
2729 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2730 rs6000_isa_flags_explicit);
2732 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2733 rs6000_builtin_mask);
2735 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2737 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2738 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2740 switch (rs6000_sched_costly_dep)
2742 case max_dep_latency:
2743 costly_str = "max_dep_latency";
2744 break;
2746 case no_dep_costly:
2747 costly_str = "no_dep_costly";
2748 break;
2750 case all_deps_costly:
2751 costly_str = "all_deps_costly";
2752 break;
2754 case true_store_to_load_dep_costly:
2755 costly_str = "true_store_to_load_dep_costly";
2756 break;
2758 case store_to_load_dep_costly:
2759 costly_str = "store_to_load_dep_costly";
2760 break;
2762 default:
2763 costly_str = costly_num;
2764 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2765 break;
2768 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2770 switch (rs6000_sched_insert_nops)
2772 case sched_finish_regroup_exact:
2773 nop_str = "sched_finish_regroup_exact";
2774 break;
2776 case sched_finish_pad_groups:
2777 nop_str = "sched_finish_pad_groups";
2778 break;
2780 case sched_finish_none:
2781 nop_str = "sched_finish_none";
2782 break;
2784 default:
2785 nop_str = nop_num;
2786 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2787 break;
2790 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2792 switch (rs6000_sdata)
2794 default:
2795 case SDATA_NONE:
2796 break;
2798 case SDATA_DATA:
2799 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2800 break;
2802 case SDATA_SYSV:
2803 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2804 break;
2806 case SDATA_EABI:
2807 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2808 break;
2812 switch (rs6000_traceback)
2814 case traceback_default: trace_str = "default"; break;
2815 case traceback_none: trace_str = "none"; break;
2816 case traceback_part: trace_str = "part"; break;
2817 case traceback_full: trace_str = "full"; break;
2818 default: trace_str = "unknown"; break;
2821 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2823 switch (rs6000_current_cmodel)
2825 case CMODEL_SMALL: cmodel_str = "small"; break;
2826 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2827 case CMODEL_LARGE: cmodel_str = "large"; break;
2828 default: cmodel_str = "unknown"; break;
2831 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2833 switch (rs6000_current_abi)
2835 case ABI_NONE: abi_str = "none"; break;
2836 case ABI_AIX: abi_str = "aix"; break;
2837 case ABI_ELFv2: abi_str = "ELFv2"; break;
2838 case ABI_V4: abi_str = "V4"; break;
2839 case ABI_DARWIN: abi_str = "darwin"; break;
2840 default: abi_str = "unknown"; break;
2843 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2845 if (rs6000_altivec_abi)
2846 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2848 if (rs6000_darwin64_abi)
2849 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2851 fprintf (stderr, DEBUG_FMT_S, "single_float",
2852 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2854 fprintf (stderr, DEBUG_FMT_S, "double_float",
2855 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2857 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2858 (TARGET_SOFT_FLOAT ? "true" : "false"));
2860 if (TARGET_LINK_STACK)
2861 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2863 if (TARGET_P8_FUSION)
2865 char options[80];
2867 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2868 if (TARGET_TOC_FUSION)
2869 strcat (options, ", toc");
2871 if (TARGET_P8_FUSION_SIGN)
2872 strcat (options, ", sign");
2874 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2877 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2878 TARGET_SECURE_PLT ? "secure" : "bss");
2879 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2880 aix_struct_return ? "aix" : "sysv");
2881 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2882 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2883 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2884 tf[!!rs6000_align_branch_targets]);
2885 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2886 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2887 rs6000_long_double_type_size);
2888 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2889 (int)rs6000_sched_restricted_insns_priority);
2890 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2891 (int)END_BUILTINS);
2892 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2893 (int)RS6000_BUILTIN_COUNT);
2895 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2896 (int)TARGET_FLOAT128_ENABLE_TYPE);
2898 if (TARGET_VSX)
2899 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2900 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2902 if (TARGET_DIRECT_MOVE_128)
2903 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2904 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2908 /* Update the addr mask bits in reg_addr to help secondary reload and
2909 legitimate address support figure out the appropriate addressing to
2910 use. */
2912 static void
2913 rs6000_setup_reg_addr_masks (void)
2915 ssize_t rc, reg, m, nregs;
2916 addr_mask_type any_addr_mask, addr_mask;
2918 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2920 machine_mode m2 = (machine_mode) m;
2921 bool complex_p = false;
2922 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2923 size_t msize;
2925 if (COMPLEX_MODE_P (m2))
2927 complex_p = true;
2928 m2 = GET_MODE_INNER (m2);
2931 msize = GET_MODE_SIZE (m2);
2933 /* SDmode is special in that we want to access it only via REG+REG
2934 addressing on power7 and above, since we want to use the LFIWZX and
2935 STFIWZX instructions to load it. */
2936 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2938 any_addr_mask = 0;
2939 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2941 addr_mask = 0;
2942 reg = reload_reg_map[rc].reg;
2944 /* Can mode values go in the GPR/FPR/Altivec registers? */
2945 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2947 bool small_int_vsx_p = (small_int_p
2948 && (rc == RELOAD_REG_FPR
2949 || rc == RELOAD_REG_VMX));
2951 nregs = rs6000_hard_regno_nregs[m][reg];
2952 addr_mask |= RELOAD_REG_VALID;
2954 /* Indicate if the mode takes more than 1 physical register. If
2955 it takes a single register, indicate it can do REG+REG
2956 addressing. Small integers in VSX registers can only do
2957 REG+REG addressing. */
2958 if (small_int_vsx_p)
2959 addr_mask |= RELOAD_REG_INDEXED;
2960 else if (nregs > 1 || m == BLKmode || complex_p)
2961 addr_mask |= RELOAD_REG_MULTIPLE;
2962 else
2963 addr_mask |= RELOAD_REG_INDEXED;
2965 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2966 addressing. If we allow scalars into Altivec registers,
2967 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2969 if (TARGET_UPDATE
2970 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2971 && msize <= 8
2972 && !VECTOR_MODE_P (m2)
2973 && !FLOAT128_VECTOR_P (m2)
2974 && !complex_p
2975 && !small_int_vsx_p)
2977 addr_mask |= RELOAD_REG_PRE_INCDEC;
2979 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2980 we don't allow PRE_MODIFY for some multi-register
2981 operations. */
2982 switch (m)
2984 default:
2985 addr_mask |= RELOAD_REG_PRE_MODIFY;
2986 break;
2988 case E_DImode:
2989 if (TARGET_POWERPC64)
2990 addr_mask |= RELOAD_REG_PRE_MODIFY;
2991 break;
2993 case E_DFmode:
2994 case E_DDmode:
2995 if (TARGET_DF_INSN)
2996 addr_mask |= RELOAD_REG_PRE_MODIFY;
2997 break;
3002 /* GPR and FPR registers can do REG+OFFSET addressing, except
3003 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3004 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3005 if ((addr_mask != 0) && !indexed_only_p
3006 && msize <= 8
3007 && (rc == RELOAD_REG_GPR
3008 || ((msize == 8 || m2 == SFmode)
3009 && (rc == RELOAD_REG_FPR
3010 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3011 addr_mask |= RELOAD_REG_OFFSET;
3013 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3014 instructions are enabled.  The offset for 128-bit VSX registers is
3015 only 12 bits.  While GPRs can handle the full offset range, VSX
3016 registers can only handle the restricted range.
3017 else if ((addr_mask != 0) && !indexed_only_p
3018 && msize == 16 && TARGET_P9_VECTOR
3019 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3020 || (m2 == TImode && TARGET_VSX)))
3022 addr_mask |= RELOAD_REG_OFFSET;
3023 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3024 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3027 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3028 addressing on 128-bit types. */
3029 if (rc == RELOAD_REG_VMX && msize == 16
3030 && (addr_mask & RELOAD_REG_VALID) != 0)
3031 addr_mask |= RELOAD_REG_AND_M16;
3033 reg_addr[m].addr_mask[rc] = addr_mask;
3034 any_addr_mask |= addr_mask;
3037 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
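
/* A standalone sketch (not from this file) of the accumulation above:
   each reload register class contributes its own capability mask, and
   the RELOAD_REG_ANY slot is simply the OR of all of them, so "can any
   class address this mode that way?" becomes a single test.  The mask
   values are invented.  */
#include <stdio.h>

#define N_CLASSES 3			/* e.g. GPR, FPR, VMX */

int
main (void)
{
  unsigned int per_class[N_CLASSES] = { 0x5, 0x3, 0x9 };	/* hypothetical */
  unsigned int any = 0;

  for (int rc = 0; rc < N_CLASSES; rc++)
    any |= per_class[rc];

  printf ("any = 0x%x\n", any);		/* 0xf: the union of capabilities */
  return 0;
}
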
3042 /* Initialize the various global tables that are based on register size. */
3043 static void
3044 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3046 ssize_t r, m, c;
3047 int align64;
3048 int align32;
3050 /* Precalculate REGNO_REG_CLASS. */
3051 rs6000_regno_regclass[0] = GENERAL_REGS;
3052 for (r = 1; r < 32; ++r)
3053 rs6000_regno_regclass[r] = BASE_REGS;
3055 for (r = 32; r < 64; ++r)
3056 rs6000_regno_regclass[r] = FLOAT_REGS;
3058 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3059 rs6000_regno_regclass[r] = NO_REGS;
3061 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3062 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3064 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3065 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3066 rs6000_regno_regclass[r] = CR_REGS;
3068 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3069 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3070 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3071 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3072 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3073 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3074 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3075 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3076 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3077 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3079 /* Precalculate register class to simpler reload register class. We don't
3080 need all of the register classes that are combinations of different
3081 classes, just the simple ones that have constraint letters. */
3082 for (c = 0; c < N_REG_CLASSES; c++)
3083 reg_class_to_reg_type[c] = NO_REG_TYPE;
3085 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3086 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3087 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3088 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3089 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3090 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3091 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3092 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3093 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3094 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3096 if (TARGET_VSX)
3098 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3099 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3101 else
3103 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3107 /* Precalculate the valid memory formats as well as the vector information;
3108 this must be set up before the rs6000_hard_regno_nregs_internal calls
3109 below. */
3110 gcc_assert ((int)VECTOR_NONE == 0);
3111 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3112 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3114 gcc_assert ((int)CODE_FOR_nothing == 0);
3115 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3117 gcc_assert ((int)NO_REGS == 0);
3118 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3120 /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128
3121 controls whether the compiler uses native alignment or still assumes 128-bit alignment. */
3122 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3124 align64 = 64;
3125 align32 = 32;
3127 else
3129 align64 = 128;
3130 align32 = 128;
3133 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3134 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3135 if (TARGET_FLOAT128_TYPE)
3137 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3138 rs6000_vector_align[KFmode] = 128;
3140 if (FLOAT128_IEEE_P (TFmode))
3142 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3143 rs6000_vector_align[TFmode] = 128;
3147 /* V2DF mode, VSX only. */
3148 if (TARGET_VSX)
3150 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3151 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3152 rs6000_vector_align[V2DFmode] = align64;
3155 /* V4SF mode, either VSX or Altivec. */
3156 if (TARGET_VSX)
3158 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3159 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3160 rs6000_vector_align[V4SFmode] = align32;
3162 else if (TARGET_ALTIVEC)
3164 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3165 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3166 rs6000_vector_align[V4SFmode] = align32;
3169 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3170 and stores. */
3171 if (TARGET_ALTIVEC)
3173 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3174 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3175 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3176 rs6000_vector_align[V4SImode] = align32;
3177 rs6000_vector_align[V8HImode] = align32;
3178 rs6000_vector_align[V16QImode] = align32;
3180 if (TARGET_VSX)
3182 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3183 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3184 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3186 else
3188 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3189 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3190 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3194 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3195 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3196 if (TARGET_VSX)
3198 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3199 rs6000_vector_unit[V2DImode]
3200 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3201 rs6000_vector_align[V2DImode] = align64;
3203 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3204 rs6000_vector_unit[V1TImode]
3205 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3206 rs6000_vector_align[V1TImode] = 128;
3209 /* DFmode, see if we want to use the VSX unit. Memory is handled
3210 differently, so don't set rs6000_vector_mem. */
3211 if (TARGET_VSX)
3213 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3214 rs6000_vector_align[DFmode] = 64;
3217 /* SFmode, see if we want to use the VSX unit. */
3218 if (TARGET_P8_VECTOR)
3220 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3221 rs6000_vector_align[SFmode] = 32;
3224 /* Allow TImode in VSX register and set the VSX memory macros. */
3225 if (TARGET_VSX)
3227 rs6000_vector_mem[TImode] = VECTOR_VSX;
3228 rs6000_vector_align[TImode] = align64;
3231 /* TODO add paired floating point vector support. */
3233 /* Register class constraints for the constraints that depend on compile
3234 switches. When the VSX code was added, different constraints were added
3235 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3236 of the VSX registers are used.  The register classes for scalar floating
3237 point types are set based on whether we allow that type into the upper
3238 (Altivec) registers. GCC has register classes to target the Altivec
3239 registers for load/store operations, to select using a VSX memory
3240 operation instead of the traditional floating point operation. The
3241 constraints are:
3243 d - Register class to use with traditional DFmode instructions.
3244 f - Register class to use with traditional SFmode instructions.
3245 v - Altivec register.
3246 wa - Any VSX register.
3247 wc - Reserved to represent individual CR bits (used in LLVM).
3248 wd - Preferred register class for V2DFmode.
3249 wf - Preferred register class for V4SFmode.
3250 wg - Float register for power6x move insns.
3251 wh - FP register for direct move instructions.
3252 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3253 wj - FP or VSX register to hold 64-bit integers for direct moves.
3254 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3255 wl - Float register if we can do 32-bit signed int loads.
3256 wm - VSX register for ISA 2.07 direct move operations.
3257 wn - always NO_REGS.
3258 wr - GPR if 64-bit mode is permitted.
3259 ws - Register class to do ISA 2.06 DF operations.
3260 wt - VSX register for TImode in VSX registers.
3261 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3262 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3263 ww - Register class to do SF conversions in with VSX operations.
3264 wx - Float register if we can do 32-bit int stores.
3265 wy - Register class to do ISA 2.07 SF operations.
3266 wz - Float register if we can do 32-bit unsigned int loads.
3267 wH - Altivec register if SImode is allowed in VSX registers.
3268 wI - VSX register if SImode is allowed in VSX registers.
3269 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3270 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3272 if (TARGET_HARD_FLOAT)
3273 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3275 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3276 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3278 if (TARGET_VSX)
3280 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3281 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3282 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3283 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3284 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3285 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3286 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3289 /* Add conditional constraints based on various options, to allow us to
3290 collapse multiple insn patterns. */
3291 if (TARGET_ALTIVEC)
3292 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3294 if (TARGET_MFPGPR) /* DFmode */
3295 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3297 if (TARGET_LFIWAX)
3298 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3300 if (TARGET_DIRECT_MOVE)
3302 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3303 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3304 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3305 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3306 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3307 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3310 if (TARGET_POWERPC64)
3312 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3313 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3316 if (TARGET_P8_VECTOR) /* SFmode */
3318 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3319 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3320 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3322 else if (TARGET_VSX)
3323 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3325 if (TARGET_STFIWX)
3326 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3328 if (TARGET_LFIWZX)
3329 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3331 if (TARGET_FLOAT128_TYPE)
3333 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3334 if (FLOAT128_IEEE_P (TFmode))
3335 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3338 if (TARGET_P9_VECTOR)
3340 /* Support for new D-form instructions. */
3341 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3343 /* Support for ISA 3.0 (power9) vectors. */
3344 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3347 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3348 if (TARGET_DIRECT_MOVE_128)
3349 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3351 /* Support small integers in VSX registers. */
3352 if (TARGET_P8_VECTOR)
3354 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3355 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3356 if (TARGET_P9_VECTOR)
3358 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3359 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3363 /* Set up the reload helper and direct move functions. */
3364 if (TARGET_VSX || TARGET_ALTIVEC)
3366 if (TARGET_64BIT)
3368 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3369 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3370 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3371 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3372 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3373 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3374 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3375 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3376 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3377 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3378 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3379 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3380 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3381 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3382 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3383 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3384 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3385 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3386 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3387 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3389 if (FLOAT128_VECTOR_P (KFmode))
3391 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3392 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3395 if (FLOAT128_VECTOR_P (TFmode))
3397 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3398 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3401 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3402 available. */
3403 if (TARGET_NO_SDMODE_STACK)
3405 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3406 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3409 if (TARGET_VSX)
3411 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3412 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3415 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3417 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3418 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3419 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3420 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3421 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3422 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3423 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3424 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3425 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3427 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3428 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3429 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3430 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3431 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3432 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3433 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3434 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3435 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3437 if (FLOAT128_VECTOR_P (KFmode))
3439 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3440 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3443 if (FLOAT128_VECTOR_P (TFmode))
3445 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3446 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3450 else
3452 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3453 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3454 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3455 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3456 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3457 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3458 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3459 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3460 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3461 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3462 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3463 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3464 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3465 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3466 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3467 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3468 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3469 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3470 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3471 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3473 if (FLOAT128_VECTOR_P (KFmode))
3475 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3476 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3479 if (FLOAT128_IEEE_P (TFmode))
3481 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3482 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3485 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3486 available. */
3487 if (TARGET_NO_SDMODE_STACK)
3489 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3490 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3493 if (TARGET_VSX)
3495 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3496 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3499 if (TARGET_DIRECT_MOVE)
3501 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3502 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3503 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3507 reg_addr[DFmode].scalar_in_vmx_p = true;
3508 reg_addr[DImode].scalar_in_vmx_p = true;
3510 if (TARGET_P8_VECTOR)
3512 reg_addr[SFmode].scalar_in_vmx_p = true;
3513 reg_addr[SImode].scalar_in_vmx_p = true;
3515 if (TARGET_P9_VECTOR)
3517 reg_addr[HImode].scalar_in_vmx_p = true;
3518 reg_addr[QImode].scalar_in_vmx_p = true;
3523 /* Setup the fusion operations. */
3524 if (TARGET_P8_FUSION)
3526 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3527 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3528 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3529 if (TARGET_64BIT)
3530 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3533 if (TARGET_P9_FUSION)
3535 struct fuse_insns {
3536 enum machine_mode mode; /* mode of the fused type. */
3537 enum machine_mode pmode; /* pointer mode. */
3538 enum rs6000_reload_reg_type rtype; /* register type. */
3539 enum insn_code load; /* load insn. */
3540 enum insn_code store; /* store insn. */
3543 static const struct fuse_insns addis_insns[] = {
3544 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3545 CODE_FOR_fusion_vsx_di_sf_load,
3546 CODE_FOR_fusion_vsx_di_sf_store },
3548 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3549 CODE_FOR_fusion_vsx_si_sf_load,
3550 CODE_FOR_fusion_vsx_si_sf_store },
3552 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3553 CODE_FOR_fusion_vsx_di_df_load,
3554 CODE_FOR_fusion_vsx_di_df_store },
3556 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3557 CODE_FOR_fusion_vsx_si_df_load,
3558 CODE_FOR_fusion_vsx_si_df_store },
3560 { E_DImode, E_DImode, RELOAD_REG_FPR,
3561 CODE_FOR_fusion_vsx_di_di_load,
3562 CODE_FOR_fusion_vsx_di_di_store },
3564 { E_DImode, E_SImode, RELOAD_REG_FPR,
3565 CODE_FOR_fusion_vsx_si_di_load,
3566 CODE_FOR_fusion_vsx_si_di_store },
3568 { E_QImode, E_DImode, RELOAD_REG_GPR,
3569 CODE_FOR_fusion_gpr_di_qi_load,
3570 CODE_FOR_fusion_gpr_di_qi_store },
3572 { E_QImode, E_SImode, RELOAD_REG_GPR,
3573 CODE_FOR_fusion_gpr_si_qi_load,
3574 CODE_FOR_fusion_gpr_si_qi_store },
3576 { E_HImode, E_DImode, RELOAD_REG_GPR,
3577 CODE_FOR_fusion_gpr_di_hi_load,
3578 CODE_FOR_fusion_gpr_di_hi_store },
3580 { E_HImode, E_SImode, RELOAD_REG_GPR,
3581 CODE_FOR_fusion_gpr_si_hi_load,
3582 CODE_FOR_fusion_gpr_si_hi_store },
3584 { E_SImode, E_DImode, RELOAD_REG_GPR,
3585 CODE_FOR_fusion_gpr_di_si_load,
3586 CODE_FOR_fusion_gpr_di_si_store },
3588 { E_SImode, E_SImode, RELOAD_REG_GPR,
3589 CODE_FOR_fusion_gpr_si_si_load,
3590 CODE_FOR_fusion_gpr_si_si_store },
3592 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3593 CODE_FOR_fusion_gpr_di_sf_load,
3594 CODE_FOR_fusion_gpr_di_sf_store },
3596 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3597 CODE_FOR_fusion_gpr_si_sf_load,
3598 CODE_FOR_fusion_gpr_si_sf_store },
3600 { E_DImode, E_DImode, RELOAD_REG_GPR,
3601 CODE_FOR_fusion_gpr_di_di_load,
3602 CODE_FOR_fusion_gpr_di_di_store },
3604 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3605 CODE_FOR_fusion_gpr_di_df_load,
3606 CODE_FOR_fusion_gpr_di_df_store },
3609 machine_mode cur_pmode = Pmode;
3610 size_t i;
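/* Walk the fusion table and record only the entries whose pointer mode
matches the current Pmode; entries for the other pointer width (and the
FPR entries when hard float is off) are skipped below. */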
3612 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3614 machine_mode xmode = addis_insns[i].mode;
3615 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3617 if (addis_insns[i].pmode != cur_pmode)
3618 continue;
3620 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3621 continue;
3623 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3624 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3626 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3628 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3629 = addis_insns[i].load;
3630 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3631 = addis_insns[i].store;
3636 /* Note which types we support fusing TOC setup plus memory insn. We only do
3637 fused TOCs for medium/large code models. */
3638 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3639 && (TARGET_CMODEL != CMODEL_SMALL))
3641 reg_addr[QImode].fused_toc = true;
3642 reg_addr[HImode].fused_toc = true;
3643 reg_addr[SImode].fused_toc = true;
3644 reg_addr[DImode].fused_toc = true;
3645 if (TARGET_HARD_FLOAT)
3647 if (TARGET_SINGLE_FLOAT)
3648 reg_addr[SFmode].fused_toc = true;
3649 if (TARGET_DOUBLE_FLOAT)
3650 reg_addr[DFmode].fused_toc = true;
3654 /* Precalculate HARD_REGNO_NREGS. */
3655 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3656 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3657 rs6000_hard_regno_nregs[m][r]
3658 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3660 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3661 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3662 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3663 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3664 rs6000_hard_regno_mode_ok_p[m][r] = true;
3666 /* Precalculate CLASS_MAX_NREGS sizes. */
3667 for (c = 0; c < LIM_REG_CLASSES; ++c)
3669 int reg_size;
3671 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3672 reg_size = UNITS_PER_VSX_WORD;
3674 else if (c == ALTIVEC_REGS)
3675 reg_size = UNITS_PER_ALTIVEC_WORD;
3677 else if (c == FLOAT_REGS)
3678 reg_size = UNITS_PER_FP_WORD;
3680 else
3681 reg_size = UNITS_PER_WORD;
3683 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3685 machine_mode m2 = (machine_mode)m;
3686 int reg_size2 = reg_size;
3688 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3689 in VSX. */
3690 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3691 reg_size2 = UNITS_PER_FP_WORD;
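/* Round up: e.g. a 16-byte vector mode in a class with 8-byte registers
needs (16 + 8 - 1) / 8 = 2 registers. */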
3693 rs6000_class_max_nregs[m][c]
3694 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3698 /* Calculate which modes to automatically generate code to use the
3699 reciprocal divide and square root instructions. In the future, possibly
3700 automatically generate the instructions even if the user did not specify
3701 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3702 not accurate enough. */
3703 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3704 if (TARGET_FRES)
3705 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3706 if (TARGET_FRE)
3707 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3708 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3709 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3710 if (VECTOR_UNIT_VSX_P (V2DFmode))
3711 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3713 if (TARGET_FRSQRTES)
3714 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3715 if (TARGET_FRSQRTE)
3716 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3717 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3718 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3719 if (VECTOR_UNIT_VSX_P (V2DFmode))
3720 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3722 if (rs6000_recip_control)
3724 if (!flag_finite_math_only)
3725 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3726 "-ffast-math");
3727 if (flag_trapping_math)
3728 warning (0, "%qs requires %qs or %qs", "-mrecip",
3729 "-fno-trapping-math", "-ffast-math");
3730 if (!flag_reciprocal_math)
3731 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3732 "-ffast-math");
3733 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3735 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3736 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3737 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3739 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3740 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3741 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3743 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3744 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3745 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3747 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3748 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3749 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3751 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3752 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3753 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3755 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3756 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3757 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3759 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3760 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3761 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3763 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3764 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3765 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3769 /* Update the addr mask bits in reg_addr to help secondary reload and the
3770 legitimate address support figure out the appropriate addressing to
3771 use. */
3772 rs6000_setup_reg_addr_masks ();
3774 if (global_init_p || TARGET_DEBUG_TARGET)
3776 if (TARGET_DEBUG_REG)
3777 rs6000_debug_reg_global ();
3779 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3780 fprintf (stderr,
3781 "SImode variable mult cost = %d\n"
3782 "SImode constant mult cost = %d\n"
3783 "SImode short constant mult cost = %d\n"
3784 "DImode multiplication cost = %d\n"
3785 "SImode division cost = %d\n"
3786 "DImode division cost = %d\n"
3787 "Simple fp operation cost = %d\n"
3788 "DFmode multiplication cost = %d\n"
3789 "SFmode division cost = %d\n"
3790 "DFmode division cost = %d\n"
3791 "cache line size = %d\n"
3792 "l1 cache size = %d\n"
3793 "l2 cache size = %d\n"
3794 "simultaneous prefetches = %d\n"
3795 "\n",
3796 rs6000_cost->mulsi,
3797 rs6000_cost->mulsi_const,
3798 rs6000_cost->mulsi_const9,
3799 rs6000_cost->muldi,
3800 rs6000_cost->divsi,
3801 rs6000_cost->divdi,
3802 rs6000_cost->fp,
3803 rs6000_cost->dmul,
3804 rs6000_cost->sdiv,
3805 rs6000_cost->ddiv,
3806 rs6000_cost->cache_line_size,
3807 rs6000_cost->l1_cache_size,
3808 rs6000_cost->l2_cache_size,
3809 rs6000_cost->simultaneous_prefetches);
3813 #if TARGET_MACHO
3814 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3816 static void
3817 darwin_rs6000_override_options (void)
3819 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3820 off. */
3821 rs6000_altivec_abi = 1;
3822 TARGET_ALTIVEC_VRSAVE = 1;
3823 rs6000_current_abi = ABI_DARWIN;
3825 if (DEFAULT_ABI == ABI_DARWIN
3826 && TARGET_64BIT)
3827 darwin_one_byte_bool = 1;
3829 if (TARGET_64BIT && ! TARGET_POWERPC64)
3831 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3832 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3834 if (flag_mkernel)
3836 rs6000_default_long_calls = 1;
3837 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3840 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3841 Altivec. */
3842 if (!flag_mkernel && !flag_apple_kext
3843 && TARGET_64BIT
3844 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3845 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3847 /* Unless the user (not the configurer) has explicitly overridden
3848 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3849 G4 unless targeting the kernel. */
3850 if (!flag_mkernel
3851 && !flag_apple_kext
3852 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3853 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3854 && ! global_options_set.x_rs6000_cpu_index)
3856 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3859 #endif
3861 /* If not otherwise specified by a target, make 'long double' equivalent to
3862 'double'. */
3864 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3865 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3866 #endif
3868 /* Return the builtin mask of the various options that could affect which
3869 builtins are enabled. In the past we used target_flags, but we've run out of
3870 bits, and some options like PAIRED are no longer in target_flags. */
3872 HOST_WIDE_INT
3873 rs6000_builtin_mask_calculate (void)
3875 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3876 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3877 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3878 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3879 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3880 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3881 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3882 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3883 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3884 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3885 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3886 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3887 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3888 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3889 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3890 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3891 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3892 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3893 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3894 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3895 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
3898 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3899 to clobber the XER[CA] bit because clobbering that bit without telling
3900 the compiler worked just fine with versions of GCC before GCC 5, and
3901 breaking a lot of older code in ways that are hard to track down is
3902 not such a great idea. */
3904 static rtx_insn *
3905 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3906 vec<const char *> &/*constraints*/,
3907 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3909 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3910 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3911 return NULL;
3914 /* Override command line options.
3916 Combine build-specific configuration information with options
3917 specified on the command line to set various state variables which
3918 influence code generation, optimization, and expansion of built-in
3919 functions. Assure that command-line configuration preferences are
3920 compatible with each other and with the build configuration; issue
3921 warnings while adjusting configuration or error messages while
3922 rejecting configuration.
3924 Upon entry to this function:
3926 This function is called once at the beginning of
3927 compilation, and then again at the start and end of compiling
3928 each section of code that has a different configuration, as
3929 indicated, for example, by adding the
3931 __attribute__((__target__("cpu=power9")))
3933 qualifier to a function definition or, for example, by bracketing
3934 code between
3936 #pragma GCC target("altivec")
3940 #pragma GCC reset_options
3942 directives. Parameter global_init_p is true for the initial
3943 invocation, which initializes global variables, and false for all
3944 subsequent invocations.
3947 Various global state information is assumed to be valid. This
3948 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3949 default CPU specified at build configure time, TARGET_DEFAULT,
3950 representing the default set of option flags for the default
3951 target, and global_options_set.x_rs6000_isa_flags, representing
3952 which options were requested on the command line.
3954 Upon return from this function:
3956 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3957 was set by name on the command line. Additionally, if certain
3958 attributes are automatically enabled or disabled by this function
3959 in order to assure compatibility between options and
3960 configuration, the flags associated with those attributes are
3961 also set. By setting these "explicit bits", we avoid the risk
3962 that other code might accidentally overwrite these particular
3963 attributes with "default values".
3965 The various bits of rs6000_isa_flags are set to indicate the
3966 target options that have been selected for the most current
3967 compilation efforts. This has the effect of also turning on the
3968 associated TARGET_XXX values since these are macros which are
3969 generally defined to test the corresponding bit of the
3970 rs6000_isa_flags variable.
3972 The variable rs6000_builtin_mask is set to represent the target
3973 options for the most current compilation efforts, consistent with
3974 the current contents of rs6000_isa_flags. This variable controls
3975 expansion of built-in functions.
3977 Various other global variables and fields of global structures
3978 (over 50 in all) are initialized to reflect the desired options
3979 for the most current compilation efforts. */
3981 static bool
3982 rs6000_option_override_internal (bool global_init_p)
3984 bool ret = true;
3985 bool have_cpu = false;
3987 /* The default cpu requested at configure time, if any. */
3988 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3990 HOST_WIDE_INT set_masks;
3991 HOST_WIDE_INT ignore_masks;
3992 int cpu_index;
3993 int tune_index;
3994 struct cl_target_option *main_target_opt
3995 = ((global_init_p || target_option_default_node == NULL)
3996 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3998 /* Print defaults. */
3999 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4000 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4002 /* Remember the explicit arguments. */
4003 if (global_init_p)
4004 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4006 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4007 library functions, so warn about it. The flag may be useful for
4008 performance studies from time to time though, so don't disable it
4009 entirely. */
4010 if (global_options_set.x_rs6000_alignment_flags
4011 && rs6000_alignment_flags == MASK_ALIGN_POWER
4012 && DEFAULT_ABI == ABI_DARWIN
4013 && TARGET_64BIT)
4014 warning (0, "%qs is not supported for 64-bit Darwin;"
4015 " it is incompatible with the installed C and C++ libraries",
4016 "-malign-power");
4018 /* Numerous experiments show that IRA-based loop pressure
4019 calculation works better for RTL loop invariant motion on targets
4020 with enough (>= 32) registers. It is an expensive optimization,
4021 so it is enabled only when optimizing for peak performance. */
4022 if (optimize >= 3 && global_init_p
4023 && !global_options_set.x_flag_ira_loop_pressure)
4024 flag_ira_loop_pressure = 1;
4026 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4027 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4028 options were already specified. */
4029 if (flag_sanitize & SANITIZE_USER_ADDRESS
4030 && !global_options_set.x_flag_asynchronous_unwind_tables)
4031 flag_asynchronous_unwind_tables = 1;
4033 /* Set the pointer size. */
4034 if (TARGET_64BIT)
4036 rs6000_pmode = DImode;
4037 rs6000_pointer_size = 64;
4039 else
4041 rs6000_pmode = SImode;
4042 rs6000_pointer_size = 32;
4045 /* Some OSs don't support saving the high part of 64-bit registers on context
4046 switch. Other OSs don't support saving Altivec registers. On those OSs,
4047 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4048 if the user wants either, the user must explicitly specify them and we
4049 won't interfere with the user's specification. */
4051 set_masks = POWERPC_MASKS;
4052 #ifdef OS_MISSING_POWERPC64
4053 if (OS_MISSING_POWERPC64)
4054 set_masks &= ~OPTION_MASK_POWERPC64;
4055 #endif
4056 #ifdef OS_MISSING_ALTIVEC
4057 if (OS_MISSING_ALTIVEC)
4058 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4059 | OTHER_VSX_VECTOR_MASKS);
4060 #endif
4062 /* Don't override by the processor default if given explicitly. */
4063 set_masks &= ~rs6000_isa_flags_explicit;
4065 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4066 the cpu in a target attribute or pragma, but did not specify a tuning
4067 option, use the cpu for the tuning option rather than the option specified
4068 with -mtune on the command line. Process a '--with-cpu' configuration
4069 request as an implicit -mcpu. */
4070 if (rs6000_cpu_index >= 0)
4072 cpu_index = rs6000_cpu_index;
4073 have_cpu = true;
4075 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4077 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4078 have_cpu = true;
4080 else if (implicit_cpu)
4082 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4083 have_cpu = true;
4085 else
4087 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4088 const char *default_cpu = ((!TARGET_POWERPC64)
4089 ? "powerpc"
4090 : ((BYTES_BIG_ENDIAN)
4091 ? "powerpc64"
4092 : "powerpc64le"));
4094 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4095 have_cpu = false;
4098 gcc_assert (cpu_index >= 0);
4100 if (have_cpu)
4102 #ifndef HAVE_AS_POWER9
4103 if (processor_target_table[rs6000_cpu_index].processor
4104 == PROCESSOR_POWER9)
4106 have_cpu = false;
4107 warning (0, "will not generate power9 instructions because "
4108 "assembler lacks power9 support");
4110 #endif
4111 #ifndef HAVE_AS_POWER8
4112 if (processor_target_table[rs6000_cpu_index].processor
4113 == PROCESSOR_POWER8)
4115 have_cpu = false;
4116 warning (0, "will not generate power8 instructions because "
4117 "assembler lacks power8 support");
4119 #endif
4120 #ifndef HAVE_AS_POPCNTD
4121 if (processor_target_table[rs6000_cpu_index].processor
4122 == PROCESSOR_POWER7)
4124 have_cpu = false;
4125 warning (0, "will not generate power7 instructions because "
4126 "assembler lacks power7 support");
4128 #endif
4129 #ifndef HAVE_AS_DFP
4130 if (processor_target_table[rs6000_cpu_index].processor
4131 == PROCESSOR_POWER6)
4133 have_cpu = false;
4134 warning (0, "will not generate power6 instructions because "
4135 "assembler lacks power6 support");
4137 #endif
4138 #ifndef HAVE_AS_POPCNTB
4139 if (processor_target_table[rs6000_cpu_index].processor
4140 == PROCESSOR_POWER5)
4142 have_cpu = false;
4143 warning (0, "will not generate power5 instructions because "
4144 "assembler lacks power5 support");
4146 #endif
4148 if (!have_cpu)
4150 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4151 const char *default_cpu = (!TARGET_POWERPC64
4152 ? "powerpc"
4153 : (BYTES_BIG_ENDIAN
4154 ? "powerpc64"
4155 : "powerpc64le"));
4157 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4161 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4162 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4163 with those from the cpu, except for options that were explicitly set. If
4164 we don't have a cpu, do not override the target bits set in
4165 TARGET_DEFAULT. */
4166 if (have_cpu)
4168 rs6000_isa_flags &= ~set_masks;
4169 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4170 & set_masks);
4172 else
4174 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4175 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4176 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
4177 to using rs6000_isa_flags, we need to do the initialization here.
4179 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4180 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4181 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4182 : processor_target_table[cpu_index].target_enable);
4183 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4186 if (rs6000_tune_index >= 0)
4187 tune_index = rs6000_tune_index;
4188 else if (have_cpu)
4189 rs6000_tune_index = tune_index = cpu_index;
4190 else
4192 size_t i;
4193 enum processor_type tune_proc
4194 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4196 tune_index = -1;
4197 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4198 if (processor_target_table[i].processor == tune_proc)
4200 rs6000_tune_index = tune_index = i;
4201 break;
4205 gcc_assert (tune_index >= 0);
4206 rs6000_cpu = processor_target_table[tune_index].processor;
4208 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4209 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4210 || rs6000_cpu == PROCESSOR_PPCE5500)
4212 if (TARGET_ALTIVEC)
4213 error ("AltiVec not supported in this target");
4216 /* If we are optimizing big endian systems for space, use the load/store
4217 multiple and string instructions. */
4218 if (BYTES_BIG_ENDIAN && optimize_size)
4219 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4220 | OPTION_MASK_STRING);
4222 /* Don't allow -mmultiple or -mstring on little endian systems
4223 unless the cpu is a 750, because the hardware doesn't support the
4224 instructions used in little endian mode, and they cause an alignment
4225 trap. The 750 does not cause an alignment trap (except when the
4226 target is unaligned). */
4228 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4230 if (TARGET_MULTIPLE)
4232 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4233 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4234 warning (0, "%qs is not supported on little endian systems",
4235 "-mmultiple");
4238 if (TARGET_STRING)
4240 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4241 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4242 warning (0, "%qs is not supported on little endian systems",
4243 "-mstring");
4247 /* If little-endian, default to -mstrict-align on older processors.
4248 Testing for htm matches power8 and later. */
4249 if (!BYTES_BIG_ENDIAN
4250 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4251 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4253 /* -maltivec={le,be} implies -maltivec. */
4254 if (rs6000_altivec_element_order != 0)
4255 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4257 /* Disallow -maltivec=le in big endian mode for now. This is not
4258 known to be useful for anyone. */
4259 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4261 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4262 rs6000_altivec_element_order = 0;
4265 if (!rs6000_fold_gimple)
4266 fprintf (stderr,
4267 "gimple folding of rs6000 builtins has been disabled.\n");
4269 /* Add some warnings for VSX. */
4270 if (TARGET_VSX)
4272 const char *msg = NULL;
4273 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4275 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4276 msg = N_("-mvsx requires hardware floating point");
4277 else
4279 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4280 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4283 else if (TARGET_PAIRED_FLOAT)
4284 msg = N_("-mvsx and -mpaired are incompatible");
4285 else if (TARGET_AVOID_XFORM > 0)
4286 msg = N_("-mvsx needs indexed addressing");
4287 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4288 & OPTION_MASK_ALTIVEC))
4290 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4291 msg = N_("-mvsx and -mno-altivec are incompatible");
4292 else
4293 msg = N_("-mno-altivec disables vsx");
4296 if (msg)
4298 warning (0, msg);
4299 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4300 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4304 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4305 the -mcpu setting to enable options that conflict. */
4306 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4307 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4308 | OPTION_MASK_ALTIVEC
4309 | OPTION_MASK_VSX)) != 0)
4310 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4311 | OPTION_MASK_DIRECT_MOVE)
4312 & ~rs6000_isa_flags_explicit);
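/* The & ~rs6000_isa_flags_explicit above preserves any of these flags
that the user requested by name on the command line. */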
4314 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4315 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4317 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4318 off all of the options that depend on those flags. */
4319 ignore_masks = rs6000_disable_incompatible_switches ();
4321 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4322 unless the user explicitly used the -mno-<option> to disable the code. */
4323 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4324 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4325 else if (TARGET_P9_MINMAX)
4327 if (have_cpu)
4329 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4331 /* Legacy behavior: allow -mcpu=power9 with certain
4332 capabilities explicitly disabled. */
4333 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4335 else
4336 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4337 "for <xxx> less than power9", "-mcpu");
4339 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4340 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4341 & rs6000_isa_flags_explicit))
4342 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4343 were explicitly cleared. */
4344 error ("%qs incompatible with explicitly disabled options",
4345 "-mpower9-minmax");
4346 else
4347 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4349 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4350 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4351 else if (TARGET_VSX)
4352 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4353 else if (TARGET_POPCNTD)
4354 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4355 else if (TARGET_DFP)
4356 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4357 else if (TARGET_CMPB)
4358 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4359 else if (TARGET_FPRND)
4360 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4361 else if (TARGET_POPCNTB)
4362 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4363 else if (TARGET_ALTIVEC)
4364 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4366 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4368 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4369 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4370 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4373 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4375 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4376 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4377 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4380 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4382 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4383 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4384 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4387 if (TARGET_P8_VECTOR && !TARGET_VSX)
4389 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4390 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4391 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4392 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4394 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4395 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4396 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4398 else
4400 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4401 not explicit. */
4402 rs6000_isa_flags |= OPTION_MASK_VSX;
4403 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4407 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4409 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4410 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4411 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4414 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4415 silently turn off quad memory mode. */
4416 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4418 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4419 warning (0, N_("-mquad-memory requires 64-bit mode"));
4421 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4422 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4424 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4425 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4428 /* Non-atomic quad memory load/store are disabled for little endian, since
4429 the words are reversed, but atomic operations can still be done by
4430 swapping the words. */
4431 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4433 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4434 warning (0, N_("-mquad-memory is not available in little endian "
4435 "mode"));
4437 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4440 /* Assume that if the user asked for normal quad memory instructions, they
4441 want the atomic versions as well, unless they explicitly told us not to use
4442 quad word atomic instructions. */
4443 if (TARGET_QUAD_MEMORY
4444 && !TARGET_QUAD_MEMORY_ATOMIC
4445 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4446 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4448 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4449 generating power8 instructions. */
4450 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4451 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4452 & OPTION_MASK_P8_FUSION);
4454 /* Setting additional fusion flags turns on base fusion. */
4455 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4457 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4459 if (TARGET_P8_FUSION_SIGN)
4460 error ("%qs requires %qs", "-mpower8-fusion-sign",
4461 "-mpower8-fusion");
4463 if (TARGET_TOC_FUSION)
4464 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4466 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4468 else
4469 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4472 /* Power9 fusion is a superset of power8 fusion. */
4473 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4475 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4477 /* We prefer not to mention undocumented options in
4478 error messages. However, if users have managed to select
4479 power9-fusion without selecting power8-fusion, they
4480 already know about undocumented flags. */
4481 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4482 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4484 else
4485 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4488 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4489 generating power9 instructions. */
4490 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4491 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4492 & OPTION_MASK_P9_FUSION);
4494 /* Power8 does not fuse sign extended loads with the addis. If we are
4495 optimizing at high levels for speed, convert a sign extended load into a
4496 zero extending load, and an explicit sign extension. */
4497 if (TARGET_P8_FUSION
4498 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4499 && optimize_function_for_speed_p (cfun)
4500 && optimize >= 3)
4501 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4503 /* TOC fusion requires 64-bit and medium/large code model. */
4504 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4506 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4507 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4508 warning (0, N_("-mtoc-fusion requires 64-bit"));
4511 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4513 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4514 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4515 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4518 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4519 model. */
4520 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4521 && (TARGET_CMODEL != CMODEL_SMALL)
4522 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4523 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4525 /* ISA 3.0 vector instructions include ISA 2.07. */
4526 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4528 /* We prefer not to mention undocumented options in
4529 error messages. However, if users have managed to select
4530 power9-vector without selecting power8-vector, they
4531 already know about undocumented flags. */
4532 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4533 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4534 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4535 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4537 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4538 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4539 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4541 else
4543 /* OPTION_MASK_P9_VECTOR is explicit and
4544 OPTION_MASK_P8_VECTOR is not explicit. */
4545 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4546 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4550 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4551 support. If we only have ISA 2.06 support, and the user did not specify
4552 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4553 but we don't enable the full vectorization support. */
4554 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4555 TARGET_ALLOW_MOVMISALIGN = 1;
4557 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4559 if (TARGET_ALLOW_MOVMISALIGN > 0
4560 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4561 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4563 TARGET_ALLOW_MOVMISALIGN = 0;
4566 /* Determine when unaligned vector accesses are permitted, and when
4567 they are preferred over masked Altivec loads. Note that if
4568 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4569 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4570 not true. */
4571 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4573 if (!TARGET_VSX)
4575 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4576 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4578 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4581 else if (!TARGET_ALLOW_MOVMISALIGN)
4583 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4584 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4585 "-mallow-movmisalign");
4587 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4591 /* Set long double size before the IEEE 128-bit tests. */
4592 if (!global_options_set.x_rs6000_long_double_type_size)
4594 if (main_target_opt != NULL
4595 && (main_target_opt->x_rs6000_long_double_type_size
4596 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4597 error ("target attribute or pragma changes long double size");
4598 else
4599 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4602 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4603 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4604 pick up this default. */
4605 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4606 if (!global_options_set.x_rs6000_ieeequad)
4607 rs6000_ieeequad = 1;
4608 #endif
4610 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4611 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4612 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4613 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4614 the keyword and the type. */
4615 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4617 /* IEEE 128-bit floating point requires VSX support. */
4618 if (TARGET_FLOAT128_KEYWORD)
4620 if (!TARGET_VSX)
4622 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4623 error ("%qs requires VSX support", "-mfloat128");
4625 TARGET_FLOAT128_TYPE = 0;
4626 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4627 | OPTION_MASK_FLOAT128_HW);
4629 else if (!TARGET_FLOAT128_TYPE)
4631 TARGET_FLOAT128_TYPE = 1;
4632 warning (0, "the -mfloat128 option may not be fully supported");
4636 /* Enable the __float128 keyword under Linux by default. */
4637 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4638 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4639 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4641 /* If we are supporting the float128 type and have full ISA 3.0 support,
4642 enable -mfloat128-hardware by default. However, don't enable
4643 -mfloat128-hardware if it was explicitly turned off. 64-bit mode is needed
4644 because sometimes the compiler wants to put things in an integer
4645 container, and if we don't have __int128 support, it is impossible. */
4646 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4647 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4648 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4649 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4651 if (TARGET_FLOAT128_HW
4652 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4654 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4655 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4657 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4660 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4662 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4663 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4665 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4668 /* Print the options after updating the defaults. */
4669 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4670 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4672 /* E500mc does "better" if we inline more aggressively. Respect the
4673 user's opinion, though. */
4674 if (rs6000_block_move_inline_limit == 0
4675 && (rs6000_cpu == PROCESSOR_PPCE500MC
4676 || rs6000_cpu == PROCESSOR_PPCE500MC64
4677 || rs6000_cpu == PROCESSOR_PPCE5500
4678 || rs6000_cpu == PROCESSOR_PPCE6500))
4679 rs6000_block_move_inline_limit = 128;
4681 /* store_one_arg depends on expand_block_move to handle at least the
4682 size of reg_parm_stack_space. */
4683 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4684 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4686 if (global_init_p)
4688 /* If the appropriate debug option is enabled, replace the target hooks
4689 with debug versions that call the real version and then prints
4690 debugging information. */
4691 if (TARGET_DEBUG_COST)
4693 targetm.rtx_costs = rs6000_debug_rtx_costs;
4694 targetm.address_cost = rs6000_debug_address_cost;
4695 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4698 if (TARGET_DEBUG_ADDR)
4700 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4701 targetm.legitimize_address = rs6000_debug_legitimize_address;
4702 rs6000_secondary_reload_class_ptr
4703 = rs6000_debug_secondary_reload_class;
4704 rs6000_secondary_memory_needed_ptr
4705 = rs6000_debug_secondary_memory_needed;
4706 rs6000_cannot_change_mode_class_ptr
4707 = rs6000_debug_cannot_change_mode_class;
4708 rs6000_preferred_reload_class_ptr
4709 = rs6000_debug_preferred_reload_class;
4710 rs6000_legitimize_reload_address_ptr
4711 = rs6000_debug_legitimize_reload_address;
4712 rs6000_mode_dependent_address_ptr
4713 = rs6000_debug_mode_dependent_address;
4716 if (rs6000_veclibabi_name)
4718 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4719 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4720 else
4722 error ("unknown vectorization library ABI type (%qs) for "
4723 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4724 ret = false;
4729 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4730 target attribute or pragma which automatically enables both options,
4731 unless the altivec ABI was set. This is set by default for 64-bit, but
4732 not for 32-bit. */
4733 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4735 TARGET_FLOAT128_TYPE = 0;
4736 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4737 | OPTION_MASK_FLOAT128_KEYWORD)
4738 & ~rs6000_isa_flags_explicit);
4741 /* Enable Altivec ABI for AIX -maltivec. */
4742 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4744 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4745 error ("target attribute or pragma changes AltiVec ABI");
4746 else
4747 rs6000_altivec_abi = 1;
4750 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4751 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4752 be explicitly overridden in either case. */
4753 if (TARGET_ELF)
4755 if (!global_options_set.x_rs6000_altivec_abi
4756 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4758 if (main_target_opt != NULL
4759 && !main_target_opt->x_rs6000_altivec_abi)
4760 error ("target attribute or pragma changes AltiVec ABI");
4761 else
4762 rs6000_altivec_abi = 1;
4766 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4767 So far, the only darwin64 targets are also MACH-O. */
4768 if (TARGET_MACHO
4769 && DEFAULT_ABI == ABI_DARWIN
4770 && TARGET_64BIT)
4772 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4773 error ("target attribute or pragma changes darwin64 ABI");
4774 else
4776 rs6000_darwin64_abi = 1;
4777 /* Default to natural alignment, for better performance. */
4778 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4782 /* Place FP constants in the constant pool instead of TOC
4783 if section anchors enabled. */
4784 if (flag_section_anchors
4785 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4786 TARGET_NO_FP_IN_TOC = 1;
4788 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4789 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4791 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4792 SUBTARGET_OVERRIDE_OPTIONS;
4793 #endif
4794 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4795 SUBSUBTARGET_OVERRIDE_OPTIONS;
4796 #endif
4797 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4798 SUB3TARGET_OVERRIDE_OPTIONS;
4799 #endif
4801 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4802 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4804 /* For the E500 family of cores, reset the single/double FP flags to let us
4805 check that they remain constant across attributes or pragmas. Also,
4806 clear a possible request for string instructions, which are not supported
4807 there and which we might have silently enabled above for -Os.
4809 For other families, clear ISEL in case it was set implicitly. */
4812 switch (rs6000_cpu)
4814 case PROCESSOR_PPC8540:
4815 case PROCESSOR_PPC8548:
4816 case PROCESSOR_PPCE500MC:
4817 case PROCESSOR_PPCE500MC64:
4818 case PROCESSOR_PPCE5500:
4819 case PROCESSOR_PPCE6500:
4821 rs6000_single_float = 0;
4822 rs6000_double_float = 0;
4824 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4826 break;
4828 default:
4830 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4831 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4833 break;
4836 if (main_target_opt)
4838 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4839 error ("target attribute or pragma changes single precision floating "
4840 "point");
4841 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4842 error ("target attribute or pragma changes double precision floating "
4843 "point");
4846 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4847 && rs6000_cpu != PROCESSOR_POWER5
4848 && rs6000_cpu != PROCESSOR_POWER6
4849 && rs6000_cpu != PROCESSOR_POWER7
4850 && rs6000_cpu != PROCESSOR_POWER8
4851 && rs6000_cpu != PROCESSOR_POWER9
4852 && rs6000_cpu != PROCESSOR_PPCA2
4853 && rs6000_cpu != PROCESSOR_CELL
4854 && rs6000_cpu != PROCESSOR_PPC476);
4855 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4856 || rs6000_cpu == PROCESSOR_POWER5
4857 || rs6000_cpu == PROCESSOR_POWER7
4858 || rs6000_cpu == PROCESSOR_POWER8);
4859 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4860 || rs6000_cpu == PROCESSOR_POWER5
4861 || rs6000_cpu == PROCESSOR_POWER6
4862 || rs6000_cpu == PROCESSOR_POWER7
4863 || rs6000_cpu == PROCESSOR_POWER8
4864 || rs6000_cpu == PROCESSOR_POWER9
4865 || rs6000_cpu == PROCESSOR_PPCE500MC
4866 || rs6000_cpu == PROCESSOR_PPCE500MC64
4867 || rs6000_cpu == PROCESSOR_PPCE5500
4868 || rs6000_cpu == PROCESSOR_PPCE6500);
4870 /* Allow debug switches to override the above settings. These are set to -1
4871 in rs6000.opt to indicate the user hasn't directly set the switch. */
4872 if (TARGET_ALWAYS_HINT >= 0)
4873 rs6000_always_hint = TARGET_ALWAYS_HINT;
4875 if (TARGET_SCHED_GROUPS >= 0)
4876 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4878 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4879 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4881 rs6000_sched_restricted_insns_priority
4882 = (rs6000_sched_groups ? 1 : 0);
4884 /* Handle -msched-costly-dep option. */
4885 rs6000_sched_costly_dep
4886 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4888 if (rs6000_sched_costly_dep_str)
4890 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4891 rs6000_sched_costly_dep = no_dep_costly;
4892 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4893 rs6000_sched_costly_dep = all_deps_costly;
4894 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4895 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4896 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4897 rs6000_sched_costly_dep = store_to_load_dep_costly;
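/* Otherwise fall back to interpreting the string as a number; note
that atoi returns 0 for unrecognized text. */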
4898 else
4899 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4900 atoi (rs6000_sched_costly_dep_str));
4903 /* Handle -minsert-sched-nops option. */
4904 rs6000_sched_insert_nops
4905 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4907 if (rs6000_sched_insert_nops_str)
4909 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4910 rs6000_sched_insert_nops = sched_finish_none;
4911 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4912 rs6000_sched_insert_nops = sched_finish_pad_groups;
4913 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4914 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4915 else
4916 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4917 atoi (rs6000_sched_insert_nops_str));
4920 /* Handle stack protector */
4921 if (!global_options_set.x_rs6000_stack_protector_guard)
4922 #ifdef TARGET_THREAD_SSP_OFFSET
4923 rs6000_stack_protector_guard = SSP_TLS;
4924 #else
4925 rs6000_stack_protector_guard = SSP_GLOBAL;
4926 #endif
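/* When the guard lives in TLS, it is read at a fixed offset from the
thread pointer: r13 under the 64-bit ABI, r2 under the 32-bit ELF ABI. */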
4928 #ifdef TARGET_THREAD_SSP_OFFSET
4929 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4930 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4931 #endif
4933 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4935 char *endp;
4936 const char *str = rs6000_stack_protector_guard_offset_str;
4938 errno = 0;
4939 long offset = strtol (str, &endp, 0);
4940 if (!*str || *endp || errno)
4941 error ("%qs is not a valid number in %qs", str,
4942 "-mstack-protector-guard-offset=");
4944 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4945 || (TARGET_64BIT && (offset & 3)))
4946 error ("%qs is not a valid offset in %qs", str,
4947 "-mstack-protector-guard-offset=");
4949 rs6000_stack_protector_guard_offset = offset;
4952 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4954 const char *str = rs6000_stack_protector_guard_reg_str;
4955 int reg = decode_reg_name (str);
4957 if (!IN_RANGE (reg, 1, 31))
4958 error ("%qs is not a valid base register in %qs", str,
4959 "-mstack-protector-guard-reg=");
4961 rs6000_stack_protector_guard_reg = reg;
4964 if (rs6000_stack_protector_guard == SSP_TLS
4965 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4966 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4968 if (global_init_p)
4970 #ifdef TARGET_REGNAMES
4971 /* If the user desires alternate register names, copy in the
4972 alternate names now. */
4973 if (TARGET_REGNAMES)
4974 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4975 #endif
4977 /* Set aix_struct_return last, after the ABI is determined.
4978 If -maix-struct-return or -msvr4-struct-return was explicitly
4979 used, don't override with the ABI default. */
4980 if (!global_options_set.x_aix_struct_return)
4981 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4983 #if 0
4984 /* IBM XL compiler defaults to unsigned bitfields. */
4985 if (TARGET_XL_COMPAT)
4986 flag_signed_bitfields = 0;
4987 #endif
4989 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4990 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4992 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4994 /* We can only guarantee the availability of DI pseudo-ops when
4995 assembling for 64-bit targets. */
4996 if (!TARGET_64BIT)
4998 targetm.asm_out.aligned_op.di = NULL;
4999 targetm.asm_out.unaligned_op.di = NULL;
5003 /* Set branch target alignment, if not optimizing for size. */
5004 if (!optimize_size)
5006 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
5007 aligned 8-byte to avoid misprediction by the branch predictor. */
5008 if (rs6000_cpu == PROCESSOR_TITAN
5009 || rs6000_cpu == PROCESSOR_CELL)
5011 if (align_functions <= 0)
5012 align_functions = 8;
5013 if (align_jumps <= 0)
5014 align_jumps = 8;
5015 if (align_loops <= 0)
5016 align_loops = 8;
5018 if (rs6000_align_branch_targets)
5020 if (align_functions <= 0)
5021 align_functions = 16;
5022 if (align_jumps <= 0)
5023 align_jumps = 16;
5024 if (align_loops <= 0)
5026 can_override_loop_align = 1;
5027 align_loops = 16;
5030 if (align_jumps_max_skip <= 0)
5031 align_jumps_max_skip = 15;
5032 if (align_loops_max_skip <= 0)
5033 align_loops_max_skip = 15;
5036 /* Arrange to save and restore machine status around nested functions. */
5037 init_machine_status = rs6000_init_machine_status;
5039 /* We should always be splitting complex arguments, but we can't break
5040 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5041 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5042 targetm.calls.split_complex_arg = NULL;
5044 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5045 if (DEFAULT_ABI == ABI_AIX)
5046 targetm.calls.custom_function_descriptors = 0;
5049 /* Initialize rs6000_cost with the appropriate target costs. */
5050 if (optimize_size)
5051 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5052 else
5053 switch (rs6000_cpu)
5055 case PROCESSOR_RS64A:
5056 rs6000_cost = &rs64a_cost;
5057 break;
5059 case PROCESSOR_MPCCORE:
5060 rs6000_cost = &mpccore_cost;
5061 break;
5063 case PROCESSOR_PPC403:
5064 rs6000_cost = &ppc403_cost;
5065 break;
5067 case PROCESSOR_PPC405:
5068 rs6000_cost = &ppc405_cost;
5069 break;
5071 case PROCESSOR_PPC440:
5072 rs6000_cost = &ppc440_cost;
5073 break;
5075 case PROCESSOR_PPC476:
5076 rs6000_cost = &ppc476_cost;
5077 break;
5079 case PROCESSOR_PPC601:
5080 rs6000_cost = &ppc601_cost;
5081 break;
5083 case PROCESSOR_PPC603:
5084 rs6000_cost = &ppc603_cost;
5085 break;
5087 case PROCESSOR_PPC604:
5088 rs6000_cost = &ppc604_cost;
5089 break;
5091 case PROCESSOR_PPC604e:
5092 rs6000_cost = &ppc604e_cost;
5093 break;
5095 case PROCESSOR_PPC620:
5096 rs6000_cost = &ppc620_cost;
5097 break;
5099 case PROCESSOR_PPC630:
5100 rs6000_cost = &ppc630_cost;
5101 break;
5103 case PROCESSOR_CELL:
5104 rs6000_cost = &ppccell_cost;
5105 break;
5107 case PROCESSOR_PPC750:
5108 case PROCESSOR_PPC7400:
5109 rs6000_cost = &ppc750_cost;
5110 break;
5112 case PROCESSOR_PPC7450:
5113 rs6000_cost = &ppc7450_cost;
5114 break;
5116 case PROCESSOR_PPC8540:
5117 case PROCESSOR_PPC8548:
5118 rs6000_cost = &ppc8540_cost;
5119 break;
5121 case PROCESSOR_PPCE300C2:
5122 case PROCESSOR_PPCE300C3:
5123 rs6000_cost = &ppce300c2c3_cost;
5124 break;
5126 case PROCESSOR_PPCE500MC:
5127 rs6000_cost = &ppce500mc_cost;
5128 break;
5130 case PROCESSOR_PPCE500MC64:
5131 rs6000_cost = &ppce500mc64_cost;
5132 break;
5134 case PROCESSOR_PPCE5500:
5135 rs6000_cost = &ppce5500_cost;
5136 break;
5138 case PROCESSOR_PPCE6500:
5139 rs6000_cost = &ppce6500_cost;
5140 break;
5142 case PROCESSOR_TITAN:
5143 rs6000_cost = &titan_cost;
5144 break;
5146 case PROCESSOR_POWER4:
5147 case PROCESSOR_POWER5:
5148 rs6000_cost = &power4_cost;
5149 break;
5151 case PROCESSOR_POWER6:
5152 rs6000_cost = &power6_cost;
5153 break;
5155 case PROCESSOR_POWER7:
5156 rs6000_cost = &power7_cost;
5157 break;
5159 case PROCESSOR_POWER8:
5160 rs6000_cost = &power8_cost;
5161 break;
5163 case PROCESSOR_POWER9:
5164 rs6000_cost = &power9_cost;
5165 break;
5167 case PROCESSOR_PPCA2:
5168 rs6000_cost = &ppca2_cost;
5169 break;
5171 default:
5172 gcc_unreachable ();
5175 if (global_init_p)
5177 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5178 rs6000_cost->simultaneous_prefetches,
5179 global_options.x_param_values,
5180 global_options_set.x_param_values);
5181 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5182 global_options.x_param_values,
5183 global_options_set.x_param_values);
5184 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5185 rs6000_cost->cache_line_size,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5188 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5189 global_options.x_param_values,
5190 global_options_set.x_param_values);
5192 /* Increase loop peeling limits based on performance analysis. */
5193 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5194 global_options.x_param_values,
5195 global_options_set.x_param_values);
5196 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5197 global_options.x_param_values,
5198 global_options_set.x_param_values);
5200 /* Use the 'model' -fsched-pressure algorithm by default. */
5201 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5202 SCHED_PRESSURE_MODEL,
5203 global_options.x_param_values,
5204 global_options_set.x_param_values);
5206 /* If using typedef char *va_list, signal that
5207 __builtin_va_start (&ap, 0) can be optimized to
5208 ap = __builtin_next_arg (0). */
5209 if (DEFAULT_ABI != ABI_V4)
5210 targetm.expand_builtin_va_start = NULL;
5213 /* Set up single/double float flags.
5214 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5215 then set both flags. */
5216 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5217 rs6000_single_float = rs6000_double_float = 1;
5219 /* If not explicitly specified via option, decide whether to generate indexed
5220 load/store instructions. A value of -1 indicates that the
5221 initial value of this variable has not been overwritten. During
5222 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5223 if (TARGET_AVOID_XFORM == -1)
5224 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5225 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5226 need indexed accesses and the type used is the scalar type of the element
5227 being loaded or stored. */
5228 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5229 && !TARGET_ALTIVEC);
5231 /* Set the -mrecip options. */
5232 if (rs6000_recip_name)
5234 char *p = ASTRDUP (rs6000_recip_name);
5235 char *q;
5236 unsigned int mask, i;
5237 bool invert;
5239 while ((q = strtok (p, ",")) != NULL)
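/* Pass NULL to the later strtok calls so scanning continues within
   the same comma-separated string. */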
5241 p = NULL;
5242 if (*q == '!')
5244 invert = true;
5245 q++;
5247 else
5248 invert = false;
5250 if (!strcmp (q, "default"))
5251 mask = ((TARGET_RECIP_PRECISION)
5252 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5253 else
5255 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5256 if (!strcmp (q, recip_options[i].string))
5258 mask = recip_options[i].mask;
5259 break;
5262 if (i == ARRAY_SIZE (recip_options))
5264 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5265 invert = false;
5266 mask = 0;
5267 ret = false;
5271 if (invert)
5272 rs6000_recip_control &= ~mask;
5273 else
5274 rs6000_recip_control |= mask;
5278 /* Set the builtin mask of the various options used that could affect which
5279 builtins were used. In the past we used target_flags, but we've run out
5280 of bits, and some options like PAIRED are no longer in target_flags. */
5281 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5282 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5283 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5284 rs6000_builtin_mask);
5286 /* Initialize all of the registers. */
5287 rs6000_init_hard_regno_mode_ok (global_init_p);
5289 /* Save the initial options in case the user uses function-specific options. */
5290 if (global_init_p)
5291 target_option_default_node = target_option_current_node
5292 = build_target_option_node (&global_options);
5294 /* If not explicitly specified via option, decide whether to generate the
5295 extra blr's required to preserve the link stack on some cpus (e.g. 476). */
5296 if (TARGET_LINK_STACK == -1)
5297 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5299 return ret;
5302 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5303 define the target cpu type. */
5305 static void
5306 rs6000_option_override (void)
5308 (void) rs6000_option_override_internal (true);
5312 /* Implement targetm.vectorize.builtin_mask_for_load. */
5313 static tree
5314 rs6000_builtin_mask_for_load (void)
5316 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5317 if ((TARGET_ALTIVEC && !TARGET_VSX)
5318 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5319 return altivec_builtin_mask_for_load;
5320 else
5321 return 0;
5324 /* Implement LOOP_ALIGN. */
5325 int
5326 rs6000_loop_align (rtx label)
5328 basic_block bb;
5329 int ninsns;
5331 /* Don't override loop alignment if -falign-loops was specified. */
5332 if (!can_override_loop_align)
5333 return align_loops_log;
5335 bb = BLOCK_FOR_INSN (label);
5336 ninsns = num_loop_insns(bb->loop_father);
5338 /* Align small loops to 32 bytes (2**5) to fit in an icache sector; otherwise return the default. */
5339 if (ninsns > 4 && ninsns <= 8
5340 && (rs6000_cpu == PROCESSOR_POWER4
5341 || rs6000_cpu == PROCESSOR_POWER5
5342 || rs6000_cpu == PROCESSOR_POWER6
5343 || rs6000_cpu == PROCESSOR_POWER7
5344 || rs6000_cpu == PROCESSOR_POWER8
5345 || rs6000_cpu == PROCESSOR_POWER9))
5346 return 5;
5347 else
5348 return align_loops_log;
5351 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
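/* E.g. with 2**5 = 32-byte loop alignment, at most 31 bytes of
   padding may be skipped. */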
5352 static int
5353 rs6000_loop_align_max_skip (rtx_insn *label)
5355 return (1 << rs6000_loop_align (label)) - 1;
5358 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5359 after applying N iterations. This routine does not determine
5360 how many iterations are required to reach the desired alignment. */
5362 static bool
5363 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5365 if (is_packed)
5366 return false;
5368 if (TARGET_32BIT)
5370 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5371 return true;
5373 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5374 return true;
5376 return false;
5378 else
5380 if (TARGET_MACHO)
5381 return false;
5383 /* Assume that all other types are naturally aligned. CHECKME! */
5384 return true;
5388 /* Return true if the vector misalignment factor is supported by the
5389 target. */
5390 static bool
5391 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5392 const_tree type,
5393 int misalignment,
5394 bool is_packed)
5396 if (TARGET_VSX)
5398 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5399 return true;
5401 /* Return false if the movmisalign pattern is not supported for this mode. */
5402 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5403 return false;
5405 if (misalignment == -1)
5407 /* Misalignment factor is unknown at compile time but we know
5408 it's word aligned. */
5409 if (rs6000_vector_alignment_reachable (type, is_packed))
5411 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5413 if (element_size == 64 || element_size == 32)
5414 return true;
5417 return false;
5420 /* VSX supports word-aligned vectors. */
5421 if (misalignment % 4 == 0)
5422 return true;
5424 return false;
5427 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5428 static int
5429 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5430 tree vectype, int misalign)
5432 unsigned elements;
5433 tree elem_type;
5435 switch (type_of_cost)
5437 case scalar_stmt:
5438 case scalar_load:
5439 case scalar_store:
5440 case vector_stmt:
5441 case vector_load:
5442 case vector_store:
5443 case vec_to_scalar:
5444 case scalar_to_vec:
5445 case cond_branch_not_taken:
5446 return 1;
5448 case vec_perm:
5449 if (TARGET_VSX)
5450 return 3;
5451 else
5452 return 1;
5454 case vec_promote_demote:
5455 if (TARGET_VSX)
5456 return 4;
5457 else
5458 return 1;
5460 case cond_branch_taken:
5461 return 3;
5463 case unaligned_load:
5464 if (TARGET_P9_VECTOR)
5465 return 3;
5467 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5468 return 1;
5470 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5472 elements = TYPE_VECTOR_SUBPARTS (vectype);
5473 if (elements == 2)
5474 /* Double word aligned. */
5475 return 2;
5477 if (elements == 4)
5479 switch (misalign)
5481 case 8:
5482 /* Double word aligned. */
5483 return 2;
5485 case -1:
5486 /* Unknown misalignment. */
5487 case 4:
5488 case 12:
5489 /* Word aligned. */
5490 return 22;
5492 default:
5493 gcc_unreachable ();
5498 if (TARGET_ALTIVEC)
5499 /* Misaligned loads are not supported. */
5500 gcc_unreachable ();
5502 return 2;
5504 case unaligned_store:
5505 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5506 return 1;
5508 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5510 elements = TYPE_VECTOR_SUBPARTS (vectype);
5511 if (elements == 2)
5512 /* Double word aligned. */
5513 return 2;
5515 if (elements == 4)
5517 switch (misalign)
5519 case 8:
5520 /* Double word aligned. */
5521 return 2;
5523 case -1:
5524 /* Unknown misalignment. */
5525 case 4:
5526 case 12:
5527 /* Word aligned. */
5528 return 23;
5530 default:
5531 gcc_unreachable ();
5536 if (TARGET_ALTIVEC)
5537 /* Misaligned stores are not supported. */
5538 gcc_unreachable ();
5540 return 2;
5542 case vec_construct:
5543 /* This is a rough approximation assuming non-constant elements
5544 constructed into a vector via element insertion. FIXME:
5545 vec_construct is not granular enough for uniformly good
5546 decisions. If the initialization is a splat, this is
5547 cheaper than we estimate. Improve this someday. */
5548 elem_type = TREE_TYPE (vectype);
5549 /* 32-bit vectors loaded into registers are stored as double
5550 precision, so we need 2 permutes, 2 converts, and 1 merge
5551 to construct a vector of short floats from them. */
5552 if (SCALAR_FLOAT_TYPE_P (elem_type)
5553 && TYPE_PRECISION (elem_type) == 32)
5554 return 5;
5555 /* On POWER9, integer vector types are built up in GPRs and then
5556 use a direct move (2 cycles). For POWER8 this is even worse,
5557 as we need two direct moves and a merge, and the direct moves
5558 are five cycles. */
5559 else if (INTEGRAL_TYPE_P (elem_type))
5561 if (TARGET_P9_VECTOR)
5562 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5563 else
5564 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5566 else
5567 /* V2DFmode doesn't need a direct move. */
5568 return 2;
5570 default:
5571 gcc_unreachable ();
5575 /* Implement targetm.vectorize.preferred_simd_mode. */
5577 static machine_mode
5578 rs6000_preferred_simd_mode (scalar_mode mode)
5580 if (TARGET_VSX)
5581 switch (mode)
5583 case E_DFmode:
5584 return V2DFmode;
5585 default:;
5587 if (TARGET_ALTIVEC || TARGET_VSX)
5588 switch (mode)
5590 case E_SFmode:
5591 return V4SFmode;
5592 case E_TImode:
5593 return V1TImode;
5594 case E_DImode:
5595 return V2DImode;
5596 case E_SImode:
5597 return V4SImode;
5598 case E_HImode:
5599 return V8HImode;
5600 case E_QImode:
5601 return V16QImode;
5602 default:;
5604 if (TARGET_PAIRED_FLOAT
5605 && mode == SFmode)
5606 return V2SFmode;
5607 return word_mode;
5610 typedef struct _rs6000_cost_data
5612 struct loop *loop_info;
5613 unsigned cost[3];
5614 } rs6000_cost_data;
5616 /* Test for likely overcommitment of vector hardware resources. If a
5617 loop iteration is relatively large, and too large a percentage of
5618 instructions in the loop are vectorized, the cost model may not
5619 adequately reflect delays from unavailable vector resources.
5620 Penalize the loop body cost for this case. */
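/* For example, if the vectorized statements cost 90 out of a total
   loop body cost of 100, the density is 90% (> 85%) and the size is
   100 (> 70), so the body cost is scaled by 110%, from 90 to 99. */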
5622 static void
5623 rs6000_density_test (rs6000_cost_data *data)
5625 const int DENSITY_PCT_THRESHOLD = 85;
5626 const int DENSITY_SIZE_THRESHOLD = 70;
5627 const int DENSITY_PENALTY = 10;
5628 struct loop *loop = data->loop_info;
5629 basic_block *bbs = get_loop_body (loop);
5630 int nbbs = loop->num_nodes;
5631 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5632 int i, density_pct;
5634 for (i = 0; i < nbbs; i++)
5636 basic_block bb = bbs[i];
5637 gimple_stmt_iterator gsi;
5639 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5641 gimple *stmt = gsi_stmt (gsi);
5642 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5644 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5645 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5646 not_vec_cost++;
5650 free (bbs);
5651 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5653 if (density_pct > DENSITY_PCT_THRESHOLD
5654 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5656 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5657 if (dump_enabled_p ())
5658 dump_printf_loc (MSG_NOTE, vect_location,
5659 "density %d%%, cost %d exceeds threshold, penalizing "
5660 "loop body cost by %d%%", density_pct,
5661 vec_cost + not_vec_cost, DENSITY_PENALTY);
5665 /* Implement targetm.vectorize.init_cost. */
5667 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5668 instruction is needed by the vectorization. */
5669 static bool rs6000_vect_nonmem;
5671 static void *
5672 rs6000_init_cost (struct loop *loop_info)
5674 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5675 data->loop_info = loop_info;
5676 data->cost[vect_prologue] = 0;
5677 data->cost[vect_body] = 0;
5678 data->cost[vect_epilogue] = 0;
5679 rs6000_vect_nonmem = false;
5680 return data;
5683 /* Implement targetm.vectorize.add_stmt_cost. */
5685 static unsigned
5686 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5687 struct _stmt_vec_info *stmt_info, int misalign,
5688 enum vect_cost_model_location where)
5690 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5691 unsigned retval = 0;
5693 if (flag_vect_cost_model)
5695 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5696 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5697 misalign);
5698 /* Statements in an inner loop relative to the loop being
5699 vectorized are weighted more heavily. The value here is
5700 arbitrary and could potentially be improved with analysis. */
5701 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5702 count *= 50; /* FIXME. */
5704 retval = (unsigned) (count * stmt_cost);
5705 cost_data->cost[where] += retval;
5707 /* Check whether we're doing something other than just a copy loop.
5708 Not all such loops may be profitably vectorized; see
5709 rs6000_finish_cost. */
5710 if ((kind == vec_to_scalar || kind == vec_perm
5711 || kind == vec_promote_demote || kind == vec_construct
5712 || kind == scalar_to_vec)
5713 || (where == vect_body && kind == vector_stmt))
5714 rs6000_vect_nonmem = true;
5717 return retval;
5720 /* Implement targetm.vectorize.finish_cost. */
5722 static void
5723 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5724 unsigned *body_cost, unsigned *epilogue_cost)
5726 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5728 if (cost_data->loop_info)
5729 rs6000_density_test (cost_data);
5731 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5732 that require versioning for any reason. The vectorization is at
5733 best a wash inside the loop, and the versioning checks make
5734 profitability highly unlikely and potentially quite harmful. */
5735 if (cost_data->loop_info)
5737 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5738 if (!rs6000_vect_nonmem
5739 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5740 && LOOP_REQUIRES_VERSIONING (vec_info))
5741 cost_data->cost[vect_body] += 10000;
5744 *prologue_cost = cost_data->cost[vect_prologue];
5745 *body_cost = cost_data->cost[vect_body];
5746 *epilogue_cost = cost_data->cost[vect_epilogue];
5749 /* Implement targetm.vectorize.destroy_cost_data. */
5751 static void
5752 rs6000_destroy_cost_data (void *data)
5754 free (data);
5757 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
5758 library with vectorized intrinsics. */
5760 static tree
5761 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5762 tree type_in)
5764 char name[32];
5765 const char *suffix = NULL;
5766 tree fntype, new_fndecl, bdecl = NULL_TREE;
5767 int n_args = 1;
5768 const char *bname;
5769 machine_mode el_mode, in_mode;
5770 int n, in_n;
5772 /* Libmass is suitable for unsafe math only as it does not correctly support
5773 parts of IEEE with the required precision such as denormals. Only support
5774 it if we have VSX to use the simd d2 or f4 functions.
5775 XXX: Add variable length support. */
5776 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5777 return NULL_TREE;
5779 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5780 n = TYPE_VECTOR_SUBPARTS (type_out);
5781 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5782 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5783 if (el_mode != in_mode
5784 || n != in_n)
5785 return NULL_TREE;
5787 switch (fn)
5789 CASE_CFN_ATAN2:
5790 CASE_CFN_HYPOT:
5791 CASE_CFN_POW:
5792 n_args = 2;
5793 gcc_fallthrough ();
5795 CASE_CFN_ACOS:
5796 CASE_CFN_ACOSH:
5797 CASE_CFN_ASIN:
5798 CASE_CFN_ASINH:
5799 CASE_CFN_ATAN:
5800 CASE_CFN_ATANH:
5801 CASE_CFN_CBRT:
5802 CASE_CFN_COS:
5803 CASE_CFN_COSH:
5804 CASE_CFN_ERF:
5805 CASE_CFN_ERFC:
5806 CASE_CFN_EXP2:
5807 CASE_CFN_EXP:
5808 CASE_CFN_EXPM1:
5809 CASE_CFN_LGAMMA:
5810 CASE_CFN_LOG10:
5811 CASE_CFN_LOG1P:
5812 CASE_CFN_LOG2:
5813 CASE_CFN_LOG:
5814 CASE_CFN_SIN:
5815 CASE_CFN_SINH:
5816 CASE_CFN_SQRT:
5817 CASE_CFN_TAN:
5818 CASE_CFN_TANH:
5819 if (el_mode == DFmode && n == 2)
5821 bdecl = mathfn_built_in (double_type_node, fn);
5822 suffix = "d2"; /* pow -> powd2 */
5824 else if (el_mode == SFmode && n == 4)
5826 bdecl = mathfn_built_in (float_type_node, fn);
5827 suffix = "4"; /* powf -> powf4 */
5829 else
5830 return NULL_TREE;
5831 if (!bdecl)
5832 return NULL_TREE;
5833 break;
5835 default:
5836 return NULL_TREE;
5839 gcc_assert (suffix != NULL);
5840 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5841 if (!bname)
5842 return NULL_TREE;
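/* Strip the "__builtin_" prefix and append the vector suffix, e.g.
   __builtin_pow becomes powd2. */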
5844 strcpy (name, bname + sizeof ("__builtin_") - 1);
5845 strcat (name, suffix);
5847 if (n_args == 1)
5848 fntype = build_function_type_list (type_out, type_in, NULL);
5849 else if (n_args == 2)
5850 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5851 else
5852 gcc_unreachable ();
5854 /* Build a function declaration for the vectorized function. */
5855 new_fndecl = build_decl (BUILTINS_LOCATION,
5856 FUNCTION_DECL, get_identifier (name), fntype);
5857 TREE_PUBLIC (new_fndecl) = 1;
5858 DECL_EXTERNAL (new_fndecl) = 1;
5859 DECL_IS_NOVOPS (new_fndecl) = 1;
5860 TREE_READONLY (new_fndecl) = 1;
5862 return new_fndecl;
5865 /* Returns a function decl for a vectorized version of the builtin function
5866 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5867 if it is not available. */
5869 static tree
5870 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5871 tree type_in)
5873 machine_mode in_mode, out_mode;
5874 int in_n, out_n;
5876 if (TARGET_DEBUG_BUILTIN)
5877 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5878 combined_fn_name (combined_fn (fn)),
5879 GET_MODE_NAME (TYPE_MODE (type_out)),
5880 GET_MODE_NAME (TYPE_MODE (type_in)));
5882 if (TREE_CODE (type_out) != VECTOR_TYPE
5883 || TREE_CODE (type_in) != VECTOR_TYPE)
5884 return NULL_TREE;
5886 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5887 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5888 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5889 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5891 switch (fn)
5893 CASE_CFN_COPYSIGN:
5894 if (VECTOR_UNIT_VSX_P (V2DFmode)
5895 && out_mode == DFmode && out_n == 2
5896 && in_mode == DFmode && in_n == 2)
5897 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5898 if (VECTOR_UNIT_VSX_P (V4SFmode)
5899 && out_mode == SFmode && out_n == 4
5900 && in_mode == SFmode && in_n == 4)
5901 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5902 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5903 && out_mode == SFmode && out_n == 4
5904 && in_mode == SFmode && in_n == 4)
5905 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5906 break;
5907 CASE_CFN_CEIL:
5908 if (VECTOR_UNIT_VSX_P (V2DFmode)
5909 && out_mode == DFmode && out_n == 2
5910 && in_mode == DFmode && in_n == 2)
5911 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5912 if (VECTOR_UNIT_VSX_P (V4SFmode)
5913 && out_mode == SFmode && out_n == 4
5914 && in_mode == SFmode && in_n == 4)
5915 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5916 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5917 && out_mode == SFmode && out_n == 4
5918 && in_mode == SFmode && in_n == 4)
5919 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5920 break;
5921 CASE_CFN_FLOOR:
5922 if (VECTOR_UNIT_VSX_P (V2DFmode)
5923 && out_mode == DFmode && out_n == 2
5924 && in_mode == DFmode && in_n == 2)
5925 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5926 if (VECTOR_UNIT_VSX_P (V4SFmode)
5927 && out_mode == SFmode && out_n == 4
5928 && in_mode == SFmode && in_n == 4)
5929 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5930 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5931 && out_mode == SFmode && out_n == 4
5932 && in_mode == SFmode && in_n == 4)
5933 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5934 break;
5935 CASE_CFN_FMA:
5936 if (VECTOR_UNIT_VSX_P (V2DFmode)
5937 && out_mode == DFmode && out_n == 2
5938 && in_mode == DFmode && in_n == 2)
5939 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5940 if (VECTOR_UNIT_VSX_P (V4SFmode)
5941 && out_mode == SFmode && out_n == 4
5942 && in_mode == SFmode && in_n == 4)
5943 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5944 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5945 && out_mode == SFmode && out_n == 4
5946 && in_mode == SFmode && in_n == 4)
5947 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5948 break;
5949 CASE_CFN_TRUNC:
5950 if (VECTOR_UNIT_VSX_P (V2DFmode)
5951 && out_mode == DFmode && out_n == 2
5952 && in_mode == DFmode && in_n == 2)
5953 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5954 if (VECTOR_UNIT_VSX_P (V4SFmode)
5955 && out_mode == SFmode && out_n == 4
5956 && in_mode == SFmode && in_n == 4)
5957 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5958 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5959 && out_mode == SFmode && out_n == 4
5960 && in_mode == SFmode && in_n == 4)
5961 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5962 break;
5963 CASE_CFN_NEARBYINT:
5964 if (VECTOR_UNIT_VSX_P (V2DFmode)
5965 && flag_unsafe_math_optimizations
5966 && out_mode == DFmode && out_n == 2
5967 && in_mode == DFmode && in_n == 2)
5968 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5969 if (VECTOR_UNIT_VSX_P (V4SFmode)
5970 && flag_unsafe_math_optimizations
5971 && out_mode == SFmode && out_n == 4
5972 && in_mode == SFmode && in_n == 4)
5973 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5974 break;
5975 CASE_CFN_RINT:
5976 if (VECTOR_UNIT_VSX_P (V2DFmode)
5977 && !flag_trapping_math
5978 && out_mode == DFmode && out_n == 2
5979 && in_mode == DFmode && in_n == 2)
5980 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5981 if (VECTOR_UNIT_VSX_P (V4SFmode)
5982 && !flag_trapping_math
5983 && out_mode == SFmode && out_n == 4
5984 && in_mode == SFmode && in_n == 4)
5985 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5986 break;
5987 default:
5988 break;
5991 /* Generate calls to libmass if appropriate. */
5992 if (rs6000_veclib_handler)
5993 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5995 return NULL_TREE;
5998 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6000 static tree
6001 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6002 tree type_in)
6004 machine_mode in_mode, out_mode;
6005 int in_n, out_n;
6007 if (TARGET_DEBUG_BUILTIN)
6008 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6009 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6010 GET_MODE_NAME (TYPE_MODE (type_out)),
6011 GET_MODE_NAME (TYPE_MODE (type_in)));
6013 if (TREE_CODE (type_out) != VECTOR_TYPE
6014 || TREE_CODE (type_in) != VECTOR_TYPE)
6015 return NULL_TREE;
6017 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6018 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6019 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6020 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6022 enum rs6000_builtins fn
6023 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6024 switch (fn)
6026 case RS6000_BUILTIN_RSQRTF:
6027 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6028 && out_mode == SFmode && out_n == 4
6029 && in_mode == SFmode && in_n == 4)
6030 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6031 break;
6032 case RS6000_BUILTIN_RSQRT:
6033 if (VECTOR_UNIT_VSX_P (V2DFmode)
6034 && out_mode == DFmode && out_n == 2
6035 && in_mode == DFmode && in_n == 2)
6036 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6037 break;
6038 case RS6000_BUILTIN_RECIPF:
6039 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6040 && out_mode == SFmode && out_n == 4
6041 && in_mode == SFmode && in_n == 4)
6042 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6043 break;
6044 case RS6000_BUILTIN_RECIP:
6045 if (VECTOR_UNIT_VSX_P (V2DFmode)
6046 && out_mode == DFmode && out_n == 2
6047 && in_mode == DFmode && in_n == 2)
6048 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6049 break;
6050 default:
6051 break;
6053 return NULL_TREE;
6056 /* Default CPU string for rs6000*_file_start functions. */
6057 static const char *rs6000_default_cpu;
6059 /* Do anything needed at the start of the asm file. */
6061 static void
6062 rs6000_file_start (void)
6064 char buffer[80];
6065 const char *start = buffer;
6066 FILE *file = asm_out_file;
6068 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6070 default_file_start ();
6072 if (flag_verbose_asm)
6074 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6076 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6078 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6079 start = "";
6082 if (global_options_set.x_rs6000_cpu_index)
6084 fprintf (file, "%s -mcpu=%s", start,
6085 processor_target_table[rs6000_cpu_index].name);
6086 start = "";
6089 if (global_options_set.x_rs6000_tune_index)
6091 fprintf (file, "%s -mtune=%s", start,
6092 processor_target_table[rs6000_tune_index].name);
6093 start = "";
6096 if (PPC405_ERRATUM77)
6098 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6099 start = "";
6102 #ifdef USING_ELFOS_H
6103 switch (rs6000_sdata)
6105 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6106 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6107 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6108 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6111 if (rs6000_sdata && g_switch_value)
6113 fprintf (file, "%s -G %d", start,
6114 g_switch_value);
6115 start = "";
6117 #endif
6119 if (*start == '\0')
6120 putc ('\n', file);
6123 #ifdef USING_ELFOS_H
6124 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6125 && !global_options_set.x_rs6000_cpu_index)
6127 fputs ("\t.machine ", asm_out_file);
6128 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6129 fputs ("power9\n", asm_out_file);
6130 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6131 fputs ("power8\n", asm_out_file);
6132 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6133 fputs ("power7\n", asm_out_file);
6134 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6135 fputs ("power6\n", asm_out_file);
6136 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6137 fputs ("power5\n", asm_out_file);
6138 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6139 fputs ("power4\n", asm_out_file);
6140 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6141 fputs ("ppc64\n", asm_out_file);
6142 else
6143 fputs ("ppc\n", asm_out_file);
6145 #endif
6147 if (DEFAULT_ABI == ABI_ELFv2)
6148 fprintf (file, "\t.abiversion 2\n");
6152 /* Return nonzero if this function is known to have a null epilogue. */
6154 int
6155 direct_return (void)
6157 if (reload_completed)
6159 rs6000_stack_t *info = rs6000_stack_info ();
6161 if (info->first_gp_reg_save == 32
6162 && info->first_fp_reg_save == 64
6163 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6164 && ! info->lr_save_p
6165 && ! info->cr_save_p
6166 && info->vrsave_size == 0
6167 && ! info->push_p)
6168 return 1;
6171 return 0;
6174 /* Return the number of instructions it takes to form a constant in an
6175 integer register. */
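/* For example, on a 64-bit target: 0x7fff is a single addi;
   0x12345678 takes addis+ori (2 insns); and a full 64-bit constant
   such as 0x123456789abcdef0 takes 5 insns (2 to build the high word,
   a shift, and 2 more to or in the low halfwords). */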
6177 static int
6178 num_insns_constant_wide (HOST_WIDE_INT value)
6180 /* signed constant loadable with addi */
6181 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6182 return 1;
6184 /* constant loadable with addis */
6185 else if ((value & 0xffff) == 0
6186 && (value >> 31 == -1 || value >> 31 == 0))
6187 return 1;
6189 else if (TARGET_POWERPC64)
6191 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6192 HOST_WIDE_INT high = value >> 31;
6194 if (high == 0 || high == -1)
6195 return 2;
6197 high >>= 1;
6199 if (low == 0)
6200 return num_insns_constant_wide (high) + 1;
6201 else if (high == 0)
6202 return num_insns_constant_wide (low) + 1;
6203 else
6204 return (num_insns_constant_wide (high)
6205 + num_insns_constant_wide (low) + 1);
6208 else
6209 return 2;
6212 int
6213 num_insns_constant (rtx op, machine_mode mode)
6215 HOST_WIDE_INT low, high;
6217 switch (GET_CODE (op))
6219 case CONST_INT:
6220 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6221 && rs6000_is_valid_and_mask (op, mode))
6222 return 2;
6223 else
6224 return num_insns_constant_wide (INTVAL (op));
6226 case CONST_WIDE_INT:
6228 int i;
6229 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6230 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6231 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6232 return ins;
6235 case CONST_DOUBLE:
6236 if (mode == SFmode || mode == SDmode)
6238 long l;
6240 if (DECIMAL_FLOAT_MODE_P (mode))
6241 REAL_VALUE_TO_TARGET_DECIMAL32
6242 (*CONST_DOUBLE_REAL_VALUE (op), l);
6243 else
6244 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6245 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6248 long l[2];
6249 if (DECIMAL_FLOAT_MODE_P (mode))
6250 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6251 else
6252 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6253 high = l[WORDS_BIG_ENDIAN == 0];
6254 low = l[WORDS_BIG_ENDIAN != 0];
6256 if (TARGET_32BIT)
6257 return (num_insns_constant_wide (low)
6258 + num_insns_constant_wide (high));
6259 else
6261 if ((high == 0 && low >= 0)
6262 || (high == -1 && low < 0))
6263 return num_insns_constant_wide (low);
6265 else if (rs6000_is_valid_and_mask (op, mode))
6266 return 2;
6268 else if (low == 0)
6269 return num_insns_constant_wide (high) + 1;
6271 else
6272 return (num_insns_constant_wide (high)
6273 + num_insns_constant_wide (low) + 1);
6276 default:
6277 gcc_unreachable ();
6281 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6282 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6283 corresponding element of the vector, but for V4SFmode and V2SFmode,
6284 the corresponding "float" is interpreted as an SImode integer. */
6286 HOST_WIDE_INT
6287 const_vector_elt_as_int (rtx op, unsigned int elt)
6289 rtx tmp;
6291 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6292 gcc_assert (GET_MODE (op) != V2DImode
6293 && GET_MODE (op) != V2DFmode);
6295 tmp = CONST_VECTOR_ELT (op, elt);
6296 if (GET_MODE (op) == V4SFmode
6297 || GET_MODE (op) == V2SFmode)
6298 tmp = gen_lowpart (SImode, tmp);
6299 return INTVAL (tmp);
6302 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6303 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6304 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6305 all items are set to the same value and contain COPIES replicas of the
6306 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6307 operand and the others are set to the value of the operand's msb. */
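/* For example (big-endian element order): the V8HImode vector
   {3,3,3,3,3,3,3,3} matches with STEP 1, COPIES 1 (vspltish 3);
   {0,5,0,5,0,5,0,5} matches with STEP 2 (a vspltisw 5 reinterpreted);
   and a vector of 0x0303 halfwords matches with COPIES 2 (a
   vspltisb 3 reinterpreted). */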
6309 static bool
6310 vspltis_constant (rtx op, unsigned step, unsigned copies)
6312 machine_mode mode = GET_MODE (op);
6313 machine_mode inner = GET_MODE_INNER (mode);
6315 unsigned i;
6316 unsigned nunits;
6317 unsigned bitsize;
6318 unsigned mask;
6320 HOST_WIDE_INT val;
6321 HOST_WIDE_INT splat_val;
6322 HOST_WIDE_INT msb_val;
6324 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6325 return false;
6327 nunits = GET_MODE_NUNITS (mode);
6328 bitsize = GET_MODE_BITSIZE (inner);
6329 mask = GET_MODE_MASK (inner);
6331 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6332 splat_val = val;
6333 msb_val = val >= 0 ? 0 : -1;
6335 /* Construct the value to be splatted, if possible. If not, return 0. */
6336 for (i = 2; i <= copies; i *= 2)
6338 HOST_WIDE_INT small_val;
6339 bitsize /= 2;
6340 small_val = splat_val >> bitsize;
6341 mask >>= bitsize;
6342 if (splat_val != ((HOST_WIDE_INT)
6343 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6344 | (small_val & mask)))
6345 return false;
6346 splat_val = small_val;
6349 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6350 if (EASY_VECTOR_15 (splat_val))
6353 /* Also check if we can splat, and then add the result to itself. Do so if
6354 the value is positive, or if the splat instruction is using OP's mode;
6355 for splat_val < 0, the splat and the add should use the same mode. */
6356 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6357 && (splat_val >= 0 || (step == 1 && copies == 1)))
6360 /* Also check if we are loading up the most significant bit which can be done by
6361 loading up -1 and shifting the value left by -1. */
6362 else if (EASY_VECTOR_MSB (splat_val, inner))
6365 else
6366 return false;
6368 /* Check if VAL is present in every STEP-th element, and the
6369 other elements are filled with its most significant bit. */
6370 for (i = 1; i < nunits; ++i)
6372 HOST_WIDE_INT desired_val;
6373 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6374 if ((i & (step - 1)) == 0)
6375 desired_val = val;
6376 else
6377 desired_val = msb_val;
6379 if (desired_val != const_vector_elt_as_int (op, elt))
6380 return false;
6383 return true;
6386 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6387 instruction, filling in the bottom elements with 0 or -1.
6389 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6390 for the number of zeroes to shift in, or negative for the number of 0xff
6391 bytes to shift in.
6393 OP is a CONST_VECTOR. */
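/* For example, the big-endian V4SImode constant { 12, 0, 0, 0 } can
   be built with vspltisw 12 followed by a VSLDOI shifting in 12 zero
   bytes, so 12 is returned. */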
6395 int
6396 vspltis_shifted (rtx op)
6398 machine_mode mode = GET_MODE (op);
6399 machine_mode inner = GET_MODE_INNER (mode);
6401 unsigned i, j;
6402 unsigned nunits;
6403 unsigned mask;
6405 HOST_WIDE_INT val;
6407 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6408 return false;
6410 /* We need to create pseudo registers to do the shift, so don't recognize
6411 shift vector constants after reload. */
6412 if (!can_create_pseudo_p ())
6413 return false;
6415 nunits = GET_MODE_NUNITS (mode);
6416 mask = GET_MODE_MASK (inner);
6418 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6420 /* Check if the value can really be the operand of a vspltis[bhw]. */
6421 if (EASY_VECTOR_15 (val))
6424 /* Also check if we are loading up the most significant bit which can be done
6425 by loading up -1 and shifting the value left by -1. */
6426 else if (EASY_VECTOR_MSB (val, inner))
6429 else
6430 return 0;
6432 /* Check if VAL is present in every STEP-th element until we find elements
6433 that are 0 or all 1 bits. */
6434 for (i = 1; i < nunits; ++i)
6436 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6437 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6439 /* If the value isn't the splat value, check for the remaining elements
6440 being 0/-1. */
6441 if (val != elt_val)
6443 if (elt_val == 0)
6445 for (j = i+1; j < nunits; ++j)
6447 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6448 if (const_vector_elt_as_int (op, elt2) != 0)
6449 return 0;
6452 return (nunits - i) * GET_MODE_SIZE (inner);
6455 else if ((elt_val & mask) == mask)
6457 for (j = i+1; j < nunits; ++j)
6459 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6460 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6461 return 0;
6464 return -((nunits - i) * GET_MODE_SIZE (inner));
6467 else
6468 return 0;
6472 /* If all elements are equal, we don't need to do VSLDOI. */
6473 return 0;
6477 /* Return true if OP is of the given MODE and can be synthesized
6478 with a vspltisb, vspltish or vspltisw. */
6480 bool
6481 easy_altivec_constant (rtx op, machine_mode mode)
6483 unsigned step, copies;
6485 if (mode == VOIDmode)
6486 mode = GET_MODE (op);
6487 else if (mode != GET_MODE (op))
6488 return false;
6490 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6491 constants. */
6492 if (mode == V2DFmode)
6493 return zero_constant (op, mode);
6495 else if (mode == V2DImode)
6497 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6498 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6499 return false;
6501 if (zero_constant (op, mode))
6502 return true;
6504 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6505 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6506 return true;
6508 return false;
6511 /* V1TImode is a special container for TImode. Ignore for now. */
6512 else if (mode == V1TImode)
6513 return false;
6515 /* Start with a vspltisw. */
6516 step = GET_MODE_NUNITS (mode) / 4;
6517 copies = 1;
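/* Each failed attempt moves to the next narrower splat: while
   STEP > 1, halve STEP; otherwise double COPIES. */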
6519 if (vspltis_constant (op, step, copies))
6520 return true;
6522 /* Then try with a vspltish. */
6523 if (step == 1)
6524 copies <<= 1;
6525 else
6526 step >>= 1;
6528 if (vspltis_constant (op, step, copies))
6529 return true;
6531 /* And finally a vspltisb. */
6532 if (step == 1)
6533 copies <<= 1;
6534 else
6535 step >>= 1;
6537 if (vspltis_constant (op, step, copies))
6538 return true;
6540 if (vspltis_shifted (op) != 0)
6541 return true;
6543 return false;
6546 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6547 result is OP. Abort if it is not possible. */
6549 rtx
6550 gen_easy_altivec_constant (rtx op)
6552 machine_mode mode = GET_MODE (op);
6553 int nunits = GET_MODE_NUNITS (mode);
6554 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6555 unsigned step = nunits / 4;
6556 unsigned copies = 1;
6558 /* Start with a vspltisw. */
6559 if (vspltis_constant (op, step, copies))
6560 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6562 /* Then try with a vspltish. */
6563 if (step == 1)
6564 copies <<= 1;
6565 else
6566 step >>= 1;
6568 if (vspltis_constant (op, step, copies))
6569 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6571 /* And finally a vspltisb. */
6572 if (step == 1)
6573 copies <<= 1;
6574 else
6575 step >>= 1;
6577 if (vspltis_constant (op, step, copies))
6578 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6580 gcc_unreachable ();
6583 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6584 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6586 Return the number of instructions needed (1 or 2) via the address pointed
6587 to by NUM_INSNS_PTR.
6589 Return the constant that is being split via CONSTANT_PTR. */
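/* Two instructions are needed when the 8-bit splat must be
   sign-extended to a wider element mode (vupkhsb, vextsb2w or
   vextsb2d). */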
6591 bool
6592 xxspltib_constant_p (rtx op,
6593 machine_mode mode,
6594 int *num_insns_ptr,
6595 int *constant_ptr)
6597 size_t nunits = GET_MODE_NUNITS (mode);
6598 size_t i;
6599 HOST_WIDE_INT value;
6600 rtx element;
6602 /* Set the returned values to out of bound values. */
6603 *num_insns_ptr = -1;
6604 *constant_ptr = 256;
6606 if (!TARGET_P9_VECTOR)
6607 return false;
6609 if (mode == VOIDmode)
6610 mode = GET_MODE (op);
6612 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6613 return false;
6615 /* Handle (vec_duplicate <constant>). */
6616 if (GET_CODE (op) == VEC_DUPLICATE)
6618 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6619 && mode != V2DImode)
6620 return false;
6622 element = XEXP (op, 0);
6623 if (!CONST_INT_P (element))
6624 return false;
6626 value = INTVAL (element);
6627 if (!IN_RANGE (value, -128, 127))
6628 return false;
6631 /* Handle (const_vector [...]). */
6632 else if (GET_CODE (op) == CONST_VECTOR)
6634 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6635 && mode != V2DImode)
6636 return false;
6638 element = CONST_VECTOR_ELT (op, 0);
6639 if (!CONST_INT_P (element))
6640 return false;
6642 value = INTVAL (element);
6643 if (!IN_RANGE (value, -128, 127))
6644 return false;
6646 for (i = 1; i < nunits; i++)
6648 element = CONST_VECTOR_ELT (op, i);
6649 if (!CONST_INT_P (element))
6650 return false;
6652 if (value != INTVAL (element))
6653 return false;
6657 /* Handle integer constants being loaded into the upper part of the VSX
6658 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6659 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6660 else if (CONST_INT_P (op))
6662 if (!SCALAR_INT_MODE_P (mode))
6663 return false;
6665 value = INTVAL (op);
6666 if (!IN_RANGE (value, -128, 127))
6667 return false;
6669 if (!IN_RANGE (value, -1, 0))
6671 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6672 return false;
6674 if (EASY_VECTOR_15 (value))
6675 return false;
6679 else
6680 return false;
6682 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6683 sign extend. Special case 0/-1 to allow getting any VSX register instead
6684 of an Altivec register. */
6685 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6686 && EASY_VECTOR_15 (value))
6687 return false;
6689 /* Return # of instructions and the constant byte for XXSPLTIB. */
6690 if (mode == V16QImode)
6691 *num_insns_ptr = 1;
6693 else if (IN_RANGE (value, -1, 0))
6694 *num_insns_ptr = 1;
6696 else
6697 *num_insns_ptr = 2;
6699 *constant_ptr = (int) value;
6700 return true;
6703 const char *
6704 output_vec_const_move (rtx *operands)
6706 int shift;
6707 machine_mode mode;
6708 rtx dest, vec;
6710 dest = operands[0];
6711 vec = operands[1];
6712 mode = GET_MODE (dest);
6714 if (TARGET_VSX)
6716 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6717 int xxspltib_value = 256;
6718 int num_insns = -1;
6720 if (zero_constant (vec, mode))
6722 if (TARGET_P9_VECTOR)
6723 return "xxspltib %x0,0";
6725 else if (dest_vmx_p)
6726 return "vspltisw %0,0";
6728 else
6729 return "xxlxor %x0,%x0,%x0";
6732 if (all_ones_constant (vec, mode))
6734 if (TARGET_P9_VECTOR)
6735 return "xxspltib %x0,255";
6737 else if (dest_vmx_p)
6738 return "vspltisw %0,-1";
6740 else if (TARGET_P8_VECTOR)
6741 return "xxlorc %x0,%x0,%x0";
6743 else
6744 gcc_unreachable ();
6747 if (TARGET_P9_VECTOR
6748 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6750 if (num_insns == 1)
6752 operands[2] = GEN_INT (xxspltib_value & 0xff);
6753 return "xxspltib %x0,%2";
6756 return "#";
6760 if (TARGET_ALTIVEC)
6762 rtx splat_vec;
6764 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6765 if (zero_constant (vec, mode))
6766 return "vspltisw %0,0";
6768 if (all_ones_constant (vec, mode))
6769 return "vspltisw %0,-1";
6771 /* Do we need to construct a value using VSLDOI? */
6772 shift = vspltis_shifted (vec);
6773 if (shift != 0)
6774 return "#";
6776 splat_vec = gen_easy_altivec_constant (vec);
6777 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6778 operands[1] = XEXP (splat_vec, 0);
6779 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6780 return "#";
6782 switch (GET_MODE (splat_vec))
6784 case E_V4SImode:
6785 return "vspltisw %0,%1";
6787 case E_V8HImode:
6788 return "vspltish %0,%1";
6790 case E_V16QImode:
6791 return "vspltisb %0,%1";
6793 default:
6794 gcc_unreachable ();
6798 gcc_unreachable ();
6801 /* Initialize the paired-float vector TARGET to VALS. */
6803 void
6804 paired_expand_vector_init (rtx target, rtx vals)
6806 machine_mode mode = GET_MODE (target);
6807 int n_elts = GET_MODE_NUNITS (mode);
6808 int n_var = 0;
6809 rtx x, new_rtx, tmp, constant_op, op1, op2;
6810 int i;
6812 for (i = 0; i < n_elts; ++i)
6814 x = XVECEXP (vals, 0, i);
6815 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6816 ++n_var;
6818 if (n_var == 0)
6820 /* Load from constant pool. */
6821 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6822 return;
6825 if (n_var == 2)
6827 /* The vector is initialized only with non-constants. */
6828 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6829 XVECEXP (vals, 0, 1));
6831 emit_move_insn (target, new_rtx);
6832 return;
6835 /* One field is non-constant and the other one is a constant. Load the
6836 constant from the constant pool and use the ps_merge instruction to
6837 construct the whole vector. */
6838 op1 = XVECEXP (vals, 0, 0);
6839 op2 = XVECEXP (vals, 0, 1);
6841 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6843 tmp = gen_reg_rtx (GET_MODE (constant_op));
6844 emit_move_insn (tmp, constant_op);
6846 if (CONSTANT_P (op1))
6847 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6848 else
6849 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6851 emit_move_insn (target, new_rtx);
6854 void
6855 paired_expand_vector_move (rtx operands[])
6857 rtx op0 = operands[0], op1 = operands[1];
6859 emit_move_insn (op0, op1);
6862 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6863 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6864 operands for the relation operation COND. This is a recursive
6865 function. */
6867 static void
6868 paired_emit_vector_compare (enum rtx_code rcode,
6869 rtx dest, rtx op0, rtx op1,
6870 rtx cc_op0, rtx cc_op1)
6872 rtx tmp = gen_reg_rtx (V2SFmode);
6873 rtx tmp1, max, min;
6875 gcc_assert (TARGET_PAIRED_FLOAT);
6876 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6878 switch (rcode)
6880 case LT:
6881 case LTU:
6882 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6883 return;
6884 case GE:
6885 case GEU:
6886 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6887 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6888 return;
6889 case LE:
6890 case LEU:
6891 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6892 return;
6893 case GT:
6894 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6895 return;
6896 case EQ:
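/* a == b iff min (a,b) >= max (a,b); compute both via selects and
   test their difference. */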
6897 tmp1 = gen_reg_rtx (V2SFmode);
6898 max = gen_reg_rtx (V2SFmode);
6899 min = gen_reg_rtx (V2SFmode);
6900 gen_reg_rtx (V2SFmode);
6902 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6903 emit_insn (gen_selv2sf4
6904 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6905 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6906 emit_insn (gen_selv2sf4
6907 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6908 emit_insn (gen_subv2sf3 (tmp1, min, max));
6909 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6910 return;
6911 case NE:
6912 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6913 return;
6914 case UNLE:
6915 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6916 return;
6917 case UNLT:
6918 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6919 return;
6920 case UNGE:
6921 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6922 return;
6923 case UNGT:
6924 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6925 return;
6926 default:
6927 gcc_unreachable ();
6930 return;
6933 /* Emit vector conditional expression.
6934 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6935 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6937 int
6938 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6939 rtx cond, rtx cc_op0, rtx cc_op1)
6941 enum rtx_code rcode = GET_CODE (cond);
6943 if (!TARGET_PAIRED_FLOAT)
6944 return 0;
6946 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6948 return 1;
6951 /* Initialize vector TARGET to VALS. */
6953 void
6954 rs6000_expand_vector_init (rtx target, rtx vals)
6956 machine_mode mode = GET_MODE (target);
6957 machine_mode inner_mode = GET_MODE_INNER (mode);
6958 int n_elts = GET_MODE_NUNITS (mode);
6959 int n_var = 0, one_var = -1;
6960 bool all_same = true, all_const_zero = true;
6961 rtx x, mem;
6962 int i;
6964 for (i = 0; i < n_elts; ++i)
6966 x = XVECEXP (vals, 0, i);
6967 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6968 ++n_var, one_var = i;
6969 else if (x != CONST0_RTX (inner_mode))
6970 all_const_zero = false;
6972 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6973 all_same = false;
6976 if (n_var == 0)
6978 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6979 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6980 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6982 /* Zero register. */
6983 emit_move_insn (target, CONST0_RTX (mode));
6984 return;
6986 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6988 /* Splat immediate. */
6989 emit_insn (gen_rtx_SET (target, const_vec));
6990 return;
6992 else
6994 /* Load from constant pool. */
6995 emit_move_insn (target, const_vec);
6996 return;
7000 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7001 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7003 rtx op[2];
7004 size_t i;
7005 size_t num_elements = all_same ? 1 : 2;
7006 for (i = 0; i < num_elements; i++)
7008 op[i] = XVECEXP (vals, 0, i);
7009 /* Just in case there is a SUBREG with a smaller mode, do a
7010 conversion. */
7011 if (GET_MODE (op[i]) != inner_mode)
7013 rtx tmp = gen_reg_rtx (inner_mode);
7014 convert_move (tmp, op[i], 0);
7015 op[i] = tmp;
7017 /* Allow load with splat double word. */
7018 else if (MEM_P (op[i]))
7020 if (!all_same)
7021 op[i] = force_reg (inner_mode, op[i]);
7023 else if (!REG_P (op[i]))
7024 op[i] = force_reg (inner_mode, op[i]);
7027 if (all_same)
7029 if (mode == V2DFmode)
7030 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7031 else
7032 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7034 else
7036 if (mode == V2DFmode)
7037 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7038 else
7039 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7041 return;
7044 /* Special case initializing vector int if we are on 64-bit systems with
7045 direct move or we have the ISA 3.0 instructions. */
7046 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7047 && TARGET_DIRECT_MOVE_64BIT)
7049 if (all_same)
7051 rtx element0 = XVECEXP (vals, 0, 0);
7052 if (MEM_P (element0))
7053 element0 = rs6000_address_for_fpconvert (element0);
7054 else
7055 element0 = force_reg (SImode, element0);
7057 if (TARGET_P9_VECTOR)
7058 emit_insn (gen_vsx_splat_v4si (target, element0));
7059 else
7061 rtx tmp = gen_reg_rtx (DImode);
7062 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7063 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7065 return;
7067 else
7069 rtx elements[4];
7070 size_t i;
7072 for (i = 0; i < 4; i++)
7074 elements[i] = XVECEXP (vals, 0, i);
7075 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7076 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7079 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7080 elements[2], elements[3]));
7081 return;
7085 /* With single-precision floating point on VSX, we know that internally single
7086 precision is actually represented as a double, so either make 2 V2DF
7087 vectors and convert those vectors to single precision, or do one
7088 conversion and splat the result to the other elements. */
7089 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7091 if (all_same)
7093 rtx element0 = XVECEXP (vals, 0, 0);
7095 if (TARGET_P9_VECTOR)
7097 if (MEM_P (element0))
7098 element0 = rs6000_address_for_fpconvert (element0);
7100 emit_insn (gen_vsx_splat_v4sf (target, element0));
7103 else
7105 rtx freg = gen_reg_rtx (V4SFmode);
7106 rtx sreg = force_reg (SFmode, element0);
7107 rtx cvt = (TARGET_XSCVDPSPN
7108 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7109 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7111 emit_insn (cvt);
7112 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7113 const0_rtx));
7116 else
7118 rtx dbl_even = gen_reg_rtx (V2DFmode);
7119 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7120 rtx flt_even = gen_reg_rtx (V4SFmode);
7121 rtx flt_odd = gen_reg_rtx (V4SFmode);
7122 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7123 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7124 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7125 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7127 /* Use VMRGEW if we can instead of doing a permute. */
7128 if (TARGET_P8_VECTOR)
7130 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7131 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7132 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7133 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7134 if (BYTES_BIG_ENDIAN)
7135 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7136 else
7137 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7139 else
7141 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7142 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7143 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7144 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7145 rs6000_expand_extract_even (target, flt_even, flt_odd);
7148 return;
7151 /* Special case initializing vector short/char that are splats if we are on
7152 64-bit systems with direct move. */
7153 if (all_same && TARGET_DIRECT_MOVE_64BIT
7154 && (mode == V16QImode || mode == V8HImode))
7156 rtx op0 = XVECEXP (vals, 0, 0);
7157 rtx di_tmp = gen_reg_rtx (DImode);
7159 if (!REG_P (op0))
7160 op0 = force_reg (GET_MODE_INNER (mode), op0);
7162 if (mode == V16QImode)
7164 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7165 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7166 return;
7169 if (mode == V8HImode)
7171 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7172 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7173 return;
7177 /* Store value to stack temp. Load vector element. Splat. However, splat
7178 of 64-bit items is not supported on Altivec. */
7179 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7181 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7182 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7183 XVECEXP (vals, 0, 0));
7184 x = gen_rtx_UNSPEC (VOIDmode,
7185 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7186 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7187 gen_rtvec (2,
7188 gen_rtx_SET (target, mem),
7189 x)));
7190 x = gen_rtx_VEC_SELECT (inner_mode, target,
7191 gen_rtx_PARALLEL (VOIDmode,
7192 gen_rtvec (1, const0_rtx)));
7193 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7194 return;
7197 /* One field is non-constant. Load constant then overwrite
7198 varying field. */
7199 if (n_var == 1)
7201 rtx copy = copy_rtx (vals);
7203 /* Load constant part of vector, substitute neighboring value for
7204 varying element. */
7205 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7206 rs6000_expand_vector_init (target, copy);
7208 /* Insert variable. */
7209 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7210 return;
7213 /* Construct the vector in memory one field at a time
7214 and load the whole vector. */
7215 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7216 for (i = 0; i < n_elts; i++)
7217 emit_move_insn (adjust_address_nv (mem, inner_mode,
7218 i * GET_MODE_SIZE (inner_mode)),
7219 XVECEXP (vals, 0, i));
7220 emit_move_insn (target, mem);
7223 /* Set field ELT of TARGET to VAL. */
7225 void
7226 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7228 machine_mode mode = GET_MODE (target);
7229 machine_mode inner_mode = GET_MODE_INNER (mode);
7230 rtx reg = gen_reg_rtx (mode);
7231 rtx mask, mem, x;
7232 int width = GET_MODE_SIZE (inner_mode);
7233 int i;
7235 val = force_reg (GET_MODE (val), val);
7237 if (VECTOR_MEM_VSX_P (mode))
7239 rtx insn = NULL_RTX;
7240 rtx elt_rtx = GEN_INT (elt);
7242 if (mode == V2DFmode)
7243 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7245 else if (mode == V2DImode)
7246 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7248 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7250 if (mode == V4SImode)
7251 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7252 else if (mode == V8HImode)
7253 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7254 else if (mode == V16QImode)
7255 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7256 else if (mode == V4SFmode)
7257 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7260 if (insn)
7262 emit_insn (insn);
7263 return;
7267 /* Simplify setting single-element vectors like V1TImode. */
7268 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7270 emit_move_insn (target, gen_lowpart (mode, val));
7271 return;
7274 /* Load single variable value. */
7275 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7276 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7277 x = gen_rtx_UNSPEC (VOIDmode,
7278 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7279 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7280 gen_rtvec (2,
7281 gen_rtx_SET (reg, mem),
7282 x)));
7284 /* Linear sequence. */
7285 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7286 for (i = 0; i < 16; ++i)
7287 XVECEXP (mask, 0, i) = GEN_INT (i);
7289 /* Set permute mask to insert element into target. */
7290 for (i = 0; i < width; ++i)
7291 XVECEXP (mask, 0, elt*width + i)
7292 = GEN_INT (i + 0x10);
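/* For example, with a V4SImode TARGET and ELT == 1 (so WIDTH == 4), the
   selector becomes { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }:
   bytes 4..7 of the result are taken from REG (which holds the new
   value) and every other byte is kept from TARGET.  */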
7293 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7295 if (BYTES_BIG_ENDIAN)
7296 x = gen_rtx_UNSPEC (mode,
7297 gen_rtvec (3, target, reg,
7298 force_reg (V16QImode, x)),
7299 UNSPEC_VPERM);
7300 else
7302 if (TARGET_P9_VECTOR)
7303 x = gen_rtx_UNSPEC (mode,
7304 gen_rtvec (3, target, reg,
7305 force_reg (V16QImode, x)),
7306 UNSPEC_VPERMR);
7307 else
7309 /* Invert selector. We prefer to generate VNAND on P8 so
7310 that future fusion opportunities can kick in, but must
7311 generate VNOR elsewhere. */
7312 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7313 rtx iorx = (TARGET_P8_VECTOR
7314 ? gen_rtx_IOR (V16QImode, notx, notx)
7315 : gen_rtx_AND (V16QImode, notx, notx));
7316 rtx tmp = gen_reg_rtx (V16QImode);
7317 emit_insn (gen_rtx_SET (tmp, iorx));
7319 /* Permute with operands reversed and adjusted selector. */
7320 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7321 UNSPEC_VPERM);
7325 emit_insn (gen_rtx_SET (target, x));
7328 /* Extract field ELT from VEC into TARGET. */
7330 void
7331 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7333 machine_mode mode = GET_MODE (vec);
7334 machine_mode inner_mode = GET_MODE_INNER (mode);
7335 rtx mem;
7337 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7339 switch (mode)
7341 default:
7342 break;
7343 case E_V1TImode:
7344 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7345 emit_move_insn (target, gen_lowpart (TImode, vec));
7346 break;
7347 case E_V2DFmode:
7348 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7349 return;
7350 case E_V2DImode:
7351 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7352 return;
7353 case E_V4SFmode:
7354 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7355 return;
7356 case E_V16QImode:
7357 if (TARGET_DIRECT_MOVE_64BIT)
7359 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7360 return;
7362 else
7363 break;
7364 case E_V8HImode:
7365 if (TARGET_DIRECT_MOVE_64BIT)
7367 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7368 return;
7370 else
7371 break;
7372 case E_V4SImode:
7373 if (TARGET_DIRECT_MOVE_64BIT)
7375 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7376 return;
7378 break;
7381 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7382 && TARGET_DIRECT_MOVE_64BIT)
7384 if (GET_MODE (elt) != DImode)
7386 rtx tmp = gen_reg_rtx (DImode);
7387 convert_move (tmp, elt, 0);
7388 elt = tmp;
7390 else if (!REG_P (elt))
7391 elt = force_reg (DImode, elt);
7393 switch (mode)
7395 case E_V2DFmode:
7396 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7397 return;
7399 case E_V2DImode:
7400 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7401 return;
7403 case E_V4SFmode:
7404 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7405 return;
7407 case E_V4SImode:
7408 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7409 return;
7411 case E_V8HImode:
7412 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7413 return;
7415 case E_V16QImode:
7416 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7417 return;
7419 default:
7420 gcc_unreachable ();
7424 gcc_assert (CONST_INT_P (elt));
7426 /* Allocate mode-sized buffer. */
7427 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7429 emit_move_insn (mem, vec);
7431 /* Add offset to field within buffer matching vector element. */
7432 mem = adjust_address_nv (mem, inner_mode,
7433 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7435 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7438 /* Helper function to return the register number of an RTX. */
7439 static inline int
7440 regno_or_subregno (rtx op)
7442 if (REG_P (op))
7443 return REGNO (op);
7444 else if (SUBREG_P (op))
7445 return subreg_regno (op);
7446 else
7447 gcc_unreachable ();
7450 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7451 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7452 temporary (BASE_TMP) to fixup the address. Return the new memory address
7453 that is valid for reads or writes to a given register (SCALAR_REG). */
7455 static rtx
7456 rs6000_adjust_vec_address (rtx scalar_reg,
7457 rtx mem,
7458 rtx element,
7459 rtx base_tmp,
7460 machine_mode scalar_mode)
7462 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7463 rtx addr = XEXP (mem, 0);
7464 rtx element_offset;
7465 rtx new_addr;
7466 bool valid_addr_p;
7468 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7469 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7471 /* Calculate what we need to add to the address to get the element
7472 address. */
7473 if (CONST_INT_P (element))
7474 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7475 else
7477 int byte_shift = exact_log2 (scalar_size);
7478 gcc_assert (byte_shift >= 0);
7480 if (byte_shift == 0)
7481 element_offset = element;
7483 else
7485 if (TARGET_POWERPC64)
7486 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7487 else
7488 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7490 element_offset = base_tmp;
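/* For example, a variable ELEMENT in V8HImode has scalar_size == 2 and
   byte_shift == 1, so the shift above turns an element number into a
   byte offset within the vector.  */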
7494 /* Create the new address pointing to the element within the vector. If we
7495 are adding 0, we don't have to change the address. */
7496 if (element_offset == const0_rtx)
7497 new_addr = addr;
7499 /* A simple indirect address can be converted into a reg + offset
7500 address. */
7501 else if (REG_P (addr) || SUBREG_P (addr))
7502 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7504 /* Optimize D-FORM addresses with constant offset with a constant element, to
7505 include the element offset in the address directly. */
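/* For example, if ADDR is (plus r9 48) and ELEMENT_OFFSET is
   (const_int 8), the combined address is simply (plus r9 56), as long
   as the sum stays within the signed 16-bit displacement range (and is
   word aligned for 8-byte scalars).  */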
7506 else if (GET_CODE (addr) == PLUS)
7508 rtx op0 = XEXP (addr, 0);
7509 rtx op1 = XEXP (addr, 1);
7510 rtx insn;
7512 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7513 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7515 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7516 rtx offset_rtx = GEN_INT (offset);
7518 if (IN_RANGE (offset, -32768, 32767)
7519 && (scalar_size < 8 || (offset & 0x3) == 0))
7520 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7521 else
7523 emit_move_insn (base_tmp, offset_rtx);
7524 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7527 else
7529 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7530 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7532 /* Note, ADDI requires the register being added to be a base
7533 register. If the register was R0, load it up into the temporary
7534 and do the add. */
7535 if (op1_reg_p
7536 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7538 insn = gen_add3_insn (base_tmp, op1, element_offset);
7539 gcc_assert (insn != NULL_RTX);
7540 emit_insn (insn);
7543 else if (ele_reg_p
7544 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7546 insn = gen_add3_insn (base_tmp, element_offset, op1);
7547 gcc_assert (insn != NULL_RTX);
7548 emit_insn (insn);
7551 else
7553 emit_move_insn (base_tmp, op1);
7554 emit_insn (gen_add2_insn (base_tmp, element_offset));
7557 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7561 else
7563 emit_move_insn (base_tmp, addr);
7564 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7567 /* If we have a PLUS, we need to see whether the particular register class
7568 allows for D-FORM or X-FORM addressing. */
7569 if (GET_CODE (new_addr) == PLUS)
7571 rtx op1 = XEXP (new_addr, 1);
7572 addr_mask_type addr_mask;
7573 int scalar_regno = regno_or_subregno (scalar_reg);
7575 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7576 if (INT_REGNO_P (scalar_regno))
7577 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7579 else if (FP_REGNO_P (scalar_regno))
7580 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7582 else if (ALTIVEC_REGNO_P (scalar_regno))
7583 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7585 else
7586 gcc_unreachable ();
7588 if (REG_P (op1) || SUBREG_P (op1))
7589 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7590 else
7591 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7594 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7595 valid_addr_p = true;
7597 else
7598 valid_addr_p = false;
7600 if (!valid_addr_p)
7602 emit_move_insn (base_tmp, new_addr);
7603 new_addr = base_tmp;
7606 return change_address (mem, scalar_mode, new_addr);
7609 /* Split a variable vec_extract operation into the component instructions. */
7611 void
7612 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7613 rtx tmp_altivec)
7615 machine_mode mode = GET_MODE (src);
7616 machine_mode scalar_mode = GET_MODE (dest);
7617 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7618 int byte_shift = exact_log2 (scalar_size);
7620 gcc_assert (byte_shift >= 0);
7622 /* If we are given a memory address, optimize to load just the element. We
7623 don't have to adjust the vector element number on little endian
7624 systems. */
7625 if (MEM_P (src))
7627 gcc_assert (REG_P (tmp_gpr));
7628 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7629 tmp_gpr, scalar_mode));
7630 return;
7633 else if (REG_P (src) || SUBREG_P (src))
7635 int bit_shift = byte_shift + 3;
7636 rtx element2;
7637 int dest_regno = regno_or_subregno (dest);
7638 int src_regno = regno_or_subregno (src);
7639 int element_regno = regno_or_subregno (element);
7641 gcc_assert (REG_P (tmp_gpr));
7643 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7644 a general purpose register. */
7645 if (TARGET_P9_VECTOR
7646 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7647 && INT_REGNO_P (dest_regno)
7648 && ALTIVEC_REGNO_P (src_regno)
7649 && INT_REGNO_P (element_regno))
7651 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7652 rtx element_si = gen_rtx_REG (SImode, element_regno);
7654 if (mode == V16QImode)
7655 emit_insn (VECTOR_ELT_ORDER_BIG
7656 ? gen_vextublx (dest_si, element_si, src)
7657 : gen_vextubrx (dest_si, element_si, src));
7659 else if (mode == V8HImode)
7661 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7662 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7663 emit_insn (VECTOR_ELT_ORDER_BIG
7664 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7665 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7669 else
7671 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7672 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7673 emit_insn (VECTOR_ELT_ORDER_BIG
7674 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7675 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7678 return;
7682 gcc_assert (REG_P (tmp_altivec));
7684 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7685 an XOR, otherwise we need to subtract. The shift amount is chosen so
7686 that VSLO will shift the element into the upper position (adding 3
7687 converts a byte shift into a bit shift). */
7688 if (scalar_size == 8)
7690 if (!VECTOR_ELT_ORDER_BIG)
7692 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7693 element2 = tmp_gpr;
7695 else
7696 element2 = element;
7698 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7699 bit, yielding a VSLO shift amount of either 0 or 64 bits. */
7700 emit_insn (gen_rtx_SET (tmp_gpr,
7701 gen_rtx_AND (DImode,
7702 gen_rtx_ASHIFT (DImode,
7703 element2,
7704 GEN_INT (6)),
7705 GEN_INT (64))));
7707 else
7709 if (!VECTOR_ELT_ORDER_BIG)
7711 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7713 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7714 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7715 element2 = tmp_gpr;
7717 else
7718 element2 = element;
7720 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
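/* For example, extracting variable element 2 of a V4SImode vector on a
   big-endian system gives bit_shift == 5, so TMP_GPR ends up holding
   2 << 5 == 64, and the VSLO emitted below shifts the vector left by
   64 bits (8 bytes), moving element 2 into the upper position.  */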
7723 /* Get the value into the lower byte of the Altivec register where VSLO
7724 expects it. */
7725 if (TARGET_P9_VECTOR)
7726 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7727 else if (can_create_pseudo_p ())
7728 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7729 else
7731 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7732 emit_move_insn (tmp_di, tmp_gpr);
7733 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7736 /* Do the VSLO to get the value into the final location. */
7737 switch (mode)
7739 case E_V2DFmode:
7740 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7741 return;
7743 case E_V2DImode:
7744 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7745 return;
7747 case E_V4SFmode:
7749 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7750 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7751 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7752 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7753 tmp_altivec));
7755 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7756 return;
7759 case E_V4SImode:
7760 case E_V8HImode:
7761 case E_V16QImode:
7763 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7764 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7765 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7766 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7767 tmp_altivec));
7768 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7769 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7770 GEN_INT (64 - (8 * scalar_size))));
7771 return;
7774 default:
7775 gcc_unreachable ();
7778 return;
7780 else
7781 gcc_unreachable ();
7784 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7785 two SImode values. */
7787 static void
7788 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7790 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7792 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7794 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7795 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7797 emit_move_insn (dest, GEN_INT (const1 | const2));
7798 return;
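/* For example, the constant/constant case above folds si1 == 0x11112222
   and si2 == 0x33334444 into the single DImode constant
   0x1111222233334444, loaded with one move.  */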
7801 /* Put si1 into upper 32-bits of dest. */
7802 if (CONST_INT_P (si1))
7803 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7804 else
7806 /* Generate RLDIC. */
7807 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7808 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7809 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7810 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7811 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7812 emit_insn (gen_rtx_SET (dest, and_rtx));
7815 /* Put si2 into the temporary. */
7816 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7817 if (CONST_INT_P (si2))
7818 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7819 else
7820 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7822 /* Combine the two parts. */
7823 emit_insn (gen_iordi3 (dest, dest, tmp));
7824 return;
7827 /* Split a V4SI initialization. */
7829 void
7830 rs6000_split_v4si_init (rtx operands[])
7832 rtx dest = operands[0];
7834 /* Destination is a GPR, build up the two DImode parts in place. */
7835 if (REG_P (dest) || SUBREG_P (dest))
7837 int d_regno = regno_or_subregno (dest);
7838 rtx scalar1 = operands[1];
7839 rtx scalar2 = operands[2];
7840 rtx scalar3 = operands[3];
7841 rtx scalar4 = operands[4];
7842 rtx tmp1 = operands[5];
7843 rtx tmp2 = operands[6];
7845 /* Even though we only need one temporary (plus the destination, which
7846 has an early clobber constraint), try to use two temporaries, one for
7847 each double word created. That way the 2nd insn scheduling pass can
7848 rearrange things so the two parts are done in parallel. */
7849 if (BYTES_BIG_ENDIAN)
7851 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7852 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7853 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7854 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7856 else
7858 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7859 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7860 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7861 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7862 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7864 return;
7867 else
7868 gcc_unreachable ();
7871 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7872 selects whether the alignment is ABI-mandated, optional, or
7873 both ABI-mandated and optional alignment. */
7875 unsigned int
7876 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7878 if (how != align_opt)
7880 if (TREE_CODE (type) == VECTOR_TYPE)
7882 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7884 if (align < 64)
7885 align = 64;
7887 else if (align < 128)
7888 align = 128;
7892 if (how != align_abi)
7894 if (TREE_CODE (type) == ARRAY_TYPE
7895 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7897 if (align < BITS_PER_WORD)
7898 align = BITS_PER_WORD;
7902 return align;
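/* For example, an AltiVec vector type is raised to at least 128-bit
   alignment (64-bit for paired-float vectors) when ABI alignment is
   requested, while a char array is raised to word alignment only when
   optional alignment is allowed, so it can be copied a word at a
   time.  */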
7905 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7907 bool
7908 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7910 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7912 if (computed != 128)
7914 static bool warned;
7915 if (!warned && warn_psabi)
7917 warned = true;
7918 inform (input_location,
7919 "the layout of aggregates containing vectors with"
7920 " %d-byte alignment has changed in GCC 5",
7921 computed / BITS_PER_UNIT);
7924 /* In current GCC there is no special case. */
7925 return false;
7928 return false;
7931 /* AIX increases natural record alignment to doubleword if the first
7932 field is an FP double while the FP fields remain word aligned. */
7934 unsigned int
7935 rs6000_special_round_type_align (tree type, unsigned int computed,
7936 unsigned int specified)
7938 unsigned int align = MAX (computed, specified);
7939 tree field = TYPE_FIELDS (type);
7941 /* Skip all non-field decls. */
7942 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7943 field = DECL_CHAIN (field);
7945 if (field != NULL && field != type)
7947 type = TREE_TYPE (field);
7948 while (TREE_CODE (type) == ARRAY_TYPE)
7949 type = TREE_TYPE (type);
7951 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7952 align = MAX (align, 64);
7955 return align;
7958 /* Darwin increases record alignment to the natural alignment of
7959 the first field. */
7961 unsigned int
7962 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7963 unsigned int specified)
7965 unsigned int align = MAX (computed, specified);
7967 if (TYPE_PACKED (type))
7968 return align;
7970 /* Find the first field, looking down into aggregates. */
7971 do {
7972 tree field = TYPE_FIELDS (type);
7973 /* Skip all non-field decls. */
7974 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7975 field = DECL_CHAIN (field);
7976 if (! field)
7977 break;
7978 /* A packed field does not contribute any extra alignment. */
7979 if (DECL_PACKED (field))
7980 return align;
7981 type = TREE_TYPE (field);
7982 while (TREE_CODE (type) == ARRAY_TYPE)
7983 type = TREE_TYPE (type);
7984 } while (AGGREGATE_TYPE_P (type));
7986 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7987 align = MAX (align, TYPE_ALIGN (type));
7989 return align;
7992 /* Return 1 for an operand in small memory on V.4/eabi. */
7994 int
7995 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7996 machine_mode mode ATTRIBUTE_UNUSED)
7998 #if TARGET_ELF
7999 rtx sym_ref;
8001 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8002 return 0;
8004 if (DEFAULT_ABI != ABI_V4)
8005 return 0;
8007 if (GET_CODE (op) == SYMBOL_REF)
8008 sym_ref = op;
8010 else if (GET_CODE (op) != CONST
8011 || GET_CODE (XEXP (op, 0)) != PLUS
8012 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8013 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8014 return 0;
8016 else
8018 rtx sum = XEXP (op, 0);
8019 HOST_WIDE_INT summand;
8021 /* We have to be careful here, because it is the referenced address
8022 that must be 32k from _SDA_BASE_, not just the symbol. */
8023 summand = INTVAL (XEXP (sum, 1));
8024 if (summand < 0 || summand > g_switch_value)
8025 return 0;
8027 sym_ref = XEXP (sum, 0);
8030 return SYMBOL_REF_SMALL_P (sym_ref);
8031 #else
8032 return 0;
8033 #endif
8036 /* Return true if either operand is a general purpose register. */
8038 bool
8039 gpr_or_gpr_p (rtx op0, rtx op1)
8041 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8042 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8045 /* Return true if this is a direct move operation between GPR registers and
8046 floating point/VSX registers. */
8048 bool
8049 direct_move_p (rtx op0, rtx op1)
8051 int regno0, regno1;
8053 if (!REG_P (op0) || !REG_P (op1))
8054 return false;
8056 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8057 return false;
8059 regno0 = REGNO (op0);
8060 regno1 = REGNO (op1);
8061 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8062 return false;
8064 if (INT_REGNO_P (regno0))
8065 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8067 else if (INT_REGNO_P (regno1))
8069 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8070 return true;
8072 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8073 return true;
8076 return false;
8079 /* Return true if the OFFSET is valid for the quad address instructions that
8080 use d-form (register + offset) addressing. */
8082 static inline bool
8083 quad_address_offset_p (HOST_WIDE_INT offset)
8085 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
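/* For example, quad_address_offset_p accepts 32 but rejects 24 (not a
   multiple of 16) and 32768 (outside the signed 16-bit range).  */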
8088 /* Return true if the ADDR is an acceptable address for a quad memory
8089 operation of mode MODE (either LQ/STQ for general purpose registers, or
8090 LXV/STXV for vector registers under ISA 3.0). STRICT selects whether
8091 strict register checking is applied when validating the base register of
8092 the address. */
8094 bool
8095 quad_address_p (rtx addr, machine_mode mode, bool strict)
8097 rtx op0, op1;
8099 if (GET_MODE_SIZE (mode) != 16)
8100 return false;
8102 if (legitimate_indirect_address_p (addr, strict))
8103 return true;
8105 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8106 return false;
8108 if (GET_CODE (addr) != PLUS)
8109 return false;
8111 op0 = XEXP (addr, 0);
8112 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8113 return false;
8115 op1 = XEXP (addr, 1);
8116 if (!CONST_INT_P (op1))
8117 return false;
8119 return quad_address_offset_p (INTVAL (op1));
8122 /* Return true if this is a load or store quad operation. This function does
8123 not handle the atomic quad memory instructions. */
8125 bool
8126 quad_load_store_p (rtx op0, rtx op1)
8128 bool ret;
8130 if (!TARGET_QUAD_MEMORY)
8131 ret = false;
8133 else if (REG_P (op0) && MEM_P (op1))
8134 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8135 && quad_memory_operand (op1, GET_MODE (op1))
8136 && !reg_overlap_mentioned_p (op0, op1));
8138 else if (MEM_P (op0) && REG_P (op1))
8139 ret = (quad_memory_operand (op0, GET_MODE (op0))
8140 && quad_int_reg_operand (op1, GET_MODE (op1)));
8142 else
8143 ret = false;
8145 if (TARGET_DEBUG_ADDR)
8147 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8148 ret ? "true" : "false");
8149 debug_rtx (gen_rtx_SET (op0, op1));
8152 return ret;
8155 /* Given an address, return a constant offset term if one exists. */
8157 static rtx
8158 address_offset (rtx op)
8160 if (GET_CODE (op) == PRE_INC
8161 || GET_CODE (op) == PRE_DEC)
8162 op = XEXP (op, 0);
8163 else if (GET_CODE (op) == PRE_MODIFY
8164 || GET_CODE (op) == LO_SUM)
8165 op = XEXP (op, 1);
8167 if (GET_CODE (op) == CONST)
8168 op = XEXP (op, 0);
8170 if (GET_CODE (op) == PLUS)
8171 op = XEXP (op, 1);
8173 if (CONST_INT_P (op))
8174 return op;
8176 return NULL_RTX;
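/* For example, (plus (reg) (const_int 8)) yields (const_int 8), a
   LO_SUM of a register and (const (plus (symbol_ref) (const_int 4)))
   yields (const_int 4), and a bare register yields NULL_RTX.  */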
8179 /* Return true if the MEM operand is a memory operand suitable for use
8180 with a (full width, possibly multiple) gpr load/store. On
8181 powerpc64 this means the offset must be divisible by 4.
8182 Implements 'Y' constraint.
8184 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8185 a constraint function we know the operand has satisfied a suitable
8186 memory predicate. Also accept some odd rtl generated by reload
8187 (see rs6000_legitimize_reload_address for various forms). It is
8188 important that reload rtl be accepted by appropriate constraints
8189 but not by the operand predicate.
8191 Offsetting a lo_sum should not be allowed, except where we know by
8192 alignment that a 32k boundary is not crossed, but see the ???
8193 comment in rs6000_legitimize_reload_address. Note that by
8194 "offsetting" here we mean a further offset to access parts of the
8195 MEM. It's fine to have a lo_sum where the inner address is offset
8196 from a sym, since the same sym+offset will appear in the high part
8197 of the address calculation. */
8199 bool
8200 mem_operand_gpr (rtx op, machine_mode mode)
8202 unsigned HOST_WIDE_INT offset;
8203 int extra;
8204 rtx addr = XEXP (op, 0);
8206 op = address_offset (addr);
8207 if (op == NULL_RTX)
8208 return true;
8210 offset = INTVAL (op);
8211 if (TARGET_POWERPC64 && (offset & 3) != 0)
8212 return false;
8214 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8215 if (extra < 0)
8216 extra = 0;
8218 if (GET_CODE (addr) == LO_SUM)
8219 /* For lo_sum addresses, we must allow any offset except one that
8220 causes a wrap, so test only the low 16 bits. */
8221 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8223 return offset + 0x8000 < 0x10000u - extra;
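/* For example, a TImode access on powerpc64 has extra == 8, so the
   offset must lie in [-32768, 32759] as well as be a multiple of 4:
   offset 32752 is accepted, 32760 is not.  */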
8226 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8227 enforce an offset divisible by 4 even for 32-bit. */
8229 bool
8230 mem_operand_ds_form (rtx op, machine_mode mode)
8232 unsigned HOST_WIDE_INT offset;
8233 int extra;
8234 rtx addr = XEXP (op, 0);
8236 if (!offsettable_address_p (false, mode, addr))
8237 return false;
8239 op = address_offset (addr);
8240 if (op == NULL_RTX)
8241 return true;
8243 offset = INTVAL (op);
8244 if ((offset & 3) != 0)
8245 return false;
8247 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8248 if (extra < 0)
8249 extra = 0;
8251 if (GET_CODE (addr) == LO_SUM)
8252 /* For lo_sum addresses, we must allow any offset except one that
8253 causes a wrap, so test only the low 16 bits. */
8254 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8256 return offset + 0x8000 < 0x10000u - extra;
8259 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8261 static bool
8262 reg_offset_addressing_ok_p (machine_mode mode)
8264 switch (mode)
8266 case E_V16QImode:
8267 case E_V8HImode:
8268 case E_V4SFmode:
8269 case E_V4SImode:
8270 case E_V2DFmode:
8271 case E_V2DImode:
8272 case E_V1TImode:
8273 case E_TImode:
8274 case E_TFmode:
8275 case E_KFmode:
8276 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8277 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8278 a vector mode, if we want to use the VSX registers to move it around,
8279 we need to restrict ourselves to reg+reg addressing. Similarly for
8280 IEEE 128-bit floating point that is passed in a single vector
8281 register. */
8282 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8283 return mode_supports_vsx_dform_quad (mode);
8284 break;
8286 case E_V2SImode:
8287 case E_V2SFmode:
8288 /* Paired vector modes. Only reg+reg addressing is valid. */
8289 if (TARGET_PAIRED_FLOAT)
8290 return false;
8291 break;
8293 case E_SDmode:
8294 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8295 addressing for the LFIWZX and STFIWX instructions. */
8296 if (TARGET_NO_SDMODE_STACK)
8297 return false;
8298 break;
8300 default:
8301 break;
8304 return true;
8307 static bool
8308 virtual_stack_registers_memory_p (rtx op)
8310 int regnum;
8312 if (GET_CODE (op) == REG)
8313 regnum = REGNO (op);
8315 else if (GET_CODE (op) == PLUS
8316 && GET_CODE (XEXP (op, 0)) == REG
8317 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8318 regnum = REGNO (XEXP (op, 0));
8320 else
8321 return false;
8323 return (regnum >= FIRST_VIRTUAL_REGISTER
8324 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8327 /* Return true if a MODE-sized memory access to OP plus OFFSET
8328 is known to not straddle a 32k boundary. This function is used
8329 to determine whether -mcmodel=medium code can use TOC pointer
8330 relative addressing for OP. This means the alignment of the TOC
8331 pointer must also be taken into account, and unfortunately that is
8332 only 8 bytes. */
8334 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8335 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8336 #endif
8338 static bool
8339 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8340 machine_mode mode)
8342 tree decl;
8343 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8345 if (GET_CODE (op) != SYMBOL_REF)
8346 return false;
8348 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8349 SYMBOL_REF. */
8350 if (mode_supports_vsx_dform_quad (mode))
8351 return false;
8353 dsize = GET_MODE_SIZE (mode);
8354 decl = SYMBOL_REF_DECL (op);
8355 if (!decl)
8357 if (dsize == 0)
8358 return false;
8360 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8361 replacing memory addresses with an anchor plus offset. We
8362 could find the decl by rummaging around in the block->objects
8363 VEC for the given offset but that seems like too much work. */
8364 dalign = BITS_PER_UNIT;
8365 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8366 && SYMBOL_REF_ANCHOR_P (op)
8367 && SYMBOL_REF_BLOCK (op) != NULL)
8369 struct object_block *block = SYMBOL_REF_BLOCK (op);
8371 dalign = block->alignment;
8372 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8374 else if (CONSTANT_POOL_ADDRESS_P (op))
8376 /* It would be nice to have get_pool_align ()... */
8377 machine_mode cmode = get_pool_mode (op);
8379 dalign = GET_MODE_ALIGNMENT (cmode);
8382 else if (DECL_P (decl))
8384 dalign = DECL_ALIGN (decl);
8386 if (dsize == 0)
8388 /* Allow BLKmode when the entire object is known to not
8389 cross a 32k boundary. */
8390 if (!DECL_SIZE_UNIT (decl))
8391 return false;
8393 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8394 return false;
8396 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8397 if (dsize > 32768)
8398 return false;
8400 dalign /= BITS_PER_UNIT;
8401 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8402 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8403 return dalign >= dsize;
8406 else
8407 gcc_unreachable ();
8409 /* Find how many bits of the alignment we know for this access. */
8410 dalign /= BITS_PER_UNIT;
8411 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8412 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8413 mask = dalign - 1;
8414 lsb = offset & -offset;
8415 mask &= lsb - 1;
8416 dalign = mask + 1;
8418 return dalign >= dsize;
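/* For example, with a known alignment of 8 bytes: offset 40 has
   lsb == 8, so dalign stays 8 and an 8-byte access passes, while
   offset 44 has lsb == 4, reducing dalign to 4, so an 8-byte access
   is rejected.  */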
8421 static bool
8422 constant_pool_expr_p (rtx op)
8424 rtx base, offset;
8426 split_const (op, &base, &offset);
8427 return (GET_CODE (base) == SYMBOL_REF
8428 && CONSTANT_POOL_ADDRESS_P (base)
8429 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8432 /* These are only used to pass through from print_operand/print_operand_address
8433 to rs6000_output_addr_const_extra over the intervening function
8434 output_addr_const which is not target code. */
8435 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8437 /* Return true if OP is a toc pointer relative address (the output
8438 of create_TOC_reference). If STRICT, do not match non-split
8439 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8440 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8441 TOCREL_OFFSET_RET respectively. */
8443 bool
8444 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8445 const_rtx *tocrel_offset_ret)
8447 if (!TARGET_TOC)
8448 return false;
8450 if (TARGET_CMODEL != CMODEL_SMALL)
8452 /* When strict, ensure we have everything tidy. */
8453 if (strict
8454 && !(GET_CODE (op) == LO_SUM
8455 && REG_P (XEXP (op, 0))
8456 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8457 return false;
8459 /* When not strict, allow non-split TOC addresses and also allow
8460 (lo_sum (high ..)) TOC addresses created during reload. */
8461 if (GET_CODE (op) == LO_SUM)
8462 op = XEXP (op, 1);
8465 const_rtx tocrel_base = op;
8466 const_rtx tocrel_offset = const0_rtx;
8468 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8470 tocrel_base = XEXP (op, 0);
8471 tocrel_offset = XEXP (op, 1);
8474 if (tocrel_base_ret)
8475 *tocrel_base_ret = tocrel_base;
8476 if (tocrel_offset_ret)
8477 *tocrel_offset_ret = tocrel_offset;
8479 return (GET_CODE (tocrel_base) == UNSPEC
8480 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8483 /* Return true if X is a constant pool address, and also for cmodel=medium
8484 if X is a toc-relative address known to be offsettable within MODE. */
8486 bool
8487 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8488 bool strict)
8490 const_rtx tocrel_base, tocrel_offset;
8491 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8492 && (TARGET_CMODEL != CMODEL_MEDIUM
8493 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8494 || mode == QImode
8495 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8496 INTVAL (tocrel_offset), mode)));
8499 static bool
8500 legitimate_small_data_p (machine_mode mode, rtx x)
8502 return (DEFAULT_ABI == ABI_V4
8503 && !flag_pic && !TARGET_TOC
8504 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8505 && small_data_operand (x, mode));
8508 bool
8509 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8510 bool strict, bool worst_case)
8512 unsigned HOST_WIDE_INT offset;
8513 unsigned int extra;
8515 if (GET_CODE (x) != PLUS)
8516 return false;
8517 if (!REG_P (XEXP (x, 0)))
8518 return false;
8519 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8520 return false;
8521 if (mode_supports_vsx_dform_quad (mode))
8522 return quad_address_p (x, mode, strict);
8523 if (!reg_offset_addressing_ok_p (mode))
8524 return virtual_stack_registers_memory_p (x);
8525 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8526 return true;
8527 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8528 return false;
8530 offset = INTVAL (XEXP (x, 1));
8531 extra = 0;
8532 switch (mode)
8534 case E_V2SImode:
8535 case E_V2SFmode:
8536 /* Paired single modes: offset addressing isn't valid. */
8537 return false;
8539 case E_DFmode:
8540 case E_DDmode:
8541 case E_DImode:
8542 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8543 addressing. */
8544 if (VECTOR_MEM_VSX_P (mode))
8545 return false;
8547 if (!worst_case)
8548 break;
8549 if (!TARGET_POWERPC64)
8550 extra = 4;
8551 else if (offset & 3)
8552 return false;
8553 break;
8555 case E_TFmode:
8556 case E_IFmode:
8557 case E_KFmode:
8558 case E_TDmode:
8559 case E_TImode:
8560 case E_PTImode:
8561 extra = 8;
8562 if (!worst_case)
8563 break;
8564 if (!TARGET_POWERPC64)
8565 extra = 12;
8566 else if (offset & 3)
8567 return false;
8568 break;
8570 default:
8571 break;
8574 offset += 0x8000;
8575 return offset < 0x10000 - extra;
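/* For example, a worst-case TFmode access on 32-bit has extra == 12,
   so the largest offset accepted above is 32755; without WORST_CASE
   the limit is 32759.  */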
8578 bool
8579 legitimate_indexed_address_p (rtx x, int strict)
8581 rtx op0, op1;
8583 if (GET_CODE (x) != PLUS)
8584 return false;
8586 op0 = XEXP (x, 0);
8587 op1 = XEXP (x, 1);
8589 return (REG_P (op0) && REG_P (op1)
8590 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8591 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8592 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8593 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8596 bool
8597 avoiding_indexed_address_p (machine_mode mode)
8599 /* Avoid indexed addressing for modes that have non-indexed
8600 load/store instruction forms. */
8601 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8604 bool
8605 legitimate_indirect_address_p (rtx x, int strict)
8607 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8610 bool
8611 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8613 if (!TARGET_MACHO || !flag_pic
8614 || mode != SImode || GET_CODE (x) != MEM)
8615 return false;
8616 x = XEXP (x, 0);
8618 if (GET_CODE (x) != LO_SUM)
8619 return false;
8620 if (GET_CODE (XEXP (x, 0)) != REG)
8621 return false;
8622 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8623 return false;
8624 x = XEXP (x, 1);
8626 return CONSTANT_P (x);
8629 static bool
8630 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8632 if (GET_CODE (x) != LO_SUM)
8633 return false;
8634 if (GET_CODE (XEXP (x, 0)) != REG)
8635 return false;
8636 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8637 return false;
8638 /* quad word addresses are restricted, and we can't use LO_SUM. */
8639 if (mode_supports_vsx_dform_quad (mode))
8640 return false;
8641 x = XEXP (x, 1);
8643 if (TARGET_ELF || TARGET_MACHO)
8645 bool large_toc_ok;
8647 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8648 return false;
8649 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8650 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8651 recognizes some LO_SUM addresses as valid although this
8652 function says the opposite. In most cases, LRA can generate
8653 correct code for address reloads through its own transformations;
8654 it is only some LO_SUM cases that it cannot manage. So we add
8655 code here, analogous to that in rs6000_legitimize_reload_address
8656 for LO_SUM, saying that some addresses are still valid. */
8657 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8658 && small_toc_ref (x, VOIDmode));
8659 if (TARGET_TOC && ! large_toc_ok)
8660 return false;
8661 if (GET_MODE_NUNITS (mode) != 1)
8662 return false;
8663 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8664 && !(/* ??? Assume floating point reg based on mode? */
8665 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8666 && (mode == DFmode || mode == DDmode)))
8667 return false;
8669 return CONSTANT_P (x) || large_toc_ok;
8672 return false;
8676 /* Try machine-dependent ways of modifying an illegitimate address
8677 to be legitimate. If we find one, return the new, valid address.
8678 This is used from only one place: `memory_address' in explow.c.
8680 OLDX is the address as it was before break_out_memory_refs was
8681 called. In some cases it is useful to look at this to decide what
8682 needs to be done.
8684 It is always safe for this function to do nothing. It exists to
8685 recognize opportunities to optimize the output.
8687 On RS/6000, first check for the sum of a register with a constant
8688 integer that is out of range. If so, generate code to add the
8689 constant with the low-order 16 bits masked to the register and force
8690 this result into another register (this can be done with `cau').
8691 Then generate an address of REG+(CONST&0xffff), allowing for the
8692 possibility of bit 16 being a one.
8694 Then check for the sum of a register and something not constant, try to
8695 load the other things into a register and return the sum. */
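/* For example, legitimizing (plus r3 (const_int 0x12348)) below splits
   the constant into high_int == 0x10000 and low_int == 0x2348, forces
   r3 + 0x10000 into a new register, and returns an address of the form
   (plus sum 0x2348).  */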
8697 static rtx
8698 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8699 machine_mode mode)
8701 unsigned int extra;
8703 if (!reg_offset_addressing_ok_p (mode)
8704 || mode_supports_vsx_dform_quad (mode))
8706 if (virtual_stack_registers_memory_p (x))
8707 return x;
8709 /* In theory we should not be seeing addresses of the form reg+0,
8710 but just in case it is generated, optimize it away. */
8711 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8712 return force_reg (Pmode, XEXP (x, 0));
8714 /* For TImode with load/store quad, restrict addresses to just a single
8715 pointer, so it works with both GPRs and VSX registers. */
8716 /* Make sure both operands are registers. */
8717 else if (GET_CODE (x) == PLUS
8718 && (mode != TImode || !TARGET_VSX))
8719 return gen_rtx_PLUS (Pmode,
8720 force_reg (Pmode, XEXP (x, 0)),
8721 force_reg (Pmode, XEXP (x, 1)));
8722 else
8723 return force_reg (Pmode, x);
8725 if (GET_CODE (x) == SYMBOL_REF)
8727 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8728 if (model != 0)
8729 return rs6000_legitimize_tls_address (x, model);
8732 extra = 0;
8733 switch (mode)
8735 case E_TFmode:
8736 case E_TDmode:
8737 case E_TImode:
8738 case E_PTImode:
8739 case E_IFmode:
8740 case E_KFmode:
8741 /* As in legitimate_offset_address_p we do not assume
8742 worst-case. The mode here is just a hint as to the registers
8743 used. A TImode is usually in gprs, but may actually be in
8744 fprs. Leave worst-case scenario for reload to handle via
8745 insn constraints. PTImode is only GPRs. */
8746 extra = 8;
8747 break;
8748 default:
8749 break;
8752 if (GET_CODE (x) == PLUS
8753 && GET_CODE (XEXP (x, 0)) == REG
8754 && GET_CODE (XEXP (x, 1)) == CONST_INT
8755 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8756 >= 0x10000 - extra)
8757 && !PAIRED_VECTOR_MODE (mode))
8759 HOST_WIDE_INT high_int, low_int;
8760 rtx sum;
8761 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8762 if (low_int >= 0x8000 - extra)
8763 low_int = 0;
8764 high_int = INTVAL (XEXP (x, 1)) - low_int;
8765 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8766 GEN_INT (high_int)), 0);
8767 return plus_constant (Pmode, sum, low_int);
8769 else if (GET_CODE (x) == PLUS
8770 && GET_CODE (XEXP (x, 0)) == REG
8771 && GET_CODE (XEXP (x, 1)) != CONST_INT
8772 && GET_MODE_NUNITS (mode) == 1
8773 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8774 || (/* ??? Assume floating point reg based on mode? */
8775 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8776 && (mode == DFmode || mode == DDmode)))
8777 && !avoiding_indexed_address_p (mode))
8779 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8780 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8782 else if (PAIRED_VECTOR_MODE (mode))
8784 if (mode == DImode)
8785 return x;
8786 /* We accept [reg + reg]. */
8788 if (GET_CODE (x) == PLUS)
8790 rtx op1 = XEXP (x, 0);
8791 rtx op2 = XEXP (x, 1);
8792 rtx y;
8794 op1 = force_reg (Pmode, op1);
8795 op2 = force_reg (Pmode, op2);
8797 /* We can't always do [reg + reg] for these, because [reg +
8798 reg + offset] is not a legitimate addressing mode. */
8799 y = gen_rtx_PLUS (Pmode, op1, op2);
8801 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8802 return force_reg (Pmode, y);
8803 else
8804 return y;
8807 return force_reg (Pmode, x);
8809 else if ((TARGET_ELF
8810 #if TARGET_MACHO
8811 || !MACHO_DYNAMIC_NO_PIC_P
8812 #endif
8814 && TARGET_32BIT
8815 && TARGET_NO_TOC
8816 && ! flag_pic
8817 && GET_CODE (x) != CONST_INT
8818 && GET_CODE (x) != CONST_WIDE_INT
8819 && GET_CODE (x) != CONST_DOUBLE
8820 && CONSTANT_P (x)
8821 && GET_MODE_NUNITS (mode) == 1
8822 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8823 || (/* ??? Assume floating point reg based on mode? */
8824 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8825 && (mode == DFmode || mode == DDmode))))
8827 rtx reg = gen_reg_rtx (Pmode);
8828 if (TARGET_ELF)
8829 emit_insn (gen_elf_high (reg, x));
8830 else
8831 emit_insn (gen_macho_high (reg, x));
8832 return gen_rtx_LO_SUM (Pmode, reg, x);
8834 else if (TARGET_TOC
8835 && GET_CODE (x) == SYMBOL_REF
8836 && constant_pool_expr_p (x)
8837 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8838 return create_TOC_reference (x, NULL_RTX);
8839 else
8840 return x;
8843 /* Debug version of rs6000_legitimize_address. */
8844 static rtx
8845 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8847 rtx ret;
8848 rtx_insn *insns;
8850 start_sequence ();
8851 ret = rs6000_legitimize_address (x, oldx, mode);
8852 insns = get_insns ();
8853 end_sequence ();
8855 if (ret != x)
8857 fprintf (stderr,
8858 "\nrs6000_legitimize_address: mode %s, old code %s, "
8859 "new code %s, modified\n",
8860 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8861 GET_RTX_NAME (GET_CODE (ret)));
8863 fprintf (stderr, "Original address:\n");
8864 debug_rtx (x);
8866 fprintf (stderr, "oldx:\n");
8867 debug_rtx (oldx);
8869 fprintf (stderr, "New address:\n");
8870 debug_rtx (ret);
8872 if (insns)
8874 fprintf (stderr, "Insns added:\n");
8875 debug_rtx_list (insns, 20);
8878 else
8880 fprintf (stderr,
8881 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8882 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8884 debug_rtx (x);
8887 if (insns)
8888 emit_insn (insns);
8890 return ret;
8893 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8894 We need to emit DTP-relative relocations. */
8896 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8897 static void
8898 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8900 switch (size)
8902 case 4:
8903 fputs ("\t.long\t", file);
8904 break;
8905 case 8:
8906 fputs (DOUBLE_INT_ASM_OP, file);
8907 break;
8908 default:
8909 gcc_unreachable ();
8911 output_addr_const (file, x);
8912 if (TARGET_ELF)
8913 fputs ("@dtprel+0x8000", file);
8914 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8916 switch (SYMBOL_REF_TLS_MODEL (x))
8918 case 0:
8919 break;
8920 case TLS_MODEL_LOCAL_EXEC:
8921 fputs ("@le", file);
8922 break;
8923 case TLS_MODEL_INITIAL_EXEC:
8924 fputs ("@ie", file);
8925 break;
8926 case TLS_MODEL_GLOBAL_DYNAMIC:
8927 case TLS_MODEL_LOCAL_DYNAMIC:
8928 fputs ("@m", file);
8929 break;
8930 default:
8931 gcc_unreachable ();
8936 /* Return true if X is a symbol that refers to real (rather than emulated)
8937 TLS. */
8939 static bool
8940 rs6000_real_tls_symbol_ref_p (rtx x)
8942 return (GET_CODE (x) == SYMBOL_REF
8943 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8946 /* In the name of slightly smaller debug output, and to cater to
8947 general assembler lossage, recognize various UNSPEC sequences
8948 and turn them back into a direct symbol reference. */
8950 static rtx
8951 rs6000_delegitimize_address (rtx orig_x)
8953 rtx x, y, offset;
8955 orig_x = delegitimize_mem_from_attrs (orig_x);
8956 x = orig_x;
8957 if (MEM_P (x))
8958 x = XEXP (x, 0);
8960 y = x;
8961 if (TARGET_CMODEL != CMODEL_SMALL
8962 && GET_CODE (y) == LO_SUM)
8963 y = XEXP (y, 1);
8965 offset = NULL_RTX;
8966 if (GET_CODE (y) == PLUS
8967 && GET_MODE (y) == Pmode
8968 && CONST_INT_P (XEXP (y, 1)))
8970 offset = XEXP (y, 1);
8971 y = XEXP (y, 0);
8974 if (GET_CODE (y) == UNSPEC
8975 && XINT (y, 1) == UNSPEC_TOCREL)
8977 y = XVECEXP (y, 0, 0);
8979 #ifdef HAVE_AS_TLS
8980 /* Do not associate thread-local symbols with the original
8981 constant pool symbol. */
8982 if (TARGET_XCOFF
8983 && GET_CODE (y) == SYMBOL_REF
8984 && CONSTANT_POOL_ADDRESS_P (y)
8985 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8986 return orig_x;
8987 #endif
8989 if (offset != NULL_RTX)
8990 y = gen_rtx_PLUS (Pmode, y, offset);
8991 if (!MEM_P (orig_x))
8992 return y;
8993 else
8994 return replace_equiv_address_nv (orig_x, y);
8997 if (TARGET_MACHO
8998 && GET_CODE (orig_x) == LO_SUM
8999 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9001 y = XEXP (XEXP (orig_x, 1), 0);
9002 if (GET_CODE (y) == UNSPEC
9003 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9004 return XVECEXP (y, 0, 0);
9007 return orig_x;
9010 /* Return true if X shouldn't be emitted into the debug info.
9011 The linker doesn't like .toc section references from
9012 .debug_* sections, so reject .toc section symbols. */
9014 static bool
9015 rs6000_const_not_ok_for_debug_p (rtx x)
9017 if (GET_CODE (x) == SYMBOL_REF
9018 && CONSTANT_POOL_ADDRESS_P (x))
9020 rtx c = get_pool_constant (x);
9021 machine_mode cmode = get_pool_mode (x);
9022 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9023 return true;
9026 return false;
9030 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9032 static bool
9033 rs6000_legitimate_combined_insn (rtx_insn *insn)
9035 int icode = INSN_CODE (insn);
9037 /* Reject creating doloop insns. Combine should not be allowed
9038 to create these for a number of reasons:
9039 1) In a nested loop, if combine creates one of these in an
9040 outer loop and the register allocator happens to allocate ctr
9041 to the outer loop insn, then the inner loop can't use ctr.
9042 Inner loops ought to be more highly optimized.
9043 2) Combine often wants to create one of these from what was
9044 originally a three insn sequence, first combining the three
9045 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9046 allocated ctr, the splitter takes us back to the three insn
9047 sequence. It's better to stop combine at the two insn
9048 sequence.
9049 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9050 insns, the register allocator sometimes uses floating point
9051 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9052 jump insn and output reloads are not implemented for jumps,
9053 the ctrsi/ctrdi splitters need to handle all possible cases.
9054 That's a pain, and it gets to be seriously difficult when a
9055 splitter that runs after reload needs memory to transfer from
9056 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9057 for the difficult case. It's better to not create problems
9058 in the first place. */
9059 if (icode != CODE_FOR_nothing
9060 && (icode == CODE_FOR_ctrsi_internal1
9061 || icode == CODE_FOR_ctrdi_internal1
9062 || icode == CODE_FOR_ctrsi_internal2
9063 || icode == CODE_FOR_ctrdi_internal2
9064 || icode == CODE_FOR_ctrsi_internal3
9065 || icode == CODE_FOR_ctrdi_internal3
9066 || icode == CODE_FOR_ctrsi_internal4
9067 || icode == CODE_FOR_ctrdi_internal4))
9068 return false;
9070 return true;
9073 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9075 static GTY(()) rtx rs6000_tls_symbol;
9076 static rtx
9077 rs6000_tls_get_addr (void)
9079 if (!rs6000_tls_symbol)
9080 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9082 return rs6000_tls_symbol;
9085 /* Construct the SYMBOL_REF for TLS GOT references. */
9087 static GTY(()) rtx rs6000_got_symbol;
9088 static rtx
9089 rs6000_got_sym (void)
9091 if (!rs6000_got_symbol)
9093 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9094 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9095 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9098 return rs6000_got_symbol;
9101 /* AIX Thread-Local Address support. */
9103 static rtx
9104 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9106 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9107 const char *name;
9108 char *tlsname;
9110 name = XSTR (addr, 0);
9111 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9112 or the symbol will be in TLS private data section. */
9113 if (name[strlen (name) - 1] != ']'
9114 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9115 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9117 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9118 strcpy (tlsname, name);
9119 strcat (tlsname,
9120 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9121 tlsaddr = copy_rtx (addr);
9122 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9124 else
9125 tlsaddr = addr;
9127 /* Place addr into TOC constant pool. */
9128 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9130 /* Output the TOC entry and create the MEM referencing the value. */
9131 if (constant_pool_expr_p (XEXP (sym, 0))
9132 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9134 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9135 mem = gen_const_mem (Pmode, tocref);
9136 set_mem_alias_set (mem, get_TOC_alias_set ());
9138 else
9139 return sym;
9141 /* Use global-dynamic for local-dynamic. */
9142 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9143 || model == TLS_MODEL_LOCAL_DYNAMIC)
9145 /* Create new TOC reference for @m symbol. */
9146 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9147 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9148 strcpy (tlsname, "*LCM");
9149 strcat (tlsname, name + 3);
9150 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9151 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9152 tocref = create_TOC_reference (modaddr, NULL_RTX);
9153 rtx modmem = gen_const_mem (Pmode, tocref);
9154 set_mem_alias_set (modmem, get_TOC_alias_set ());
9156 rtx modreg = gen_reg_rtx (Pmode);
9157 emit_insn (gen_rtx_SET (modreg, modmem));
9159 tmpreg = gen_reg_rtx (Pmode);
9160 emit_insn (gen_rtx_SET (tmpreg, mem));
9162 dest = gen_reg_rtx (Pmode);
9163 if (TARGET_32BIT)
9164 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9165 else
9166 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9167 return dest;
9169 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9170 else if (TARGET_32BIT)
9172 tlsreg = gen_reg_rtx (SImode);
9173 emit_insn (gen_tls_get_tpointer (tlsreg));
9175 else
9176 tlsreg = gen_rtx_REG (DImode, 13);
9178 /* Load the TOC value into temporary register. */
9179 tmpreg = gen_reg_rtx (Pmode);
9180 emit_insn (gen_rtx_SET (tmpreg, mem));
9181 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9182 gen_rtx_MINUS (Pmode, addr, tlsreg));
9184 /* Add TOC symbol value to TLS pointer. */
9185 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9187 return dest;
9190 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9191 this (thread-local) address. */
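/* Illustrative summary (editorial note, not from the original source):
   the strategies below are, roughly, as follows.  Local-exec adds a
   tprel offset to the thread pointer (GPR 13 on 64-bit, GPR 2 on
   32-bit), using one add for -mtls-size=16 and a high/lo pair for
   -mtls-size=32; global-dynamic and local-dynamic call __tls_get_addr
   through the GOT; initial-exec loads the tprel offset from the GOT
   and adds the thread pointer.  */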
9193 static rtx
9194 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9196 rtx dest, insn;
9198 if (TARGET_XCOFF)
9199 return rs6000_legitimize_tls_address_aix (addr, model);
9201 dest = gen_reg_rtx (Pmode);
9202 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9204 rtx tlsreg;
9206 if (TARGET_64BIT)
9208 tlsreg = gen_rtx_REG (Pmode, 13);
9209 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9211 else
9213 tlsreg = gen_rtx_REG (Pmode, 2);
9214 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9216 emit_insn (insn);
9218 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9220 rtx tlsreg, tmp;
9222 tmp = gen_reg_rtx (Pmode);
9223 if (TARGET_64BIT)
9225 tlsreg = gen_rtx_REG (Pmode, 13);
9226 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9228 else
9230 tlsreg = gen_rtx_REG (Pmode, 2);
9231 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9233 emit_insn (insn);
9234 if (TARGET_64BIT)
9235 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9236 else
9237 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9238 emit_insn (insn);
9240 else
9242 rtx r3, got, tga, tmp1, tmp2, call_insn;
9244 /* We currently use relocations like @got@tlsgd for tls, which
9245 means the linker will handle allocation of tls entries, placing
9246 them in the .got section. So use a pointer to the .got section,
9247 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9248 or to secondary GOT sections used by 32-bit -fPIC. */
9249 if (TARGET_64BIT)
9250 got = gen_rtx_REG (Pmode, 2);
9251 else
9253 if (flag_pic == 1)
9254 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9255 else
9257 rtx gsym = rs6000_got_sym ();
9258 got = gen_reg_rtx (Pmode);
9259 if (flag_pic == 0)
9260 rs6000_emit_move (got, gsym, Pmode);
9261 else
9263 rtx mem, lab;
9265 tmp1 = gen_reg_rtx (Pmode);
9266 tmp2 = gen_reg_rtx (Pmode);
9267 mem = gen_const_mem (Pmode, tmp1);
9268 lab = gen_label_rtx ();
9269 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9270 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9271 if (TARGET_LINK_STACK)
9272 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9273 emit_move_insn (tmp2, mem);
9274 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9275 set_unique_reg_note (last, REG_EQUAL, gsym);
9280 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9282 tga = rs6000_tls_get_addr ();
9283 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9284 const0_rtx, Pmode);
9286 r3 = gen_rtx_REG (Pmode, 3);
9287 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9289 if (TARGET_64BIT)
9290 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9291 else
9292 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9294 else if (DEFAULT_ABI == ABI_V4)
9295 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9296 else
9297 gcc_unreachable ();
9298 call_insn = last_call_insn ();
9299 PATTERN (call_insn) = insn;
9300 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9301 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9302 pic_offset_table_rtx);
9304 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9306 tga = rs6000_tls_get_addr ();
9307 tmp1 = gen_reg_rtx (Pmode);
9308 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9309 const0_rtx, Pmode);
9311 r3 = gen_rtx_REG (Pmode, 3);
9312 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9314 if (TARGET_64BIT)
9315 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9316 else
9317 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9319 else if (DEFAULT_ABI == ABI_V4)
9320 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9321 else
9322 gcc_unreachable ();
9323 call_insn = last_call_insn ();
9324 PATTERN (call_insn) = insn;
9325 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9326 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9327 pic_offset_table_rtx);
9329 if (rs6000_tls_size == 16)
9331 if (TARGET_64BIT)
9332 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9333 else
9334 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9336 else if (rs6000_tls_size == 32)
9338 tmp2 = gen_reg_rtx (Pmode);
9339 if (TARGET_64BIT)
9340 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9341 else
9342 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9343 emit_insn (insn);
9344 if (TARGET_64BIT)
9345 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9346 else
9347 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9349 else
9351 tmp2 = gen_reg_rtx (Pmode);
9352 if (TARGET_64BIT)
9353 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9354 else
9355 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9356 emit_insn (insn);
9357 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9359 emit_insn (insn);
9361 else
9363 /* IE, or 64-bit offset LE. */
9364 tmp2 = gen_reg_rtx (Pmode);
9365 if (TARGET_64BIT)
9366 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9367 else
9368 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9369 emit_insn (insn);
9370 if (TARGET_64BIT)
9371 insn = gen_tls_tls_64 (dest, tmp2, addr);
9372 else
9373 insn = gen_tls_tls_32 (dest, tmp2, addr);
9374 emit_insn (insn);
9378 return dest;
9381 /* Only create the global variable for the stack protect guard if we are using
9382 the global flavor of that guard. */
9383 static tree
9384 rs6000_init_stack_protect_guard (void)
9386 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9387 return default_stack_protect_guard ();
9389 return NULL_TREE;
9392 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9394 static bool
9395 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9397 if (GET_CODE (x) == HIGH
9398 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9399 return true;
9401 /* A TLS symbol in the TOC cannot contain a sum. */
9402 if (GET_CODE (x) == CONST
9403 && GET_CODE (XEXP (x, 0)) == PLUS
9404 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9405 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9406 return true;
9408 /* Do not place an ELF TLS symbol in the constant pool. */
9409 return TARGET_ELF && tls_referenced_p (x);
9412 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9413 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9414 can be addressed relative to the toc pointer. */
9416 static bool
9417 use_toc_relative_ref (rtx sym, machine_mode mode)
9419 return ((constant_pool_expr_p (sym)
9420 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9421 get_pool_mode (sym)))
9422 || (TARGET_CMODEL == CMODEL_MEDIUM
9423 && SYMBOL_REF_LOCAL_P (sym)
9424 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9427 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9428 replace the input X, or the original X if no replacement is called for.
9429 The output parameter *WIN is 1 if the calling macro should goto WIN,
9430 0 if it should not.
9432 For RS/6000, we wish to handle large displacements off a base
9433 register by splitting the addend across an addis and the mem insn.
9434 This cuts the number of extra insns needed from 3 to 1.
9436 On Darwin, we use this to generate code for floating point constants.
9437 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9438 The Darwin code is inside #if TARGET_MACHO because only then are the
9439 machopic_* functions defined. */
9440 static rtx
9441 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9442 int opnum, int type,
9443 int ind_levels ATTRIBUTE_UNUSED, int *win)
9445 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9446 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9448 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9449 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9450 if (reg_offset_p
9451 && opnum == 1
9452 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9453 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9454 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9455 && TARGET_P9_VECTOR)
9456 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9457 && TARGET_P9_VECTOR)))
9458 reg_offset_p = false;
9460 /* We must recognize output that we have already generated ourselves. */
9461 if (GET_CODE (x) == PLUS
9462 && GET_CODE (XEXP (x, 0)) == PLUS
9463 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9464 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9465 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9467 if (TARGET_DEBUG_ADDR)
9469 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9470 debug_rtx (x);
9472 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9473 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9474 opnum, (enum reload_type) type);
9475 *win = 1;
9476 return x;
9479 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9480 if (GET_CODE (x) == LO_SUM
9481 && GET_CODE (XEXP (x, 0)) == HIGH)
9483 if (TARGET_DEBUG_ADDR)
9485 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9486 debug_rtx (x);
9488 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9489 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9490 opnum, (enum reload_type) type);
9491 *win = 1;
9492 return x;
9495 #if TARGET_MACHO
9496 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9497 && GET_CODE (x) == LO_SUM
9498 && GET_CODE (XEXP (x, 0)) == PLUS
9499 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9500 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9501 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9502 && machopic_operand_p (XEXP (x, 1)))
9504 /* Result of previous invocation of this function on Darwin
9505 floating point constant. */
9506 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9507 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9508 opnum, (enum reload_type) type);
9509 *win = 1;
9510 return x;
9512 #endif
9514 if (TARGET_CMODEL != CMODEL_SMALL
9515 && reg_offset_p
9516 && !quad_offset_p
9517 && small_toc_ref (x, VOIDmode))
9519 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9520 x = gen_rtx_LO_SUM (Pmode, hi, x);
9521 if (TARGET_DEBUG_ADDR)
9523 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9524 debug_rtx (x);
9526 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9527 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9528 opnum, (enum reload_type) type);
9529 *win = 1;
9530 return x;
9533 if (GET_CODE (x) == PLUS
9534 && REG_P (XEXP (x, 0))
9535 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9536 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9537 && CONST_INT_P (XEXP (x, 1))
9538 && reg_offset_p
9539 && !PAIRED_VECTOR_MODE (mode)
9540 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9542 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9543 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9544 HOST_WIDE_INT high
9545 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
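/* Worked example (illustrative): val = 0x12345 splits into
   low = 0x2345 and high = 0x10000; val = 0x18000 splits into
   low = -0x8000 and high = 0x20000.  In both cases high + low == val
   and low fits the signed 16-bit displacement of the mem insn.  */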
9547 /* Check for 32-bit overflow or quad addresses with one of the
9548 four least significant bits set. */
9549 if (high + low != val
9550 || (quad_offset_p && (low & 0xf)))
9552 *win = 0;
9553 return x;
9556 /* Reload the high part into a base reg; leave the low part
9557 in the mem directly. */
9559 x = gen_rtx_PLUS (GET_MODE (x),
9560 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9561 GEN_INT (high)),
9562 GEN_INT (low));
9564 if (TARGET_DEBUG_ADDR)
9566 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9567 debug_rtx (x);
9569 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9570 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9571 opnum, (enum reload_type) type);
9572 *win = 1;
9573 return x;
9576 if (GET_CODE (x) == SYMBOL_REF
9577 && reg_offset_p
9578 && !quad_offset_p
9579 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9580 && !PAIRED_VECTOR_MODE (mode)
9581 #if TARGET_MACHO
9582 && DEFAULT_ABI == ABI_DARWIN
9583 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9584 && machopic_symbol_defined_p (x)
9585 #else
9586 && DEFAULT_ABI == ABI_V4
9587 && !flag_pic
9588 #endif
9589 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9590 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9591 without fprs.
9592 ??? Assume floating point reg based on mode? This assumption is
9593 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9594 where reload ends up doing a DFmode load of a constant from
9595 mem using two gprs. Unfortunately, at this point reload
9596 hasn't yet selected regs so poking around in reload data
9597 won't help and even if we could figure out the regs reliably,
9598 we'd still want to allow this transformation when the mem is
9599 naturally aligned. Since we say the address is good here, we
9600 can't disable offsets from LO_SUMs in mem_operand_gpr.
9601 FIXME: Allow offset from lo_sum for other modes too, when
9602 mem is sufficiently aligned.
9604 Also disallow this if the type can go in VMX/Altivec registers, since
9605 those registers do not have d-form (reg+offset) address modes. */
9606 && !reg_addr[mode].scalar_in_vmx_p
9607 && mode != TFmode
9608 && mode != TDmode
9609 && mode != IFmode
9610 && mode != KFmode
9611 && (mode != TImode || !TARGET_VSX)
9612 && mode != PTImode
9613 && (mode != DImode || TARGET_POWERPC64)
9614 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9615 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9617 #if TARGET_MACHO
9618 if (flag_pic)
9620 rtx offset = machopic_gen_offset (x);
9621 x = gen_rtx_LO_SUM (GET_MODE (x),
9622 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9623 gen_rtx_HIGH (Pmode, offset)), offset);
9625 else
9626 #endif
9627 x = gen_rtx_LO_SUM (GET_MODE (x),
9628 gen_rtx_HIGH (Pmode, x), x);
9630 if (TARGET_DEBUG_ADDR)
9632 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9633 debug_rtx (x);
9635 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9636 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9637 opnum, (enum reload_type) type);
9638 *win = 1;
9639 return x;
9642 /* Reload an offset address wrapped by an AND that represents the
9643 masking of the lower bits. Strip the outer AND and let reload
9644 convert the offset address into an indirect address. For VSX,
9645 force reload to create the address with an AND in a separate
9646 register, because we can't guarantee an altivec register will
9647 be used. */
9648 if (VECTOR_MEM_ALTIVEC_P (mode)
9649 && GET_CODE (x) == AND
9650 && GET_CODE (XEXP (x, 0)) == PLUS
9651 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9652 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9653 && GET_CODE (XEXP (x, 1)) == CONST_INT
9654 && INTVAL (XEXP (x, 1)) == -16)
9656 x = XEXP (x, 0);
9657 *win = 1;
9658 return x;
9661 if (TARGET_TOC
9662 && reg_offset_p
9663 && !quad_offset_p
9664 && GET_CODE (x) == SYMBOL_REF
9665 && use_toc_relative_ref (x, mode))
9667 x = create_TOC_reference (x, NULL_RTX);
9668 if (TARGET_CMODEL != CMODEL_SMALL)
9670 if (TARGET_DEBUG_ADDR)
9672 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9673 debug_rtx (x);
9675 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9676 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9677 opnum, (enum reload_type) type);
9679 *win = 1;
9680 return x;
9682 *win = 0;
9683 return x;
9686 /* Debug version of rs6000_legitimize_reload_address. */
9687 static rtx
9688 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9689 int opnum, int type,
9690 int ind_levels, int *win)
9692 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9693 ind_levels, win);
9694 fprintf (stderr,
9695 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9696 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9697 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9698 debug_rtx (x);
9700 if (x == ret)
9701 fprintf (stderr, "Same address returned\n");
9702 else if (!ret)
9703 fprintf (stderr, "NULL returned\n");
9704 else
9706 fprintf (stderr, "New address:\n");
9707 debug_rtx (ret);
9710 return ret;
9713 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9714 that is a valid memory address for an instruction.
9715 The MODE argument is the machine mode for the MEM expression
9716 that wants to use this address.
9718 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9719 refers to a constant pool entry of an address (or the sum of it
9720 plus a constant), a short (16-bit signed) constant plus a register,
9721 the sum of two registers, or a register indirect, possibly with an
9722 auto-increment. For DFmode, DDmode and DImode with a constant plus
9723 register, we must ensure that both words are addressable, or on
9724 PowerPC64 that the offset is word aligned.
9726 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9727 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9728 because adjacent memory cells are accessed by adding word-sized offsets
9729 during assembly output. */
9730 static bool
9731 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9733 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9734 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9736 /* If this is an unaligned stvx/lvx type address, discard the outer AND. */
9737 if (VECTOR_MEM_ALTIVEC_P (mode)
9738 && GET_CODE (x) == AND
9739 && GET_CODE (XEXP (x, 1)) == CONST_INT
9740 && INTVAL (XEXP (x, 1)) == -16)
9741 x = XEXP (x, 0);
9743 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9744 return 0;
9745 if (legitimate_indirect_address_p (x, reg_ok_strict))
9746 return 1;
9747 if (TARGET_UPDATE
9748 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9749 && mode_supports_pre_incdec_p (mode)
9750 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9751 return 1;
9752 /* Handle restricted vector d-form offsets in ISA 3.0. */
9753 if (quad_offset_p)
9755 if (quad_address_p (x, mode, reg_ok_strict))
9756 return 1;
9758 else if (virtual_stack_registers_memory_p (x))
9759 return 1;
9761 else if (reg_offset_p)
9763 if (legitimate_small_data_p (mode, x))
9764 return 1;
9765 if (legitimate_constant_pool_address_p (x, mode,
9766 reg_ok_strict || lra_in_progress))
9767 return 1;
9768 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9769 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9770 return 1;
9773 /* For TImode, if we have TImode in VSX registers, only allow register
9774 indirect addresses. This will allow the values to go in either GPRs
9775 or VSX registers without reloading. The vector types would tend to
9776 go into VSX registers, so we allow REG+REG, while TImode seems
9777 somewhat split, in that some uses are GPR based, and some VSX based. */
9778 /* FIXME: We could loosen this by changing the following to
9779 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9780 but currently we cannot allow REG+REG addressing for TImode. See
9781 PR72827 for complete details on how this ends up hoodwinking DSE. */
9782 if (mode == TImode && TARGET_VSX)
9783 return 0;
9784 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9785 if (! reg_ok_strict
9786 && reg_offset_p
9787 && GET_CODE (x) == PLUS
9788 && GET_CODE (XEXP (x, 0)) == REG
9789 && (XEXP (x, 0) == virtual_stack_vars_rtx
9790 || XEXP (x, 0) == arg_pointer_rtx)
9791 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9792 return 1;
9793 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9794 return 1;
9795 if (!FLOAT128_2REG_P (mode)
9796 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9797 || TARGET_POWERPC64
9798 || (mode != DFmode && mode != DDmode))
9799 && (TARGET_POWERPC64 || mode != DImode)
9800 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9801 && mode != PTImode
9802 && !avoiding_indexed_address_p (mode)
9803 && legitimate_indexed_address_p (x, reg_ok_strict))
9804 return 1;
9805 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9806 && mode_supports_pre_modify_p (mode)
9807 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9808 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9809 reg_ok_strict, false)
9810 || (!avoiding_indexed_address_p (mode)
9811 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9812 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9813 return 1;
9814 if (reg_offset_p && !quad_offset_p
9815 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9816 return 1;
9817 return 0;
9820 /* Debug version of rs6000_legitimate_address_p. */
9821 static bool
9822 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9823 bool reg_ok_strict)
9825 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9826 fprintf (stderr,
9827 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9828 "strict = %d, reload = %s, code = %s\n",
9829 ret ? "true" : "false",
9830 GET_MODE_NAME (mode),
9831 reg_ok_strict,
9832 (reload_completed ? "after" : "before"),
9833 GET_RTX_NAME (GET_CODE (x)));
9834 debug_rtx (x);
9836 return ret;
9839 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9841 static bool
9842 rs6000_mode_dependent_address_p (const_rtx addr,
9843 addr_space_t as ATTRIBUTE_UNUSED)
9845 return rs6000_mode_dependent_address_ptr (addr);
9848 /* Go to LABEL if ADDR (a legitimate address expression)
9849 has an effect that depends on the machine mode it is used for.
9851 On the RS/6000 this is true of all integral offsets (since AltiVec
9852 and VSX modes don't allow them) and of pre-increment or decrement addressing.
9854 ??? Except that due to conceptual problems in offsettable_address_p
9855 we can't really report the problems of integral offsets. So leave
9856 this assuming that the adjustable offset must be valid for the
9857 sub-words of a TFmode operand, which is what we had before. */
9859 static bool
9860 rs6000_mode_dependent_address (const_rtx addr)
9862 switch (GET_CODE (addr))
9864 case PLUS:
9865 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9866 is considered a legitimate address before reload, so there
9867 are no offset restrictions in that case. Note that this
9868 condition is safe in strict mode because any address involving
9869 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9870 been rejected as illegitimate. */
9871 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9872 && XEXP (addr, 0) != arg_pointer_rtx
9873 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9875 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9876 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
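/* Editorial note (illustrative): this accepts offsets in
   [-0x8000, 0x7ff8) on 64-bit targets and [-0x8000, 0x7ff4) on
   32-bit ones; the 8- or 12-byte slack keeps the last word of a
   16-byte operand reachable with a valid 16-bit displacement.  */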
9878 break;
9880 case LO_SUM:
9881 /* Anything in the constant pool is sufficiently aligned that
9882 all bytes have the same high part address. */
9883 return !legitimate_constant_pool_address_p (addr, QImode, false);
9885 /* Auto-increment cases are now treated generically in recog.c. */
9886 case PRE_MODIFY:
9887 return TARGET_UPDATE;
9889 /* AND is only allowed in Altivec loads. */
9890 case AND:
9891 return true;
9893 default:
9894 break;
9897 return false;
9900 /* Debug version of rs6000_mode_dependent_address. */
9901 static bool
9902 rs6000_debug_mode_dependent_address (const_rtx addr)
9904 bool ret = rs6000_mode_dependent_address (addr);
9906 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9907 ret ? "true" : "false");
9908 debug_rtx (addr);
9910 return ret;
9913 /* Implement FIND_BASE_TERM. */
9916 rs6000_find_base_term (rtx op)
9918 rtx base;
9920 base = op;
9921 if (GET_CODE (base) == CONST)
9922 base = XEXP (base, 0);
9923 if (GET_CODE (base) == PLUS)
9924 base = XEXP (base, 0);
9925 if (GET_CODE (base) == UNSPEC)
9926 switch (XINT (base, 1))
9928 case UNSPEC_TOCREL:
9929 case UNSPEC_MACHOPIC_OFFSET:
9930 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9931 for aliasing purposes. */
9932 return XVECEXP (base, 0, 0);
9935 return op;
9938 /* More elaborate version of recog's offsettable_memref_p predicate
9939 that works around the ??? note of rs6000_mode_dependent_address.
9940 In particular it accepts
9942 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9944 in 32-bit mode, that the recog predicate rejects. */
9946 static bool
9947 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9949 bool worst_case;
9951 if (!MEM_P (op))
9952 return false;
9954 /* First mimic offsettable_memref_p. */
9955 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9956 return true;
9958 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9959 the latter predicate knows nothing about the mode of the memory
9960 reference and, therefore, assumes that it is the largest supported
9961 mode (TFmode). As a consequence, legitimate offsettable memory
9962 references are rejected. rs6000_legitimate_offset_address_p contains
9963 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9964 at least with a little bit of help here given that we know the
9965 actual registers used. */
9966 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9967 || GET_MODE_SIZE (reg_mode) == 4);
9968 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9969 true, worst_case);
9972 /* Determine the reassociation width to be used in reassociate_bb.
9973 This takes into account how many parallel operations we
9974 can actually do of a given type, and also the latency.
9976 int add/sub 6/cycle
9977 mul 2/cycle
9978 vect add/sub/mul 2/cycle
9979 fp add/sub/mul 2/cycle
9980 dfp 1/cycle
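/* For instance (illustrative): a width of 4 lets reassociate_bb
   rebalance a + b + c + d + e + f + g + h into four independent
   partial sums that can issue in parallel.  */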
9983 static int
9984 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9985 machine_mode mode)
9987 switch (rs6000_cpu)
9989 case PROCESSOR_POWER8:
9990 case PROCESSOR_POWER9:
9991 if (DECIMAL_FLOAT_MODE_P (mode))
9992 return 1;
9993 if (VECTOR_MODE_P (mode))
9994 return 4;
9995 if (INTEGRAL_MODE_P (mode))
9996 return opc == MULT_EXPR ? 4 : 6;
9997 if (FLOAT_MODE_P (mode))
9998 return 4;
9999 break;
10000 default:
10001 break;
10003 return 1;
10006 /* Change register usage conditional on target flags. */
10007 static void
10008 rs6000_conditional_register_usage (void)
10010 int i;
10012 if (TARGET_DEBUG_TARGET)
10013 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10015 /* Set MQ register fixed (already call_used) so that it will not be
10016 allocated. */
10017 fixed_regs[64] = 1;
10019 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10020 if (TARGET_64BIT)
10021 fixed_regs[13] = call_used_regs[13]
10022 = call_really_used_regs[13] = 1;
10024 /* Conditionally disable FPRs. */
10025 if (TARGET_SOFT_FLOAT)
10026 for (i = 32; i < 64; i++)
10027 fixed_regs[i] = call_used_regs[i]
10028 = call_really_used_regs[i] = 1;
10030 /* The TOC register is not killed across calls in a way that is
10031 visible to the compiler. */
10032 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10033 call_really_used_regs[2] = 0;
10035 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10036 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10038 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10039 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10040 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10041 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10043 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10044 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10045 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10046 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10048 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10049 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10050 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10052 if (!TARGET_ALTIVEC && !TARGET_VSX)
10054 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10055 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10056 call_really_used_regs[VRSAVE_REGNO] = 1;
10059 if (TARGET_ALTIVEC || TARGET_VSX)
10060 global_regs[VSCR_REGNO] = 1;
10062 if (TARGET_ALTIVEC_ABI)
10064 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10065 call_used_regs[i] = call_really_used_regs[i] = 1;
10067 /* AIX reserves VR20:31 in non-extended ABI mode. */
10068 if (TARGET_XCOFF)
10069 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10070 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10075 /* Output insns to set DEST equal to the constant SOURCE as a series of
10076 lis, ori and shl instructions and return TRUE. */
10078 bool
10079 rs6000_emit_set_const (rtx dest, rtx source)
10081 machine_mode mode = GET_MODE (dest);
10082 rtx temp, set;
10083 rtx_insn *insn;
10084 HOST_WIDE_INT c;
10086 gcc_checking_assert (CONST_INT_P (source));
10087 c = INTVAL (source);
10088 switch (mode)
10090 case E_QImode:
10091 case E_HImode:
10092 emit_insn (gen_rtx_SET (dest, source));
10093 return true;
10095 case E_SImode:
10096 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10098 emit_insn (gen_rtx_SET (copy_rtx (temp),
10099 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10100 emit_insn (gen_rtx_SET (dest,
10101 gen_rtx_IOR (SImode, copy_rtx (temp),
10102 GEN_INT (c & 0xffff))));
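/* Illustrative example: c = 0x12345678 becomes
   lis temp,0x1234 followed by ori dest,temp,0x5678.  */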
10103 break;
10105 case E_DImode:
10106 if (!TARGET_POWERPC64)
10108 rtx hi, lo;
10110 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10111 DImode);
10112 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10113 DImode);
10114 emit_move_insn (hi, GEN_INT (c >> 32));
10115 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10116 emit_move_insn (lo, GEN_INT (c));
10118 else
10119 rs6000_emit_set_long_const (dest, c);
10120 break;
10122 default:
10123 gcc_unreachable ();
10126 insn = get_last_insn ();
10127 set = single_set (insn);
10128 if (! CONSTANT_P (SET_SRC (set)))
10129 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10131 return true;
10134 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10135 Output insns to set DEST equal to the constant C as a series of
10136 lis, ori and shl instructions. */
10138 static void
10139 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10141 rtx temp;
10142 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10144 ud1 = c & 0xffff;
10145 c = c >> 16;
10146 ud2 = c & 0xffff;
10147 c = c >> 16;
10148 ud3 = c & 0xffff;
10149 c = c >> 16;
10150 ud4 = c & 0xffff;
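/* Worked example (illustrative): c = 0x123456789abcdef0 decomposes
   into ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc and ud1 = 0xdef0;
   the general case below then emits lis/ori for the high 32 bits,
   a 32-bit left shift, and oris/ori to merge in ud2 and ud1.  */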
10152 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10153 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10154 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10156 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10157 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10159 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10161 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10162 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10163 if (ud1 != 0)
10164 emit_move_insn (dest,
10165 gen_rtx_IOR (DImode, copy_rtx (temp),
10166 GEN_INT (ud1)));
10168 else if (ud3 == 0 && ud4 == 0)
10170 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10172 gcc_assert (ud2 & 0x8000);
10173 emit_move_insn (copy_rtx (temp),
10174 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10175 if (ud1 != 0)
10176 emit_move_insn (copy_rtx (temp),
10177 gen_rtx_IOR (DImode, copy_rtx (temp),
10178 GEN_INT (ud1)));
10179 emit_move_insn (dest,
10180 gen_rtx_ZERO_EXTEND (DImode,
10181 gen_lowpart (SImode,
10182 copy_rtx (temp))));
10184 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10185 || (ud4 == 0 && ! (ud3 & 0x8000)))
10187 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10189 emit_move_insn (copy_rtx (temp),
10190 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10191 if (ud2 != 0)
10192 emit_move_insn (copy_rtx (temp),
10193 gen_rtx_IOR (DImode, copy_rtx (temp),
10194 GEN_INT (ud2)));
10195 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10196 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10197 GEN_INT (16)));
10198 if (ud1 != 0)
10199 emit_move_insn (dest,
10200 gen_rtx_IOR (DImode, copy_rtx (temp),
10201 GEN_INT (ud1)));
10203 else
10205 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10207 emit_move_insn (copy_rtx (temp),
10208 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10209 if (ud3 != 0)
10210 emit_move_insn (copy_rtx (temp),
10211 gen_rtx_IOR (DImode, copy_rtx (temp),
10212 GEN_INT (ud3)));
10214 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10215 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10216 GEN_INT (32)));
10217 if (ud2 != 0)
10218 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10219 gen_rtx_IOR (DImode, copy_rtx (temp),
10220 GEN_INT (ud2 << 16)));
10221 if (ud1 != 0)
10222 emit_move_insn (dest,
10223 gen_rtx_IOR (DImode, copy_rtx (temp),
10224 GEN_INT (ud1)));
10228 /* Helper for the following. Get rid of [r+r] memory refs
10229 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10231 static void
10232 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10234 if (GET_CODE (operands[0]) == MEM
10235 && GET_CODE (XEXP (operands[0], 0)) != REG
10236 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10237 GET_MODE (operands[0]), false))
10238 operands[0]
10239 = replace_equiv_address (operands[0],
10240 copy_addr_to_reg (XEXP (operands[0], 0)));
10242 if (GET_CODE (operands[1]) == MEM
10243 && GET_CODE (XEXP (operands[1], 0)) != REG
10244 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10245 GET_MODE (operands[1]), false))
10246 operands[1]
10247 = replace_equiv_address (operands[1],
10248 copy_addr_to_reg (XEXP (operands[1], 0)));
10251 /* Generate a vector of constants to permute MODE for a little-endian
10252 storage operation by swapping the two halves of a vector. */
10253 static rtvec
10254 rs6000_const_vec (machine_mode mode)
10256 int i, subparts;
10257 rtvec v;
10259 switch (mode)
10261 case E_V1TImode:
10262 subparts = 1;
10263 break;
10264 case E_V2DFmode:
10265 case E_V2DImode:
10266 subparts = 2;
10267 break;
10268 case E_V4SFmode:
10269 case E_V4SImode:
10270 subparts = 4;
10271 break;
10272 case E_V8HImode:
10273 subparts = 8;
10274 break;
10275 case E_V16QImode:
10276 subparts = 16;
10277 break;
10278 default:
10279 gcc_unreachable();
10282 v = rtvec_alloc (subparts);
10284 for (i = 0; i < subparts / 2; ++i)
10285 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10286 for (i = subparts / 2; i < subparts; ++i)
10287 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
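/* Illustrative example: for V4SImode this yields the permutation
   { 2, 3, 0, 1 }, i.e. the two 64-bit halves of the vector are
   swapped, matching the element reordering that lxvd2x and stxvd2x
   perform on little-endian targets.  */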
10289 return v;
10292 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10293 store operation. */
10294 void
10295 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10297 /* Scalar permutations are easier to express in integer modes than in
10298 floating-point modes, so cast them here. We use V1TImode instead
10299 of TImode to ensure that the values don't go through GPRs. */
10300 if (FLOAT128_VECTOR_P (mode))
10302 dest = gen_lowpart (V1TImode, dest);
10303 source = gen_lowpart (V1TImode, source);
10304 mode = V1TImode;
10307 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10308 scalar. */
10309 if (mode == TImode || mode == V1TImode)
10310 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10311 GEN_INT (64))));
10312 else
10314 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10315 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10319 /* Emit a little-endian load from vector memory location SOURCE to VSX
10320 register DEST in mode MODE. The load is done with two permuting
10321 insns that represent an lxvd2x and xxpermdi. */
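/* Editorial note (illustrative): on little-endian, lxvd2x loads the
   two doublewords in swapped order, and the following xxpermdi-style
   permute swaps them back, so the pair behaves as a true LE vector
   load while using only the big-endian-ordered VSX insns.  */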
10322 void
10323 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10325 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10326 V1TImode). */
10327 if (mode == TImode || mode == V1TImode)
10329 mode = V2DImode;
10330 dest = gen_lowpart (V2DImode, dest);
10331 source = adjust_address (source, V2DImode, 0);
10334 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10335 rs6000_emit_le_vsx_permute (tmp, source, mode);
10336 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10339 /* Emit a little-endian store to vector memory location DEST from VSX
10340 register SOURCE in mode MODE. The store is done with two permuting
10341 insns that represent an xxpermdi and an stxvd2x. */
10342 void
10343 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10345 /* This should never be called during or after LRA, because it does
10346 not re-permute the source register. It is intended only for use
10347 during expand. */
10348 gcc_assert (!lra_in_progress && !reload_completed);
10350 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10351 V1TImode). */
10352 if (mode == TImode || mode == V1TImode)
10354 mode = V2DImode;
10355 dest = adjust_address (dest, V2DImode, 0);
10356 source = gen_lowpart (V2DImode, source);
10359 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10360 rs6000_emit_le_vsx_permute (tmp, source, mode);
10361 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10364 /* Emit a sequence representing a little-endian VSX load or store,
10365 moving data from SOURCE to DEST in mode MODE. This is done
10366 separately from rs6000_emit_move to ensure it is called only
10367 during expand. LE VSX loads and stores introduced later are
10368 handled with a split. The expand-time RTL generation allows
10369 us to optimize away redundant pairs of register-permutes. */
10370 void
10371 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10373 gcc_assert (!BYTES_BIG_ENDIAN
10374 && VECTOR_MEM_VSX_P (mode)
10375 && !TARGET_P9_VECTOR
10376 && !gpr_or_gpr_p (dest, source)
10377 && (MEM_P (source) ^ MEM_P (dest)));
10379 if (MEM_P (source))
10381 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10382 rs6000_emit_le_vsx_load (dest, source, mode);
10384 else
10386 if (!REG_P (source))
10387 source = force_reg (mode, source);
10388 rs6000_emit_le_vsx_store (dest, source, mode);
10392 /* Return whether an SFmode or SImode move can be done without converting one
10393 mode to another. This arises when we have:
10395 (SUBREG:SF (REG:SI ...))
10396 (SUBREG:SI (REG:SF ...))
10398 and one of the values is in a floating point/vector register, where SFmode
10399 scalars are stored in DFmode format. */
10401 bool
10402 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10404 if (TARGET_ALLOW_SF_SUBREG)
10405 return true;
10407 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10408 return true;
10410 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10411 return true;
10413 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10414 if (SUBREG_P (dest))
10416 rtx dest_subreg = SUBREG_REG (dest);
10417 rtx src_subreg = SUBREG_REG (src);
10418 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10421 return false;
10425 /* Helper function to change moves with:
10427 (SUBREG:SF (REG:SI)) and
10428 (SUBREG:SI (REG:SF))
10430 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10431 values are stored as DFmode values in the VSX registers. We need to convert
10432 the bits before we can use a direct move or operate on the bits in the
10433 vector register as an integer type.
10435 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10437 static bool
10438 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10440 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10441 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10442 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10444 rtx inner_source = SUBREG_REG (source);
10445 machine_mode inner_mode = GET_MODE (inner_source);
10447 if (mode == SImode && inner_mode == SFmode)
10449 emit_insn (gen_movsi_from_sf (dest, inner_source));
10450 return true;
10453 if (mode == SFmode && inner_mode == SImode)
10455 emit_insn (gen_movsf_from_si (dest, inner_source));
10456 return true;
10460 return false;
10463 /* Emit a move from SOURCE to DEST in mode MODE. */
10464 void
10465 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10467 rtx operands[2];
10468 operands[0] = dest;
10469 operands[1] = source;
10471 if (TARGET_DEBUG_ADDR)
10473 fprintf (stderr,
10474 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10475 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10476 GET_MODE_NAME (mode),
10477 lra_in_progress,
10478 reload_completed,
10479 can_create_pseudo_p ());
10480 debug_rtx (dest);
10481 fprintf (stderr, "source:\n");
10482 debug_rtx (source);
10485 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10486 if (CONST_WIDE_INT_P (operands[1])
10487 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10489 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10490 gcc_unreachable ();
10493 /* See if we need to special case SImode/SFmode SUBREG moves. */
10494 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10495 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10496 return;
10498 /* Check if GCC is setting up a block move that will end up using FP
10499 registers as temporaries. We must make sure this is acceptable. */
10500 if (GET_CODE (operands[0]) == MEM
10501 && GET_CODE (operands[1]) == MEM
10502 && mode == DImode
10503 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10504 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10505 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10506 ? 32 : MEM_ALIGN (operands[0])))
10507 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10508 ? 32
10509 : MEM_ALIGN (operands[1]))))
10510 && ! MEM_VOLATILE_P (operands [0])
10511 && ! MEM_VOLATILE_P (operands [1]))
10513 emit_move_insn (adjust_address (operands[0], SImode, 0),
10514 adjust_address (operands[1], SImode, 0));
10515 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10516 adjust_address (copy_rtx (operands[1]), SImode, 4));
10517 return;
10520 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10521 && !gpc_reg_operand (operands[1], mode))
10522 operands[1] = force_reg (mode, operands[1]);
10524 /* Recognize the case where operand[1] is a reference to thread-local
10525 data and load its address to a register. */
10526 if (tls_referenced_p (operands[1]))
10528 enum tls_model model;
10529 rtx tmp = operands[1];
10530 rtx addend = NULL;
10532 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10534 addend = XEXP (XEXP (tmp, 0), 1);
10535 tmp = XEXP (XEXP (tmp, 0), 0);
10538 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10539 model = SYMBOL_REF_TLS_MODEL (tmp);
10540 gcc_assert (model != 0);
10542 tmp = rs6000_legitimize_tls_address (tmp, model);
10543 if (addend)
10545 tmp = gen_rtx_PLUS (mode, tmp, addend);
10546 tmp = force_operand (tmp, operands[0]);
10548 operands[1] = tmp;
10551 /* 128-bit constant floating-point values on Darwin should really be loaded
10552 as two parts. However, this premature splitting is a problem when DFmode
10553 values can go into Altivec registers. */
10554 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10555 && GET_CODE (operands[1]) == CONST_DOUBLE)
10557 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10558 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10559 DFmode);
10560 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10561 GET_MODE_SIZE (DFmode)),
10562 simplify_gen_subreg (DFmode, operands[1], mode,
10563 GET_MODE_SIZE (DFmode)),
10564 DFmode);
10565 return;
10568 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10569 p1:SD) if p1 is not of floating point class and p0 is spilled,
10570 since we have no analogous movsd_store for this. */
10571 if (lra_in_progress && mode == DDmode
10572 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10573 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10574 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10575 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10577 enum reg_class cl;
10578 int regno = REGNO (SUBREG_REG (operands[1]));
10580 if (regno >= FIRST_PSEUDO_REGISTER)
10582 cl = reg_preferred_class (regno);
10583 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10585 if (regno >= 0 && ! FP_REGNO_P (regno))
10587 mode = SDmode;
10588 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10589 operands[1] = SUBREG_REG (operands[1]);
10592 if (lra_in_progress
10593 && mode == SDmode
10594 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10595 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10596 && (REG_P (operands[1])
10597 || (GET_CODE (operands[1]) == SUBREG
10598 && REG_P (SUBREG_REG (operands[1])))))
10600 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10601 ? SUBREG_REG (operands[1]) : operands[1]);
10602 enum reg_class cl;
10604 if (regno >= FIRST_PSEUDO_REGISTER)
10606 cl = reg_preferred_class (regno);
10607 gcc_assert (cl != NO_REGS);
10608 regno = ira_class_hard_regs[cl][0];
10610 if (FP_REGNO_P (regno))
10612 if (GET_MODE (operands[0]) != DDmode)
10613 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10614 emit_insn (gen_movsd_store (operands[0], operands[1]));
10616 else if (INT_REGNO_P (regno))
10617 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10618 else
10619 gcc_unreachable();
10620 return;
10622 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10623 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10624 since we have no analogous movsd_load for this. */
10625 if (lra_in_progress && mode == DDmode
10626 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10627 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10628 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10629 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10631 enum reg_class cl;
10632 int regno = REGNO (SUBREG_REG (operands[0]));
10634 if (regno >= FIRST_PSEUDO_REGISTER)
10636 cl = reg_preferred_class (regno);
10637 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10639 if (regno >= 0 && ! FP_REGNO_P (regno))
10641 mode = SDmode;
10642 operands[0] = SUBREG_REG (operands[0]);
10643 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10646 if (lra_in_progress
10647 && mode == SDmode
10648 && (REG_P (operands[0])
10649 || (GET_CODE (operands[0]) == SUBREG
10650 && REG_P (SUBREG_REG (operands[0]))))
10651 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10652 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10654 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10655 ? SUBREG_REG (operands[0]) : operands[0]);
10656 enum reg_class cl;
10658 if (regno >= FIRST_PSEUDO_REGISTER)
10660 cl = reg_preferred_class (regno);
10661 gcc_assert (cl != NO_REGS);
10662 regno = ira_class_hard_regs[cl][0];
10664 if (FP_REGNO_P (regno))
10666 if (GET_MODE (operands[1]) != DDmode)
10667 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10668 emit_insn (gen_movsd_load (operands[0], operands[1]));
10670 else if (INT_REGNO_P (regno))
10671 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10672 else
10673 gcc_unreachable();
10674 return;
10677 /* FIXME: In the long term, this switch statement should go away
10678 and be replaced by a sequence of tests based on things like
10679 mode == Pmode. */
10680 switch (mode)
10682 case E_HImode:
10683 case E_QImode:
10684 if (CONSTANT_P (operands[1])
10685 && GET_CODE (operands[1]) != CONST_INT)
10686 operands[1] = force_const_mem (mode, operands[1]);
10687 break;
10689 case E_TFmode:
10690 case E_TDmode:
10691 case E_IFmode:
10692 case E_KFmode:
10693 if (FLOAT128_2REG_P (mode))
10694 rs6000_eliminate_indexed_memrefs (operands);
10695 /* fall through */
10697 case E_DFmode:
10698 case E_DDmode:
10699 case E_SFmode:
10700 case E_SDmode:
10701 if (CONSTANT_P (operands[1])
10702 && ! easy_fp_constant (operands[1], mode))
10703 operands[1] = force_const_mem (mode, operands[1]);
10704 break;
10706 case E_V16QImode:
10707 case E_V8HImode:
10708 case E_V4SFmode:
10709 case E_V4SImode:
10710 case E_V2SFmode:
10711 case E_V2SImode:
10712 case E_V2DFmode:
10713 case E_V2DImode:
10714 case E_V1TImode:
10715 if (CONSTANT_P (operands[1])
10716 && !easy_vector_constant (operands[1], mode))
10717 operands[1] = force_const_mem (mode, operands[1]);
10718 break;
10720 case E_SImode:
10721 case E_DImode:
10722 /* Use the default pattern for addresses of ELF small data. */
10723 if (TARGET_ELF
10724 && mode == Pmode
10725 && DEFAULT_ABI == ABI_V4
10726 && (GET_CODE (operands[1]) == SYMBOL_REF
10727 || GET_CODE (operands[1]) == CONST)
10728 && small_data_operand (operands[1], mode))
10730 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10731 return;
10734 if (DEFAULT_ABI == ABI_V4
10735 && mode == Pmode && mode == SImode
10736 && flag_pic == 1 && got_operand (operands[1], mode))
10738 emit_insn (gen_movsi_got (operands[0], operands[1]));
10739 return;
10742 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10743 && TARGET_NO_TOC
10744 && ! flag_pic
10745 && mode == Pmode
10746 && CONSTANT_P (operands[1])
10747 && GET_CODE (operands[1]) != HIGH
10748 && GET_CODE (operands[1]) != CONST_INT)
10750 rtx target = (!can_create_pseudo_p ()
10751 ? operands[0]
10752 : gen_reg_rtx (mode));
10754 /* If this is a function address on -mcall-aixdesc,
10755 convert it to the address of the descriptor. */
10756 if (DEFAULT_ABI == ABI_AIX
10757 && GET_CODE (operands[1]) == SYMBOL_REF
10758 && XSTR (operands[1], 0)[0] == '.')
10760 const char *name = XSTR (operands[1], 0);
10761 rtx new_ref;
10762 while (*name == '.')
10763 name++;
10764 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10765 CONSTANT_POOL_ADDRESS_P (new_ref)
10766 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10767 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10768 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10769 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10770 operands[1] = new_ref;
10773 if (DEFAULT_ABI == ABI_DARWIN)
10775 #if TARGET_MACHO
10776 if (MACHO_DYNAMIC_NO_PIC_P)
10778 /* Take care of any required data indirection. */
10779 operands[1] = rs6000_machopic_legitimize_pic_address (
10780 operands[1], mode, operands[0]);
10781 if (operands[0] != operands[1])
10782 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10783 return;
10785 #endif
10786 emit_insn (gen_macho_high (target, operands[1]));
10787 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10788 return;
10791 emit_insn (gen_elf_high (target, operands[1]));
10792 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10793 return;
10796 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10797 and we have put it in the TOC, we just need to make a TOC-relative
10798 reference to it. */
10799 if (TARGET_TOC
10800 && GET_CODE (operands[1]) == SYMBOL_REF
10801 && use_toc_relative_ref (operands[1], mode))
10802 operands[1] = create_TOC_reference (operands[1], operands[0]);
10803 else if (mode == Pmode
10804 && CONSTANT_P (operands[1])
10805 && GET_CODE (operands[1]) != HIGH
10806 && ((GET_CODE (operands[1]) != CONST_INT
10807 && ! easy_fp_constant (operands[1], mode))
10808 || (GET_CODE (operands[1]) == CONST_INT
10809 && (num_insns_constant (operands[1], mode)
10810 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10811 || (GET_CODE (operands[0]) == REG
10812 && FP_REGNO_P (REGNO (operands[0]))))
10813 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10814 && (TARGET_CMODEL == CMODEL_SMALL
10815 || can_create_pseudo_p ()
10816 || (REG_P (operands[0])
10817 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10820 #if TARGET_MACHO
10821 /* Darwin uses a special PIC legitimizer. */
10822 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10824 operands[1] =
10825 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10826 operands[0]);
10827 if (operands[0] != operands[1])
10828 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10829 return;
10831 #endif
10833 /* If we are to limit the number of things we put in the TOC and
10834 this is a symbol plus a constant we can add in one insn,
10835 just put the symbol in the TOC and add the constant. */
10836 if (GET_CODE (operands[1]) == CONST
10837 && TARGET_NO_SUM_IN_TOC
10838 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10839 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10840 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10841 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10842 && ! side_effects_p (operands[0]))
10844 rtx sym =
10845 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10846 rtx other = XEXP (XEXP (operands[1], 0), 1);
10848 sym = force_reg (mode, sym);
10849 emit_insn (gen_add3_insn (operands[0], sym, other));
10850 return;
10853 operands[1] = force_const_mem (mode, operands[1]);
10855 if (TARGET_TOC
10856 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10857 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10859 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10860 operands[0]);
10861 operands[1] = gen_const_mem (mode, tocref);
10862 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10865 break;
10867 case E_TImode:
10868 if (!VECTOR_MEM_VSX_P (TImode))
10869 rs6000_eliminate_indexed_memrefs (operands);
10870 break;
10872 case E_PTImode:
10873 rs6000_eliminate_indexed_memrefs (operands);
10874 break;
10876 default:
10877 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10880 /* Above, we may have called force_const_mem which may have returned
10881 an invalid address. If we can, fix this up; otherwise, reload will
10882 have to deal with it. */
10883 if (GET_CODE (operands[1]) == MEM)
10884 operands[1] = validize_mem (operands[1]);
10886 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10889 /* Nonzero if we can use a floating-point register to pass this arg. */
10890 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10891 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10892 && (CUM)->fregno <= FP_ARG_MAX_REG \
10893 && TARGET_HARD_FLOAT)
10895 /* Nonzero if we can use an AltiVec register to pass this arg. */
10896 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10897 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10898 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10899 && TARGET_ALTIVEC_ABI \
10900 && (NAMED))
10902 /* Walk down the type tree of TYPE counting consecutive base elements.
10903 If *MODEP is VOIDmode, then set it to the first valid floating point
10904 or vector type. If a non-floating point or vector type is found, or
10905 if a floating point or vector type that doesn't match a non-VOIDmode
10906 *MODEP is found, then return -1, otherwise return the count in the
10907 sub-tree. */
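/* Illustrative example (editorial, not from the original source):
   for struct { double a; double b[2]; } the walk returns 3 with
   *MODEP == DFmode, while struct { double a; int b; } returns -1
   because int is neither a floating point nor a vector type.  */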
10909 static int
10910 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10912 machine_mode mode;
10913 HOST_WIDE_INT size;
10915 switch (TREE_CODE (type))
10917 case REAL_TYPE:
10918 mode = TYPE_MODE (type);
10919 if (!SCALAR_FLOAT_MODE_P (mode))
10920 return -1;
10922 if (*modep == VOIDmode)
10923 *modep = mode;
10925 if (*modep == mode)
10926 return 1;
10928 break;
10930 case COMPLEX_TYPE:
10931 mode = TYPE_MODE (TREE_TYPE (type));
10932 if (!SCALAR_FLOAT_MODE_P (mode))
10933 return -1;
10935 if (*modep == VOIDmode)
10936 *modep = mode;
10938 if (*modep == mode)
10939 return 2;
10941 break;
10943 case VECTOR_TYPE:
10944 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10945 return -1;
10947 /* Use V4SImode as representative of all 128-bit vector types. */
10948 size = int_size_in_bytes (type);
10949 switch (size)
10951 case 16:
10952 mode = V4SImode;
10953 break;
10954 default:
10955 return -1;
10958 if (*modep == VOIDmode)
10959 *modep = mode;
10961 /* Vector modes are considered to be opaque: two vectors are
10962 equivalent for the purposes of being homogeneous aggregates
10963 if they are the same size. */
10964 if (*modep == mode)
10965 return 1;
10967 break;
10969 case ARRAY_TYPE:
10971 int count;
10972 tree index = TYPE_DOMAIN (type);
10974 /* Can't handle incomplete types nor sizes that are not
10975 fixed. */
10976 if (!COMPLETE_TYPE_P (type)
10977 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10978 return -1;
10980 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10981 if (count == -1
10982 || !index
10983 || !TYPE_MAX_VALUE (index)
10984 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10985 || !TYPE_MIN_VALUE (index)
10986 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10987 || count < 0)
10988 return -1;
10990 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10991 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10993 /* There must be no padding. */
10994 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10995 return -1;
10997 return count;
11000 case RECORD_TYPE:
11002 int count = 0;
11003 int sub_count;
11004 tree field;
11006 /* Can't handle incomplete types nor sizes that are not
11007 fixed. */
11008 if (!COMPLETE_TYPE_P (type)
11009 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11010 return -1;
11012 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11014 if (TREE_CODE (field) != FIELD_DECL)
11015 continue;
11017 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11018 if (sub_count < 0)
11019 return -1;
11020 count += sub_count;
11023 /* There must be no padding. */
11024 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11025 return -1;
11027 return count;
11030 case UNION_TYPE:
11031 case QUAL_UNION_TYPE:
11033 /* These aren't very interesting except in a degenerate case. */
11034 int count = 0;
11035 int sub_count;
11036 tree field;
11038 /* Can't handle incomplete types nor sizes that are not
11039 fixed. */
11040 if (!COMPLETE_TYPE_P (type)
11041 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11042 return -1;
11044 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11046 if (TREE_CODE (field) != FIELD_DECL)
11047 continue;
11049 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11050 if (sub_count < 0)
11051 return -1;
11052 count = count > sub_count ? count : sub_count;
11055 /* There must be no padding. */
11056 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11057 return -1;
11059 return count;
11062 default:
11063 break;
11066 return -1;
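/* Illustrative editorial example, not part of the original source.
   For the walker above, assuming 8-byte doubles (DFmode):

     struct { double x, y; }          counts 2 (two DFmode elements)
     struct { _Complex double z; }    counts 2 (a complex counts twice)
     double [3]                       counts 3 (array of three elements)
     struct { double x; int i; }      counts -1 (mixed element types)

   A return of -1 means the type is not a candidate and falls back to
   the ordinary aggregate-passing rules.  */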
11069 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11070 float or vector aggregate that shall be passed in FP/vector registers
11071 according to the ELFv2 ABI, return the homogeneous element mode in
11072 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11074 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11076 static bool
11077 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11078 machine_mode *elt_mode,
11079 int *n_elts)
11081 /* Note that we do not accept complex types at the top level as
11082 homogeneous aggregates; these types are handled via the
11083 targetm.calls.split_complex_arg mechanism. Complex types
11084 can be elements of homogeneous aggregates, however. */
11085 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11087 machine_mode field_mode = VOIDmode;
11088 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11090 if (field_count > 0)
11092 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11093 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11095 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11096 up to AGGR_ARG_NUM_REG registers. */
11097 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11099 if (elt_mode)
11100 *elt_mode = field_mode;
11101 if (n_elts)
11102 *n_elts = field_count;
11103 return true;
11108 if (elt_mode)
11109 *elt_mode = mode;
11110 if (n_elts)
11111 *n_elts = 1;
11112 return false;
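/* Illustrative editorial example, not part of the original source.
   Under the ELFv2 ABI the first type below is recognized as a
   homogeneous aggregate (*elt_mode = DFmode, *n_elts = 4), while the
   second needs more than AGGR_ARG_NUM_REG registers and is treated
   as an ordinary aggregate:  */
#if 0
struct hfa_ok  { double a, b, c, d; };  /* Homogeneous, four FPRs.  */
struct hfa_big { double v[9]; };        /* Too many elements.  */
#endif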
11115 /* Return a nonzero value to say to return the function value in
11116 memory, just as large structures are always returned. TYPE will be
11117 the data type of the value, and FNTYPE will be the type of the
11118 function doing the returning, or @code{NULL} for libcalls.
11120 The AIX ABI for the RS/6000 specifies that all structures are
11121 returned in memory. The Darwin ABI does the same.
11123 For the Darwin 64 Bit ABI, a function result can be returned in
11124 registers or in memory, depending on the size of the return data
11125 type. If it is returned in registers, the value occupies the same
11126 registers as it would if it were the first and only function
11127 argument. Otherwise, the function places its result in memory at
11128 the location pointed to by GPR3.
11130 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11131 but a draft put them in memory, and GCC used to implement the draft
11132 instead of the final standard. Therefore, aix_struct_return
11133 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11134 compatibility can change DRAFT_V4_STRUCT_RET to override the
11135 default, and -m switches get the final word. See
11136 rs6000_option_override_internal for more details.
11138 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11139 long double support is enabled. These values are returned in memory.
11141 int_size_in_bytes returns -1 for variable size objects, which go in
11142 memory always. The cast to unsigned makes -1 > 8. */
11144 static bool
11145 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11147 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11148 if (TARGET_MACHO
11149 && rs6000_darwin64_abi
11150 && TREE_CODE (type) == RECORD_TYPE
11151 && int_size_in_bytes (type) > 0)
11153 CUMULATIVE_ARGS valcum;
11154 rtx valret;
11156 valcum.words = 0;
11157 valcum.fregno = FP_ARG_MIN_REG;
11158 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11159 /* Do a trial code generation as if this were going to be passed
11160 as an argument; if any part goes in memory, we return NULL. */
11161 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11162 if (valret)
11163 return false;
11164 /* Otherwise fall through to more conventional ABI rules. */
11167 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
11168 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11169 NULL, NULL))
11170 return false;
11172 /* The ELFv2 ABI returns aggregates up to 16B in registers */
11173 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11174 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11175 return false;
11177 if (AGGREGATE_TYPE_P (type)
11178 && (aix_struct_return
11179 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11180 return true;
11182 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11183 modes only exist for GCC vector types if -maltivec. */
11184 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11185 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11186 return false;
11188 /* Return synthetic vectors in memory. */
11189 if (TREE_CODE (type) == VECTOR_TYPE
11190 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11192 static bool warned_for_return_big_vectors = false;
11193 if (!warned_for_return_big_vectors)
11195 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11196 "non-standard ABI extension with no compatibility "
11197 "guarantee");
11198 warned_for_return_big_vectors = true;
11200 return true;
11203 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11204 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11205 return true;
11207 return false;
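/* Illustrative editorial outcomes, not part of the original source.
   Under the ELFv2 ABI:

     struct { double a, b; }    returned in FPRs (homogeneous aggregate)
     struct { char c[16]; }     returned in GPRs (aggregate <= 16 bytes)
     struct { char c[17]; }     returned in memory

   Under the AIX ABI all three go to memory, since aix_struct_return
   is set there.  */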
11210 /* Specify whether values returned in registers should be at the most
11211 significant end of a register. We want aggregates returned by
11212 value to match the way aggregates are passed to functions. */
11214 static bool
11215 rs6000_return_in_msb (const_tree valtype)
11217 return (DEFAULT_ABI == ABI_ELFv2
11218 && BYTES_BIG_ENDIAN
11219 && AGGREGATE_TYPE_P (valtype)
11220 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11221 == PAD_UPWARD));
11224 #ifdef HAVE_AS_GNU_ATTRIBUTE
11225 /* Return TRUE if a call to function FNDECL may be one that
11226 potentially affects the function calling ABI of the object file. */
11228 static bool
11229 call_ABI_of_interest (tree fndecl)
11231 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11233 struct cgraph_node *c_node;
11235 /* Libcalls are always interesting. */
11236 if (fndecl == NULL_TREE)
11237 return true;
11239 /* Any call to an external function is interesting. */
11240 if (DECL_EXTERNAL (fndecl))
11241 return true;
11243 /* Interesting functions that we are emitting in this object file. */
11244 c_node = cgraph_node::get (fndecl);
11245 c_node = c_node->ultimate_alias_target ();
11246 return !c_node->only_called_directly_p ();
11248 return false;
11250 #endif
11252 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11253 for a call to a function whose data type is FNTYPE.
11254 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11256 For incoming args we set the number of arguments in the prototype large
11257 so we never return a PARALLEL. */
11259 void
11260 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11261 rtx libname ATTRIBUTE_UNUSED, int incoming,
11262 int libcall, int n_named_args,
11263 tree fndecl ATTRIBUTE_UNUSED,
11264 machine_mode return_mode ATTRIBUTE_UNUSED)
11266 static CUMULATIVE_ARGS zero_cumulative;
11268 *cum = zero_cumulative;
11269 cum->words = 0;
11270 cum->fregno = FP_ARG_MIN_REG;
11271 cum->vregno = ALTIVEC_ARG_MIN_REG;
11272 cum->prototype = (fntype && prototype_p (fntype));
11273 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11274 ? CALL_LIBCALL : CALL_NORMAL);
11275 cum->sysv_gregno = GP_ARG_MIN_REG;
11276 cum->stdarg = stdarg_p (fntype);
11277 cum->libcall = libcall;
11279 cum->nargs_prototype = 0;
11280 if (incoming || cum->prototype)
11281 cum->nargs_prototype = n_named_args;
11283 /* Check for a longcall attribute. */
11284 if ((!fntype && rs6000_default_long_calls)
11285 || (fntype
11286 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11287 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11288 cum->call_cookie |= CALL_LONG;
11290 if (TARGET_DEBUG_ARG)
11292 fprintf (stderr, "\ninit_cumulative_args:");
11293 if (fntype)
11295 tree ret_type = TREE_TYPE (fntype);
11296 fprintf (stderr, " ret code = %s,",
11297 get_tree_code_name (TREE_CODE (ret_type)));
11300 if (cum->call_cookie & CALL_LONG)
11301 fprintf (stderr, " longcall,");
11303 fprintf (stderr, " proto = %d, nargs = %d\n",
11304 cum->prototype, cum->nargs_prototype);
11307 #ifdef HAVE_AS_GNU_ATTRIBUTE
11308 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11310 cum->escapes = call_ABI_of_interest (fndecl);
11311 if (cum->escapes)
11313 tree return_type;
11315 if (fntype)
11317 return_type = TREE_TYPE (fntype);
11318 return_mode = TYPE_MODE (return_type);
11320 else
11321 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11323 if (return_type != NULL)
11325 if (TREE_CODE (return_type) == RECORD_TYPE
11326 && TYPE_TRANSPARENT_AGGR (return_type))
11328 return_type = TREE_TYPE (first_field (return_type));
11329 return_mode = TYPE_MODE (return_type);
11331 if (AGGREGATE_TYPE_P (return_type)
11332 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11333 <= 8))
11334 rs6000_returns_struct = true;
11336 if (SCALAR_FLOAT_MODE_P (return_mode))
11338 rs6000_passes_float = true;
11339 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11340 && (FLOAT128_IBM_P (return_mode)
11341 || FLOAT128_IEEE_P (return_mode)
11342 || (return_type != NULL
11343 && (TYPE_MAIN_VARIANT (return_type)
11344 == long_double_type_node))))
11345 rs6000_passes_long_double = true;
11347 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11348 || PAIRED_VECTOR_MODE (return_mode))
11349 rs6000_passes_vector = true;
11352 #endif
11354 if (fntype
11355 && !TARGET_ALTIVEC
11356 && TARGET_ALTIVEC_ABI
11357 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11359 error ("cannot return value in vector register because"
11360 " altivec instructions are disabled, use %qs"
11361 " to enable them", "-maltivec");
11365 /* The mode the ABI uses for a word. This is not the same as word_mode
11366 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11368 static scalar_int_mode
11369 rs6000_abi_word_mode (void)
11371 return TARGET_32BIT ? SImode : DImode;
11374 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11375 static char *
11376 rs6000_offload_options (void)
11378 if (TARGET_64BIT)
11379 return xstrdup ("-foffload-abi=lp64");
11380 else
11381 return xstrdup ("-foffload-abi=ilp32");
11384 /* On rs6000, function arguments are promoted, as are function return
11385 values. */
11387 static machine_mode
11388 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11389 machine_mode mode,
11390 int *punsignedp ATTRIBUTE_UNUSED,
11391 const_tree, int)
11393 PROMOTE_MODE (mode, *punsignedp, type);
11395 return mode;
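/* Illustrative editorial note, not part of the original source.
   Through PROMOTE_MODE, a QImode or HImode argument such as "char c"
   or "short s" is widened to a full register word before being
   passed or returned, sign- or zero-extended according to its
   type.  */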
11398 /* Return true if TYPE must be passed on the stack and not in registers. */
11400 static bool
11401 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11403 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11404 return must_pass_in_stack_var_size (mode, type);
11405 else
11406 return must_pass_in_stack_var_size_or_pad (mode, type);
11409 static inline bool
11410 is_complex_IBM_long_double (machine_mode mode)
11412 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11415 /* Whether ABI_V4 passes MODE args to a function in floating point
11416 registers. */
11418 static bool
11419 abi_v4_pass_in_fpr (machine_mode mode)
11421 if (!TARGET_HARD_FLOAT)
11422 return false;
11423 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11424 return true;
11425 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11426 return true;
11427 /* ABI_V4 passes complex IBM long double in 8 gprs.
11428 Stupid, but we can't change the ABI now. */
11429 if (is_complex_IBM_long_double (mode))
11430 return false;
11431 if (FLOAT128_2REG_P (mode))
11432 return true;
11433 if (DECIMAL_FLOAT_MODE_P (mode))
11434 return true;
11435 return false;
11438 /* Implement TARGET_FUNCTION_ARG_PADDING.
11440 For the AIX ABI structs are always stored left shifted in their
11441 argument slot. */
11443 static pad_direction
11444 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11446 #ifndef AGGREGATE_PADDING_FIXED
11447 #define AGGREGATE_PADDING_FIXED 0
11448 #endif
11449 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11450 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11451 #endif
11453 if (!AGGREGATE_PADDING_FIXED)
11455 /* GCC used to pass structures of the same size as integer types as
11456 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11457 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11458 passed padded downward, except that -mstrict-align further
11459 muddied the water in that multi-component structures of 2 and 4
11460 bytes in size were passed padded upward.
11462 The following arranges for best compatibility with previous
11463 versions of gcc, but removes the -mstrict-align dependency. */
11464 if (BYTES_BIG_ENDIAN)
11466 HOST_WIDE_INT size = 0;
11468 if (mode == BLKmode)
11470 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11471 size = int_size_in_bytes (type);
11473 else
11474 size = GET_MODE_SIZE (mode);
11476 if (size == 1 || size == 2 || size == 4)
11477 return PAD_DOWNWARD;
11479 return PAD_UPWARD;
11482 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11484 if (type != 0 && AGGREGATE_TYPE_P (type))
11485 return PAD_UPWARD;
11488 /* Fall back to the default. */
11489 return default_function_arg_padding (mode, type);
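/* Illustrative editorial results, not part of the original source.
   On a big-endian target with !AGGREGATE_PADDING_FIXED, the logic
   above gives:

     struct { char c; }            size 1   PAD_DOWNWARD (like an integer)
     struct { char c[3]; }         size 3   PAD_UPWARD
     struct { short s; char c; }   size 4   PAD_DOWNWARD  */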
11492 /* If defined, a C expression that gives the alignment boundary, in bits,
11493 of an argument with the specified mode and type. If it is not defined,
11494 PARM_BOUNDARY is used for all arguments.
11496 V.4 wants long longs and doubles to be double word aligned. Just
11497 testing the mode size is a boneheaded way to do this as it means
11498 that other types such as complex int are also double word aligned.
11499 However, we're stuck with this because changing the ABI might break
11500 existing library interfaces.
11502 Quadword align Altivec/VSX vectors.
11503 Quadword align large synthetic vector types. */
11505 static unsigned int
11506 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11508 machine_mode elt_mode;
11509 int n_elts;
11511 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11513 if (DEFAULT_ABI == ABI_V4
11514 && (GET_MODE_SIZE (mode) == 8
11515 || (TARGET_HARD_FLOAT
11516 && !is_complex_IBM_long_double (mode)
11517 && FLOAT128_2REG_P (mode))))
11518 return 64;
11519 else if (FLOAT128_VECTOR_P (mode))
11520 return 128;
11521 else if (PAIRED_VECTOR_MODE (mode)
11522 || (type && TREE_CODE (type) == VECTOR_TYPE
11523 && int_size_in_bytes (type) >= 8
11524 && int_size_in_bytes (type) < 16))
11525 return 64;
11526 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11527 || (type && TREE_CODE (type) == VECTOR_TYPE
11528 && int_size_in_bytes (type) >= 16))
11529 return 128;
11531 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11532 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11533 -mcompat-align-parm is used. */
11534 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11535 || DEFAULT_ABI == ABI_ELFv2)
11536 && type && TYPE_ALIGN (type) > 64)
11538 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11539 or homogeneous float/vector aggregates here. We already handled
11540 vector aggregates above, but still need to check for float here. */
11541 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11542 && !SCALAR_FLOAT_MODE_P (elt_mode));
11544 /* We used to check for BLKmode instead of the above aggregate type
11545 check. Warn when this results in any difference to the ABI. */
11546 if (aggregate_p != (mode == BLKmode))
11548 static bool warned;
11549 if (!warned && warn_psabi)
11551 warned = true;
11552 inform (input_location,
11553 "the ABI of passing aggregates with %d-byte alignment"
11554 " has changed in GCC 5",
11555 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11559 if (aggregate_p)
11560 return 128;
11563 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11564 implement the "aggregate type" check as a BLKmode check here; this
11565 means certain aggregate types are in fact not aligned. */
11566 if (TARGET_MACHO && rs6000_darwin64_abi
11567 && mode == BLKmode
11568 && type && TYPE_ALIGN (type) > 64)
11569 return 128;
11571 return PARM_BOUNDARY;
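/* Illustrative editorial boundaries, not part of the original
   source.  The function above yields:

     long long or double under ABI_V4          64
     __float128 (where FLOAT128_VECTOR_P)      128
     16-byte AltiVec/VSX vector                128
     plain int                                 PARM_BOUNDARY  */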
11574 /* The offset in words to the start of the parameter save area. */
11576 static unsigned int
11577 rs6000_parm_offset (void)
11579 return (DEFAULT_ABI == ABI_V4 ? 2
11580 : DEFAULT_ABI == ABI_ELFv2 ? 4
11581 : 6);
11584 /* For a function parm of MODE and TYPE, return the starting word in
11585 the parameter area. NWORDS of the parameter area are already used. */
11587 static unsigned int
11588 rs6000_parm_start (machine_mode mode, const_tree type,
11589 unsigned int nwords)
11591 unsigned int align;
11593 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11594 return nwords + (-(rs6000_parm_offset () + nwords) & align);
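/* Worked editorial example, not part of the original source.  On
   64-bit ELFv2 (rs6000_parm_offset () == 4, PARM_BOUNDARY == 64), a
   16-byte vector has a 128-bit boundary, so align == 1 above.  With
   nwords == 3 already used, the result is 3 + (-(4 + 3) & 1) == 4:
   the argument is pushed to word 4 so that offset plus word is even,
   keeping it 16-byte aligned within the save area.  */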
11597 /* Compute the size (in words) of a function argument. */
11599 static unsigned long
11600 rs6000_arg_size (machine_mode mode, const_tree type)
11602 unsigned long size;
11604 if (mode != BLKmode)
11605 size = GET_MODE_SIZE (mode);
11606 else
11607 size = int_size_in_bytes (type);
11609 if (TARGET_32BIT)
11610 return (size + 3) >> 2;
11611 else
11612 return (size + 7) >> 3;
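/* Illustrative editorial values, not part of the original source.
   A 9-byte BLKmode struct occupies (9 + 7) >> 3 == 2 doublewords on
   a 64-bit target and (9 + 3) >> 2 == 3 words on a 32-bit target.  */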
11615 /* Use this to flush pending int fields. */
11617 static void
11618 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11619 HOST_WIDE_INT bitpos, int final)
11621 unsigned int startbit, endbit;
11622 int intregs, intoffset;
11624 /* Handle the situations where a float is taking up the first half
11625 of the GPR, and the other half is empty (typically due to
11626 alignment restrictions). We can detect this by an 8-byte-aligned
11627 int field, or by seeing that this is the final flush for this
11628 argument. Count the word and continue on. */
11629 if (cum->floats_in_gpr == 1
11630 && (cum->intoffset % 64 == 0
11631 || (cum->intoffset == -1 && final)))
11633 cum->words++;
11634 cum->floats_in_gpr = 0;
11637 if (cum->intoffset == -1)
11638 return;
11640 intoffset = cum->intoffset;
11641 cum->intoffset = -1;
11642 cum->floats_in_gpr = 0;
11644 if (intoffset % BITS_PER_WORD != 0)
11646 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11647 if (!int_mode_for_size (bits, 0).exists ())
11649 /* We couldn't find an appropriate mode, which happens,
11650 e.g., in packed structs when there are 3 bytes to load.
11651 Back intoffset back to the beginning of the word in this
11652 case. */
11653 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11657 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11658 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11659 intregs = (endbit - startbit) / BITS_PER_WORD;
11660 cum->words += intregs;
11661 /* words should be unsigned. */
11662 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11664 int pad = (endbit / BITS_PER_WORD) - cum->words;
11665 cum->words += pad;
11669 /* The darwin64 ABI calls for us to recurse down through structs,
11670 looking for elements passed in registers. Unfortunately, we have
11671 to track int register count here also because of misalignments
11672 in powerpc alignment mode. */
11674 static void
11675 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11676 const_tree type,
11677 HOST_WIDE_INT startbitpos)
11679 tree f;
11681 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11682 if (TREE_CODE (f) == FIELD_DECL)
11684 HOST_WIDE_INT bitpos = startbitpos;
11685 tree ftype = TREE_TYPE (f);
11686 machine_mode mode;
11687 if (ftype == error_mark_node)
11688 continue;
11689 mode = TYPE_MODE (ftype);
11691 if (DECL_SIZE (f) != 0
11692 && tree_fits_uhwi_p (bit_position (f)))
11693 bitpos += int_bit_position (f);
11695 /* ??? FIXME: else assume zero offset. */
11697 if (TREE_CODE (ftype) == RECORD_TYPE)
11698 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11699 else if (USE_FP_FOR_ARG_P (cum, mode))
11701 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11702 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11703 cum->fregno += n_fpregs;
11704 /* Single-precision floats present a special problem for
11705 us, because they are smaller than an 8-byte GPR, and so
11706 the structure-packing rules combined with the standard
11707 varargs behavior mean that we want to pack float/float
11708 and float/int combinations into a single register's
11709 space. This is complicated by the arg advance flushing,
11710 which works on arbitrarily large groups of int-type
11711 fields. */
11712 if (mode == SFmode)
11714 if (cum->floats_in_gpr == 1)
11716 /* Two floats in a word; count the word and reset
11717 the float count. */
11718 cum->words++;
11719 cum->floats_in_gpr = 0;
11721 else if (bitpos % 64 == 0)
11723 /* A float at the beginning of an 8-byte word;
11724 count it and put off adjusting cum->words until
11725 we see if an arg advance flush is going to do it
11726 for us. */
11727 cum->floats_in_gpr++;
11729 else
11731 /* The float is at the end of a word, preceded
11732 by integer fields, so the arg advance flush
11733 just above has already set cum->words and
11734 everything is taken care of. */
11737 else
11738 cum->words += n_fpregs;
11740 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11742 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11743 cum->vregno++;
11744 cum->words += 2;
11746 else if (cum->intoffset == -1)
11747 cum->intoffset = bitpos;
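/* Illustrative editorial example, not part of the original source.
   Under the Darwin64 rules above, the struct below occupies a single
   GPR word: the first float starts an 8-byte word (floats_in_gpr
   becomes 1) and the second completes it (cum->words is bumped and
   the float count reset):  */
#if 0
struct two_floats { float a; float b; };
#endif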
11751 /* Check for an item that needs to be considered specially under the darwin 64
11752 bit ABI. These are record types where the mode is BLK or the structure is
11753 8 bytes in size. */
11754 static int
11755 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11757 return rs6000_darwin64_abi
11758 && ((mode == BLKmode
11759 && TREE_CODE (type) == RECORD_TYPE
11760 && int_size_in_bytes (type) > 0)
11761 || (type && TREE_CODE (type) == RECORD_TYPE
11762 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11765 /* Update the data in CUM to advance over an argument
11766 of mode MODE and data type TYPE.
11767 (TYPE is null for libcalls where that information may not be available.)
11769 Note that for args passed by reference, function_arg will be called
11770 with MODE and TYPE set to that of the pointer to the arg, not the arg
11771 itself. */
11773 static void
11774 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11775 const_tree type, bool named, int depth)
11777 machine_mode elt_mode;
11778 int n_elts;
11780 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11782 /* Only tick off an argument if we're not recursing. */
11783 if (depth == 0)
11784 cum->nargs_prototype--;
11786 #ifdef HAVE_AS_GNU_ATTRIBUTE
11787 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11788 && cum->escapes)
11790 if (SCALAR_FLOAT_MODE_P (mode))
11792 rs6000_passes_float = true;
11793 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11794 && (FLOAT128_IBM_P (mode)
11795 || FLOAT128_IEEE_P (mode)
11796 || (type != NULL
11797 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11798 rs6000_passes_long_double = true;
11800 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11801 || (PAIRED_VECTOR_MODE (mode)
11802 && !cum->stdarg
11803 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11804 rs6000_passes_vector = true;
11806 #endif
11808 if (TARGET_ALTIVEC_ABI
11809 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11810 || (type && TREE_CODE (type) == VECTOR_TYPE
11811 && int_size_in_bytes (type) == 16)))
11813 bool stack = false;
11815 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11817 cum->vregno += n_elts;
11819 if (!TARGET_ALTIVEC)
11820 error ("cannot pass argument in vector register because"
11821 " altivec instructions are disabled, use %qs"
11822 " to enable them", "-maltivec");
11824 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11825 even if it is going to be passed in a vector register.
11826 Darwin does the same for variable-argument functions. */
11827 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11828 && TARGET_64BIT)
11829 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11830 stack = true;
11832 else
11833 stack = true;
11835 if (stack)
11837 int align;
11839 /* Vector parameters must be 16-byte aligned. In 32-bit
11840 mode this means we need to take into account the offset
11841 to the parameter save area. In 64-bit mode, they just
11842 have to start on an even word, since the parameter save
11843 area is 16-byte aligned. */
11844 if (TARGET_32BIT)
11845 align = -(rs6000_parm_offset () + cum->words) & 3;
11846 else
11847 align = cum->words & 1;
11848 cum->words += align + rs6000_arg_size (mode, type);
11850 if (TARGET_DEBUG_ARG)
11852 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11853 cum->words, align);
11854 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11855 cum->nargs_prototype, cum->prototype,
11856 GET_MODE_NAME (mode));
11860 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11862 int size = int_size_in_bytes (type);
11863 /* Variable sized types have size == -1 and are
11864 treated as if consisting entirely of ints.
11865 Pad to 16 byte boundary if needed. */
11866 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11867 && (cum->words % 2) != 0)
11868 cum->words++;
11869 /* For varargs, we can just go up by the size of the struct. */
11870 if (!named)
11871 cum->words += (size + 7) / 8;
11872 else
11874 /* It is tempting to say int register count just goes up by
11875 sizeof(type)/8, but this is wrong in a case such as
11876 { int; double; int; } [powerpc alignment]. We have to
11877 grovel through the fields for these too. */
11878 cum->intoffset = 0;
11879 cum->floats_in_gpr = 0;
11880 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11881 rs6000_darwin64_record_arg_advance_flush (cum,
11882 size * BITS_PER_UNIT, 1);
11884 if (TARGET_DEBUG_ARG)
11886 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11887 cum->words, TYPE_ALIGN (type), size);
11888 fprintf (stderr,
11889 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11890 cum->nargs_prototype, cum->prototype,
11891 GET_MODE_NAME (mode));
11894 else if (DEFAULT_ABI == ABI_V4)
11896 if (abi_v4_pass_in_fpr (mode))
11898 /* _Decimal128 must use an even/odd register pair. This assumes
11899 that the register number is odd when fregno is odd. */
11900 if (mode == TDmode && (cum->fregno % 2) == 1)
11901 cum->fregno++;
11903 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11904 <= FP_ARG_V4_MAX_REG)
11905 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11906 else
11908 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11909 if (mode == DFmode || FLOAT128_IBM_P (mode)
11910 || mode == DDmode || mode == TDmode)
11911 cum->words += cum->words & 1;
11912 cum->words += rs6000_arg_size (mode, type);
11915 else
11917 int n_words = rs6000_arg_size (mode, type);
11918 int gregno = cum->sysv_gregno;
11920 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11921 As does any other 2 word item such as complex int due to a
11922 historical mistake. */
11923 if (n_words == 2)
11924 gregno += (1 - gregno) & 1;
11926 /* Multi-reg args are not split between registers and stack. */
11927 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11929 /* Long long is aligned on the stack. So are other 2 word
11930 items such as complex int due to a historical mistake. */
11931 if (n_words == 2)
11932 cum->words += cum->words & 1;
11933 cum->words += n_words;
11936 /* Note: we continue to accumulate gregno even after we have started
11937 spilling to the stack; expand_builtin_saveregs uses this to
11938 detect that spilling has begun. */
11939 cum->sysv_gregno = gregno + n_words;
11942 if (TARGET_DEBUG_ARG)
11944 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11945 cum->words, cum->fregno);
11946 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11947 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11948 fprintf (stderr, "mode = %4s, named = %d\n",
11949 GET_MODE_NAME (mode), named);
11952 else
11954 int n_words = rs6000_arg_size (mode, type);
11955 int start_words = cum->words;
11956 int align_words = rs6000_parm_start (mode, type, start_words);
11958 cum->words = align_words + n_words;
11960 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11962 /* _Decimal128 must be passed in an even/odd float register pair.
11963 This assumes that the register number is odd when fregno is
11964 odd. */
11965 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11966 cum->fregno++;
11967 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11970 if (TARGET_DEBUG_ARG)
11972 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11973 cum->words, cum->fregno);
11974 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11975 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11976 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11977 named, align_words - start_words, depth);
11982 static void
11983 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11984 const_tree type, bool named)
11986 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11987 0);
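/* Illustrative editorial walkthrough, not part of the original
   source.  Advancing over "f (double a, vector int b, int c)" on
   64-bit ELFv2 bumps fregno for A, vregno for B, and cum->words for
   all three: parameter save area words are accounted even for
   arguments that travel in FPRs or VRs, which is the GPR "shadowing"
   the AIX family of ABIs requires.  */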
11990 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11991 structure between cum->intoffset and bitpos to integer registers. */
11993 static void
11994 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11995 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11997 machine_mode mode;
11998 unsigned int regno;
11999 unsigned int startbit, endbit;
12000 int this_regno, intregs, intoffset;
12001 rtx reg;
12003 if (cum->intoffset == -1)
12004 return;
12006 intoffset = cum->intoffset;
12007 cum->intoffset = -1;
12009 /* If this is the trailing part of a word, try to only load that
12010 much into the register. Otherwise load the whole register. Note
12011 that in the latter case we may pick up unwanted bits. It's not a
12012 problem at the moment, but we may wish to revisit this. */
12014 if (intoffset % BITS_PER_WORD != 0)
12016 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12017 if (!int_mode_for_size (bits, 0).exists (&mode))
12019 /* We couldn't find an appropriate mode, which happens,
12020 e.g., in packed structs when there are 3 bytes to load.
12021 Back intoffset back to the beginning of the word in this
12022 case. */
12023 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12024 mode = word_mode;
12027 else
12028 mode = word_mode;
12030 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12031 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12032 intregs = (endbit - startbit) / BITS_PER_WORD;
12033 this_regno = cum->words + intoffset / BITS_PER_WORD;
12035 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12036 cum->use_stack = 1;
12038 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12039 if (intregs <= 0)
12040 return;
12042 intoffset /= BITS_PER_UNIT;
12045 regno = GP_ARG_MIN_REG + this_regno;
12046 reg = gen_rtx_REG (mode, regno);
12047 rvec[(*k)++] =
12048 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12050 this_regno += 1;
12051 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12052 mode = word_mode;
12053 intregs -= 1;
12055 while (intregs > 0);
12058 /* Recursive workhorse for the following. */
12060 static void
12061 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12062 HOST_WIDE_INT startbitpos, rtx rvec[],
12063 int *k)
12065 tree f;
12067 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12068 if (TREE_CODE (f) == FIELD_DECL)
12070 HOST_WIDE_INT bitpos = startbitpos;
12071 tree ftype = TREE_TYPE (f);
12072 machine_mode mode;
12073 if (ftype == error_mark_node)
12074 continue;
12075 mode = TYPE_MODE (ftype);
12077 if (DECL_SIZE (f) != 0
12078 && tree_fits_uhwi_p (bit_position (f)))
12079 bitpos += int_bit_position (f);
12081 /* ??? FIXME: else assume zero offset. */
12083 if (TREE_CODE (ftype) == RECORD_TYPE)
12084 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12085 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12087 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12088 #if 0
12089 switch (mode)
12091 case E_SCmode: mode = SFmode; break;
12092 case E_DCmode: mode = DFmode; break;
12093 case E_TCmode: mode = TFmode; break;
12094 default: break;
12096 #endif
12097 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12098 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12100 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12101 && (mode == TFmode || mode == TDmode));
12102 /* Long double or _Decimal128 split over regs and memory. */
12103 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12104 cum->use_stack = 1;
12106 rvec[(*k)++]
12107 = gen_rtx_EXPR_LIST (VOIDmode,
12108 gen_rtx_REG (mode, cum->fregno++),
12109 GEN_INT (bitpos / BITS_PER_UNIT));
12110 if (FLOAT128_2REG_P (mode))
12111 cum->fregno++;
12113 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12115 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12116 rvec[(*k)++]
12117 = gen_rtx_EXPR_LIST (VOIDmode,
12118 gen_rtx_REG (mode, cum->vregno++),
12119 GEN_INT (bitpos / BITS_PER_UNIT));
12121 else if (cum->intoffset == -1)
12122 cum->intoffset = bitpos;
12126 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12127 the register(s) to be used for each field and subfield of a struct
12128 being passed by value, along with the offset of where the
12129 register's value may be found in the block. FP fields go in FP
12130 register, vector fields go in vector registers, and everything
12131 else goes in int registers, packed as in memory.
12133 This code is also used for function return values. RETVAL indicates
12134 whether this is the case.
12136 Much of this is taken from the SPARC V9 port, which has a similar
12137 calling convention. */
12139 static rtx
12140 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12141 bool named, bool retval)
12143 rtx rvec[FIRST_PSEUDO_REGISTER];
12144 int k = 1, kbase = 1;
12145 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12146 /* This is a copy; modifications are not visible to our caller. */
12147 CUMULATIVE_ARGS copy_cum = *orig_cum;
12148 CUMULATIVE_ARGS *cum = &copy_cum;
12150 /* Pad to 16 byte boundary if needed. */
12151 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12152 && (cum->words % 2) != 0)
12153 cum->words++;
12155 cum->intoffset = 0;
12156 cum->use_stack = 0;
12157 cum->named = named;
12159 /* Put entries into rvec[] for individual FP and vector fields, and
12160 for the chunks of memory that go in int regs. Note we start at
12161 element 1; 0 is reserved for an indication of using memory, and
12162 may or may not be filled in below. */
12163 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12164 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12166 /* If any part of the struct went on the stack put all of it there.
12167 This hack is because the generic code for
12168 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12169 parts of the struct are not at the beginning. */
12170 if (cum->use_stack)
12172 if (retval)
12173 return NULL_RTX; /* doesn't go in registers at all */
12174 kbase = 0;
12175 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12177 if (k > 1 || cum->use_stack)
12178 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12179 else
12180 return NULL_RTX;
12183 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12185 static rtx
12186 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12187 int align_words)
12189 int n_units;
12190 int i, k;
12191 rtx rvec[GP_ARG_NUM_REG + 1];
12193 if (align_words >= GP_ARG_NUM_REG)
12194 return NULL_RTX;
12196 n_units = rs6000_arg_size (mode, type);
12198 /* Optimize the simple case where the arg fits in one gpr, except in
12199 the case of BLKmode due to assign_parms assuming that registers are
12200 BITS_PER_WORD wide. */
12201 if (n_units == 0
12202 || (n_units == 1 && mode != BLKmode))
12203 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12205 k = 0;
12206 if (align_words + n_units > GP_ARG_NUM_REG)
12207 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12208 using a magic NULL_RTX component.
12209 This is not strictly correct. Only some of the arg belongs in
12210 memory, not all of it. However, the normal scheme using
12211 function_arg_partial_nregs can result in unusual subregs, eg.
12212 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12213 store the whole arg to memory is often more efficient than code
12214 to store pieces, and we know that space is available in the right
12215 place for the whole arg. */
12216 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12218 i = 0;
12221 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12222 rtx off = GEN_INT (i++ * 4);
12223 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12225 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12227 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12230 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12231 but must also be copied into the parameter save area starting at
12232 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12233 to the GPRs and/or memory. Return the number of elements used. */
12235 static int
12236 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12237 int align_words, rtx *rvec)
12239 int k = 0;
12241 if (align_words < GP_ARG_NUM_REG)
12243 int n_words = rs6000_arg_size (mode, type);
12245 if (align_words + n_words > GP_ARG_NUM_REG
12246 || mode == BLKmode
12247 || (TARGET_32BIT && TARGET_POWERPC64))
12249 /* If this is partially on the stack, then we only
12250 include the portion actually in registers here. */
12251 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12252 int i = 0;
12254 if (align_words + n_words > GP_ARG_NUM_REG)
12256 /* Not all of the arg fits in gprs. Say that it goes in memory
12257 too, using a magic NULL_RTX component. Also see comment in
12258 rs6000_mixed_function_arg for why the normal
12259 function_arg_partial_nregs scheme doesn't work in this case. */
12260 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12265 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12266 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12267 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12269 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12271 else
12273 /* The whole arg fits in gprs. */
12274 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12275 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12278 else
12280 /* It's entirely in memory. */
12281 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12284 return k;
12287 /* RVEC is a vector of K components of an argument of mode MODE.
12288 Construct the final function_arg return value from it. */
12290 static rtx
12291 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12293 gcc_assert (k >= 1);
12295 /* Avoid returning a PARALLEL in the trivial cases. */
12296 if (k == 1)
12298 if (XEXP (rvec[0], 0) == NULL_RTX)
12299 return NULL_RTX;
12301 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12302 return XEXP (rvec[0], 0);
12305 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12308 /* Determine where to put an argument to a function.
12309 Value is zero to push the argument on the stack,
12310 or a hard register in which to store the argument.
12312 MODE is the argument's machine mode.
12313 TYPE is the data type of the argument (as a tree).
12314 This is null for libcalls where that information may
12315 not be available.
12316 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12317 the preceding args and about the function being called. It is
12318 not modified in this routine.
12319 NAMED is nonzero if this argument is a named parameter
12320 (otherwise it is an extra parameter matching an ellipsis).
12322 On RS/6000 the first eight words of non-FP are normally in registers
12323 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12324 Under V.4, the first 8 FP args are in registers.
12326 If this is floating-point and no prototype is specified, we use
12327 both an FP and integer register (or possibly FP reg and stack). Library
12328 functions (when CALL_LIBCALL is set) always have the proper types for args,
12329 so we can pass the FP value just in one register. emit_library_call
12330 doesn't support PARALLEL anyway.
12332 Note that for args passed by reference, function_arg will be called
12333 with MODE and TYPE set to that of the pointer to the arg, not the arg
12334 itself. */
12336 static rtx
12337 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12338 const_tree type, bool named)
12340 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12341 enum rs6000_abi abi = DEFAULT_ABI;
12342 machine_mode elt_mode;
12343 int n_elts;
12345 /* Return a marker to indicate whether CR1 needs to set or clear the
12346 bit that V.4 uses to say fp args were passed in registers.
12347 Assume that we don't need the marker for software floating point,
12348 or compiler generated library calls. */
12349 if (mode == VOIDmode)
12351 if (abi == ABI_V4
12352 && (cum->call_cookie & CALL_LIBCALL) == 0
12353 && (cum->stdarg
12354 || (cum->nargs_prototype < 0
12355 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12356 && TARGET_HARD_FLOAT)
12357 return GEN_INT (cum->call_cookie
12358 | ((cum->fregno == FP_ARG_MIN_REG)
12359 ? CALL_V4_SET_FP_ARGS
12360 : CALL_V4_CLEAR_FP_ARGS));
12362 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12365 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12367 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12369 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12370 if (rslt != NULL_RTX)
12371 return rslt;
12372 /* Else fall through to usual handling. */
12375 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12377 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12378 rtx r, off;
12379 int i, k = 0;
12381 /* Do we also need to pass this argument in the parameter save area?
12382 Library support functions for IEEE 128-bit are assumed to not need the
12383 value passed both in GPRs and in vector registers. */
12384 if (TARGET_64BIT && !cum->prototype
12385 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12387 int align_words = ROUND_UP (cum->words, 2);
12388 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12391 /* Describe where this argument goes in the vector registers. */
12392 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12394 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12395 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12396 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12399 return rs6000_finish_function_arg (mode, rvec, k);
12401 else if (TARGET_ALTIVEC_ABI
12402 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12403 || (type && TREE_CODE (type) == VECTOR_TYPE
12404 && int_size_in_bytes (type) == 16)))
12406 if (named || abi == ABI_V4)
12407 return NULL_RTX;
12408 else
12410 /* Vector parameters to varargs functions under AIX or Darwin
12411 get passed in memory and possibly also in GPRs. */
12412 int align, align_words, n_words;
12413 machine_mode part_mode;
12415 /* Vector parameters must be 16-byte aligned. In 32-bit
12416 mode this means we need to take into account the offset
12417 to the parameter save area. In 64-bit mode, they just
12418 have to start on an even word, since the parameter save
12419 area is 16-byte aligned. */
12420 if (TARGET_32BIT)
12421 align = -(rs6000_parm_offset () + cum->words) & 3;
12422 else
12423 align = cum->words & 1;
12424 align_words = cum->words + align;
12426 /* Out of registers? Memory, then. */
12427 if (align_words >= GP_ARG_NUM_REG)
12428 return NULL_RTX;
12430 if (TARGET_32BIT && TARGET_POWERPC64)
12431 return rs6000_mixed_function_arg (mode, type, align_words);
12433 /* The vector value goes in GPRs. Only the part of the
12434 value in GPRs is reported here. */
12435 part_mode = mode;
12436 n_words = rs6000_arg_size (mode, type);
12437 if (align_words + n_words > GP_ARG_NUM_REG)
12438 /* Fortunately, there are only two possibilities, the value
12439 is either wholly in GPRs or half in GPRs and half not. */
12440 part_mode = DImode;
12442 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12446 else if (abi == ABI_V4)
12448 if (abi_v4_pass_in_fpr (mode))
12450 /* _Decimal128 must use an even/odd register pair. This assumes
12451 that the register number is odd when fregno is odd. */
12452 if (mode == TDmode && (cum->fregno % 2) == 1)
12453 cum->fregno++;
12455 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12456 <= FP_ARG_V4_MAX_REG)
12457 return gen_rtx_REG (mode, cum->fregno);
12458 else
12459 return NULL_RTX;
12461 else
12463 int n_words = rs6000_arg_size (mode, type);
12464 int gregno = cum->sysv_gregno;
12466 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12467 As does any other 2 word item such as complex int due to a
12468 historical mistake. */
12469 if (n_words == 2)
12470 gregno += (1 - gregno) & 1;
12472 /* Multi-reg args are not split between registers and stack. */
12473 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12474 return NULL_RTX;
12476 if (TARGET_32BIT && TARGET_POWERPC64)
12477 return rs6000_mixed_function_arg (mode, type,
12478 gregno - GP_ARG_MIN_REG);
12479 return gen_rtx_REG (mode, gregno);
12482 else
12484 int align_words = rs6000_parm_start (mode, type, cum->words);
12486 /* _Decimal128 must be passed in an even/odd float register pair.
12487 This assumes that the register number is odd when fregno is odd. */
12488 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12489 cum->fregno++;
12491 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12493 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12494 rtx r, off;
12495 int i, k = 0;
12496 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12497 int fpr_words;
12499 /* Do we also need to pass this argument in the parameter
12500 save area? */
12501 if (type && (cum->nargs_prototype <= 0
12502 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12503 && TARGET_XL_COMPAT
12504 && align_words >= GP_ARG_NUM_REG)))
12505 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12507 /* Describe where this argument goes in the fprs. */
12508 for (i = 0; i < n_elts
12509 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12511 /* Check if the argument is split over registers and memory.
12512 This can only ever happen for long double or _Decimal128;
12513 complex types are handled via split_complex_arg. */
12514 machine_mode fmode = elt_mode;
12515 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12517 gcc_assert (FLOAT128_2REG_P (fmode));
12518 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12521 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12522 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12523 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12526 /* If there were not enough FPRs to hold the argument, the rest
12527 usually goes into memory. However, if the current position
12528 is still within the register parameter area, a portion may
12529 actually have to go into GPRs.
12531 Note that it may happen that the portion of the argument
12532 passed in the first "half" of the first GPR was already
12533 passed in the last FPR as well.
12535 For unnamed arguments, we already set up GPRs to cover the
12536 whole argument in rs6000_psave_function_arg, so there is
12537 nothing further to do at this point. */
12538 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12539 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12540 && cum->nargs_prototype > 0)
12542 static bool warned;
12544 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12545 int n_words = rs6000_arg_size (mode, type);
12547 align_words += fpr_words;
12548 n_words -= fpr_words;
12552 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12553 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12554 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12556 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12558 if (!warned && warn_psabi)
12560 warned = true;
12561 inform (input_location,
12562 "the ABI of passing homogeneous float aggregates"
12563 " has changed in GCC 5");
12567 return rs6000_finish_function_arg (mode, rvec, k);
12569 else if (align_words < GP_ARG_NUM_REG)
12571 if (TARGET_32BIT && TARGET_POWERPC64)
12572 return rs6000_mixed_function_arg (mode, type, align_words);
12574 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12576 else
12577 return NULL_RTX;
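/* Illustrative editorial outcomes, not part of the original source.
   Under 64-bit ELFv2 with a prototype in scope:

     first double                      (reg:DF) in the first FPR
     struct { double a, b; }           PARALLEL of two FPR pieces
     first int                         GPR 3
     args past the GPR/FPR supply      NULL_RTX, i.e. the stack  */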
12581 /* For an arg passed partly in registers and partly in memory, this is
12582 the number of bytes passed in registers. For args passed entirely in
12583 registers or entirely in memory, zero. When an arg is described by a
12584 PARALLEL, perhaps using more than one register type, this function
12585 returns the number of bytes used by the first element of the PARALLEL. */
12587 static int
12588 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12589 tree type, bool named)
12591 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12592 bool passed_in_gprs = true;
12593 int ret = 0;
12594 int align_words;
12595 machine_mode elt_mode;
12596 int n_elts;
12598 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12600 if (DEFAULT_ABI == ABI_V4)
12601 return 0;
12603 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12605 /* If we are passing this arg in the fixed parameter save area (gprs or
12606 memory) as well as VRs, we do not use the partial bytes mechanism;
12607 instead, rs6000_function_arg will return a PARALLEL including a memory
12608 element as necessary. Library support functions for IEEE 128-bit are
12609 assumed to not need the value passed both in GPRs and in vector
12610 registers. */
12611 if (TARGET_64BIT && !cum->prototype
12612 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12613 return 0;
12615 /* Otherwise, we pass in VRs only. Check for partial copies. */
12616 passed_in_gprs = false;
12617 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12618 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12621 /* In this complicated case we just disable the partial_nregs code. */
12622 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12623 return 0;
12625 align_words = rs6000_parm_start (mode, type, cum->words);
12627 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12629 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12631 /* If we are passing this arg in the fixed parameter save area
12632 (gprs or memory) as well as FPRs, we do not use the partial
12633 bytes mechanism; instead, rs6000_function_arg will return a
12634 PARALLEL including a memory element as necessary. */
12635 if (type
12636 && (cum->nargs_prototype <= 0
12637 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12638 && TARGET_XL_COMPAT
12639 && align_words >= GP_ARG_NUM_REG)))
12640 return 0;
12642 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12643 passed_in_gprs = false;
12644 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12646 /* Compute number of bytes / words passed in FPRs. If there
12647 is still space available in the register parameter area
12648 *after* that amount, a part of the argument will be passed
12649 in GPRs. In that case, the total amount passed in any
12650 registers is equal to the amount that would have been passed
12651 in GPRs if everything were passed there, so we fall back to
12652 the GPR code below to compute the appropriate value. */
12653 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12654 * MIN (8, GET_MODE_SIZE (elt_mode)));
12655 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12657 if (align_words + fpr_words < GP_ARG_NUM_REG)
12658 passed_in_gprs = true;
12659 else
12660 ret = fpr;
12664 if (passed_in_gprs
12665 && align_words < GP_ARG_NUM_REG
12666 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12667 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12669 if (ret != 0 && TARGET_DEBUG_ARG)
12670 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12672 return ret;
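/* Worked editorial example, not part of the original source.  With
   GP_ARG_NUM_REG == 8, a 10-doubleword BLKmode argument starting at
   align_words == 4 on a 64-bit target is split: the code above
   returns (8 - 4) * 8 == 32 bytes passed in registers, and the
   remaining 48 bytes go to the stack.  */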
12675 /* A C expression that indicates when an argument must be passed by
12676 reference. If nonzero for an argument, a copy of that argument is
12677 made in memory and a pointer to the argument is passed instead of
12678 the argument itself. The pointer is passed in whatever way is
12679 appropriate for passing a pointer to that type.
12681 Under V.4, aggregates and long double are passed by reference.
12683 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12684 reference unless the AltiVec vector extension ABI is in force.
12686 As an extension to all ABIs, variable sized types are passed by
12687 reference. */
12689 static bool
12690 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12691 machine_mode mode, const_tree type,
12692 bool named ATTRIBUTE_UNUSED)
12694 if (!type)
12695 return 0;
12697 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12698 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12700 if (TARGET_DEBUG_ARG)
12701 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12702 return 1;
12705 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12707 if (TARGET_DEBUG_ARG)
12708 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12709 return 1;
12712 if (int_size_in_bytes (type) < 0)
12714 if (TARGET_DEBUG_ARG)
12715 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12716 return 1;
12719 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12720 modes only exist for GCC vector types if -maltivec. */
12721 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12723 if (TARGET_DEBUG_ARG)
12724 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12725 return 1;
12728 /* Pass synthetic vectors in memory. */
12729 if (TREE_CODE (type) == VECTOR_TYPE
12730 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12732 static bool warned_for_pass_big_vectors = false;
12733 if (TARGET_DEBUG_ARG)
12734 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12735 if (!warned_for_pass_big_vectors)
12737 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12738 "non-standard ABI extension with no compatibility "
12739 "guarantee");
12740 warned_for_pass_big_vectors = true;
12742 return 1;
12745 return 0;
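/* Illustrative editorial cases, not part of the original source.
   Under the 32-bit SVR4 (V.4) ABI a struct argument, a variably
   sized type, and a 16-byte vector compiled with the AltiVec ABI
   disabled are all passed by reference, whereas the AIX and ELFv2
   ABIs pass the same struct by value.  */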
12748 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12749 already processed. Return true if the parameter must be passed
12750 (fully or partially) on the stack. */
12752 static bool
12753 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12755 machine_mode mode;
12756 int unsignedp;
12757 rtx entry_parm;
12759 /* Catch errors. */
12760 if (type == NULL || type == error_mark_node)
12761 return true;
12763 /* Handle types with no storage requirement. */
12764 if (TYPE_MODE (type) == VOIDmode)
12765 return false;
12767 /* Handle complex types. */
12768 if (TREE_CODE (type) == COMPLEX_TYPE)
12769 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12770 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12772 /* Handle transparent aggregates. */
12773 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12774 && TYPE_TRANSPARENT_AGGR (type))
12775 type = TREE_TYPE (first_field (type));
12777 /* See if this arg was passed by invisible reference. */
12778 if (pass_by_reference (get_cumulative_args (args_so_far),
12779 TYPE_MODE (type), type, true))
12780 type = build_pointer_type (type);
12782 /* Find mode as it is passed by the ABI. */
12783 unsignedp = TYPE_UNSIGNED (type);
12784 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12786 /* If we must pass in stack, we need a stack. */
12787 if (rs6000_must_pass_in_stack (mode, type))
12788 return true;
12790 /* If there is no incoming register, we need a stack. */
12791 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12792 if (entry_parm == NULL)
12793 return true;
12795 /* Likewise if we need to pass both in registers and on the stack. */
12796 if (GET_CODE (entry_parm) == PARALLEL
12797 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12798 return true;
12800 /* Also true if we're partially in registers and partially not. */
12801 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12802 return true;
12804 /* Update info on where next arg arrives in registers. */
12805 rs6000_function_arg_advance (args_so_far, mode, type, true);
12806 return false;
12809 /* Return true if FUN has no prototype, has a variable argument
12810 list, or passes any parameter in memory. */
12812 static bool
12813 rs6000_function_parms_need_stack (tree fun, bool incoming)
12815 tree fntype, result;
12816 CUMULATIVE_ARGS args_so_far_v;
12817 cumulative_args_t args_so_far;
12819 if (!fun)
12820 /* Must be a libcall, all of which only use reg parms. */
12821 return false;
12823 fntype = fun;
12824 if (!TYPE_P (fun))
12825 fntype = TREE_TYPE (fun);
12827 /* Varargs functions need the parameter save area. */
12828 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12829 return true;
12831 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12832 args_so_far = pack_cumulative_args (&args_so_far_v);
12834 /* When incoming, we will have been passed the function decl.
12835 It is necessary to use the decl to handle K&R style functions,
12836 where TYPE_ARG_TYPES may not be available. */
12837 if (incoming)
12839 gcc_assert (DECL_P (fun));
12840 result = DECL_RESULT (fun);
12842 else
12843 result = TREE_TYPE (fntype);
12845 if (result && aggregate_value_p (result, fntype))
12847 if (!TYPE_P (result))
12848 result = TREE_TYPE (result);
12849 result = build_pointer_type (result);
12850 rs6000_parm_needs_stack (args_so_far, result);
12853 if (incoming)
12855 tree parm;
12857 for (parm = DECL_ARGUMENTS (fun);
12858 parm && parm != void_list_node;
12859 parm = TREE_CHAIN (parm))
12860 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12861 return true;
12863 else
12865 function_args_iterator args_iter;
12866 tree arg_type;
12868 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12869 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12870 return true;
12873 return false;
12876 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12877 usually a constant depending on the ABI. However, in the ELFv2 ABI
12878 the register parameter area is optional when calling a function that
12879 has a prototype in scope, has no variable argument list, and passes
12880 all parameters in registers. */
12882 int
12883 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12885 int reg_parm_stack_space;
12887 switch (DEFAULT_ABI)
12889 default:
12890 reg_parm_stack_space = 0;
12891 break;
12893 case ABI_AIX:
12894 case ABI_DARWIN:
12895 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12896 break;
12898 case ABI_ELFv2:
12899 /* ??? Recomputing this every time is a bit expensive. Is there
12900 a place to cache this information? */
12901 if (rs6000_function_parms_need_stack (fun, incoming))
12902 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12903 else
12904 reg_parm_stack_space = 0;
12905 break;
12908 return reg_parm_stack_space;
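/* Editorial sketch (not part of the original source): the effect of the
   ELFv2 case above, for two hypothetical prototypes.  */
#if 0
extern int add3 (int, int, int);        /* prototyped, no varargs, all
                                           args in GPRs: the 64-byte
                                           parameter save area may be
                                           omitted (space == 0).  */
extern int log_msg (const char *, ...); /* stdarg: the full save area
                                           is required.  */
#endif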
12911 static void
12912 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12914 int i;
12915 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12917 if (nregs == 0)
12918 return;
12920 for (i = 0; i < nregs; i++)
12922 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12923 if (reload_completed)
12925 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12926 tem = NULL_RTX;
12927 else
12928 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12929 i * GET_MODE_SIZE (reg_mode));
12931 else
12932 tem = replace_equiv_address (tem, XEXP (tem, 0));
12934 gcc_assert (tem);
12936 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12940 /* Perform any actions needed for a function that is receiving a
12941 variable number of arguments.
12943 CUM is as above.
12945 MODE and TYPE are the mode and type of the current parameter.
12947 PRETEND_SIZE is a variable that should be set to the amount of stack
12948 that must be pushed by the prolog to pretend that our caller pushed
12949 it.
12951 Normally, this macro will push all remaining incoming registers on the
12952 stack and set PRETEND_SIZE to the length of the registers pushed. */
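/* Editorial sketch (not part of the original source): for a variadic
   function such as the hypothetical one below, the hook that follows
   spills the unnamed register arguments to memory so that the va_arg
   machinery can walk them as one contiguous sequence.  */
#if 0
int
sum (int count, ...)    /* COUNT arrives in r3; the anonymous args
                           occupy r4..r10 (and f1..f8 under V.4
                           hard-float) and are dumped to the save
                           area set up below.  */
{
  /* ... va_start / va_arg, as expanded by the routines below ...  */
  return 0;
}
#endif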
12954 static void
12955 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12956 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12957 int no_rtl)
12959 CUMULATIVE_ARGS next_cum;
12960 int reg_size = TARGET_32BIT ? 4 : 8;
12961 rtx save_area = NULL_RTX, mem;
12962 int first_reg_offset;
12963 alias_set_type set;
12965 /* Skip the last named argument. */
12966 next_cum = *get_cumulative_args (cum);
12967 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12969 if (DEFAULT_ABI == ABI_V4)
12971 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12973 if (! no_rtl)
12975 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12976 HOST_WIDE_INT offset = 0;
12978 /* Try to optimize the size of the varargs save area.
12979 The ABI requires that ap.reg_save_area is doubleword
12980 aligned, but we don't need to allocate space for all
12981 the bytes, only those to which we will actually save
12982 anything. */
12983 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12984 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12985 if (TARGET_HARD_FLOAT
12986 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12987 && cfun->va_list_fpr_size)
12989 if (gpr_reg_num)
12990 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12991 * UNITS_PER_FP_WORD;
12992 if (cfun->va_list_fpr_size
12993 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12994 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12995 else
12996 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12997 * UNITS_PER_FP_WORD;
12999 if (gpr_reg_num)
13001 offset = -((first_reg_offset * reg_size) & ~7);
13002 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13004 gpr_reg_num = cfun->va_list_gpr_size;
13005 if (reg_size == 4 && (first_reg_offset & 1))
13006 gpr_reg_num++;
13008 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13010 else if (fpr_size)
13011 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13012 * UNITS_PER_FP_WORD
13013 - (int) (GP_ARG_NUM_REG * reg_size);
13015 if (gpr_size + fpr_size)
13017 rtx reg_save_area
13018 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13019 gcc_assert (GET_CODE (reg_save_area) == MEM);
13020 reg_save_area = XEXP (reg_save_area, 0);
13021 if (GET_CODE (reg_save_area) == PLUS)
13023 gcc_assert (XEXP (reg_save_area, 0)
13024 == virtual_stack_vars_rtx);
13025 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13026 offset += INTVAL (XEXP (reg_save_area, 1));
13028 else
13029 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13032 cfun->machine->varargs_save_offset = offset;
13033 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13036 else
13038 first_reg_offset = next_cum.words;
13039 save_area = crtl->args.internal_arg_pointer;
13041 if (targetm.calls.must_pass_in_stack (mode, type))
13042 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13045 set = get_varargs_alias_set ();
13046 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13047 && cfun->va_list_gpr_size)
13049 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13051 if (va_list_gpr_counter_field)
13052 /* V4 va_list_gpr_size counts number of registers needed. */
13053 n_gpr = cfun->va_list_gpr_size;
13054 else
13055 /* char * va_list instead counts number of bytes needed. */
13056 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13058 if (nregs > n_gpr)
13059 nregs = n_gpr;
13061 mem = gen_rtx_MEM (BLKmode,
13062 plus_constant (Pmode, save_area,
13063 first_reg_offset * reg_size));
13064 MEM_NOTRAP_P (mem) = 1;
13065 set_mem_alias_set (mem, set);
13066 set_mem_align (mem, BITS_PER_WORD);
13068 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13069 nregs);
13072 /* Save FP registers if needed. */
13073 if (DEFAULT_ABI == ABI_V4
13074 && TARGET_HARD_FLOAT
13075 && ! no_rtl
13076 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13077 && cfun->va_list_fpr_size)
13079 int fregno = next_cum.fregno, nregs;
13080 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13081 rtx lab = gen_label_rtx ();
13082 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13083 * UNITS_PER_FP_WORD);
13085 emit_jump_insn
13086 (gen_rtx_SET (pc_rtx,
13087 gen_rtx_IF_THEN_ELSE (VOIDmode,
13088 gen_rtx_NE (VOIDmode, cr1,
13089 const0_rtx),
13090 gen_rtx_LABEL_REF (VOIDmode, lab),
13091 pc_rtx)));
13093 for (nregs = 0;
13094 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13095 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13097 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13098 ? DFmode : SFmode,
13099 plus_constant (Pmode, save_area, off));
13100 MEM_NOTRAP_P (mem) = 1;
13101 set_mem_alias_set (mem, set);
13102 set_mem_align (mem, GET_MODE_ALIGNMENT (
13103 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13104 ? DFmode : SFmode));
13105 emit_move_insn (mem, gen_rtx_REG (
13106 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13107 ? DFmode : SFmode, fregno));
13110 emit_label (lab);
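/* Editorial note (not part of the original source): the conditional
   branch around the FP stores above relies on the SVR4 convention that
   a caller sets CR bit 6 when it passes floating-point arguments in FP
   registers.  Roughly, the emitted control flow is the sketch below;
   the bit arithmetic is an assumption for illustration.  */
#if 0
if (((CR >> 25) & 1) == 0)   /* CR bit 6 clear: caller passed no FP
                                args in registers */
  goto lab;                  /* skip all the f1..f8 stores */
/* ... stores of f1..f8 into the register save area ... */
lab:;
#endif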
13114 /* Create the va_list data type. */
13116 static tree
13117 rs6000_build_builtin_va_list (void)
13119 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13121 /* For AIX, prefer 'char *' because that's what the system
13122 header files like. */
13123 if (DEFAULT_ABI != ABI_V4)
13124 return build_pointer_type (char_type_node);
13126 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13127 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13128 get_identifier ("__va_list_tag"), record);
13130 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13131 unsigned_char_type_node);
13132 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13133 unsigned_char_type_node);
13134 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13135 every user file. */
13136 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13137 get_identifier ("reserved"), short_unsigned_type_node);
13138 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13139 get_identifier ("overflow_arg_area"),
13140 ptr_type_node);
13141 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13142 get_identifier ("reg_save_area"),
13143 ptr_type_node);
13145 va_list_gpr_counter_field = f_gpr;
13146 va_list_fpr_counter_field = f_fpr;
13148 DECL_FIELD_CONTEXT (f_gpr) = record;
13149 DECL_FIELD_CONTEXT (f_fpr) = record;
13150 DECL_FIELD_CONTEXT (f_res) = record;
13151 DECL_FIELD_CONTEXT (f_ovf) = record;
13152 DECL_FIELD_CONTEXT (f_sav) = record;
13154 TYPE_STUB_DECL (record) = type_decl;
13155 TYPE_NAME (record) = type_decl;
13156 TYPE_FIELDS (record) = f_gpr;
13157 DECL_CHAIN (f_gpr) = f_fpr;
13158 DECL_CHAIN (f_fpr) = f_res;
13159 DECL_CHAIN (f_res) = f_ovf;
13160 DECL_CHAIN (f_ovf) = f_sav;
13162 layout_type (record);
13164 /* The correct type is an array type of one element. */
13165 return build_array_type (record, build_index_type (size_zero_node));
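/* Editorial note (not part of the original source): the record built
   above is the familiar SVR4 PowerPC va_list; its C equivalent is
   sketched below.  */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;           /* count of GPRs (r3..r10) consumed   */
  unsigned char fpr;           /* count of FPRs (f1..f8) consumed    */
  unsigned short reserved;     /* the named padding from above       */
  void *overflow_arg_area;     /* arguments passed on the stack      */
  void *reg_save_area;         /* block filled by the varargs code   */
} __gnuc_va_list[1];           /* array type of one element          */
#endif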
13168 /* Implement va_start. */
13170 static void
13171 rs6000_va_start (tree valist, rtx nextarg)
13173 HOST_WIDE_INT words, n_gpr, n_fpr;
13174 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13175 tree gpr, fpr, ovf, sav, t;
13177 /* Only SVR4 needs something special. */
13178 if (DEFAULT_ABI != ABI_V4)
13180 std_expand_builtin_va_start (valist, nextarg);
13181 return;
13184 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13185 f_fpr = DECL_CHAIN (f_gpr);
13186 f_res = DECL_CHAIN (f_fpr);
13187 f_ovf = DECL_CHAIN (f_res);
13188 f_sav = DECL_CHAIN (f_ovf);
13190 valist = build_simple_mem_ref (valist);
13191 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13192 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13193 f_fpr, NULL_TREE);
13194 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13195 f_ovf, NULL_TREE);
13196 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13197 f_sav, NULL_TREE);
13199 /* Count number of gp and fp argument registers used. */
13200 words = crtl->args.info.words;
13201 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13202 GP_ARG_NUM_REG);
13203 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13204 FP_ARG_NUM_REG);
13206 if (TARGET_DEBUG_ARG)
13207 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13208 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13209 words, n_gpr, n_fpr);
13211 if (cfun->va_list_gpr_size)
13213 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13214 build_int_cst (NULL_TREE, n_gpr));
13215 TREE_SIDE_EFFECTS (t) = 1;
13216 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13219 if (cfun->va_list_fpr_size)
13221 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13222 build_int_cst (NULL_TREE, n_fpr));
13223 TREE_SIDE_EFFECTS (t) = 1;
13224 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13226 #ifdef HAVE_AS_GNU_ATTRIBUTE
13227 if (call_ABI_of_interest (cfun->decl))
13228 rs6000_passes_float = true;
13229 #endif
13232 /* Find the overflow area. */
13233 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13234 if (words != 0)
13235 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13236 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13237 TREE_SIDE_EFFECTS (t) = 1;
13238 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13240 /* If there were no va_arg invocations, don't set up the register
13241 save area. */
13242 if (!cfun->va_list_gpr_size
13243 && !cfun->va_list_fpr_size
13244 && n_gpr < GP_ARG_NUM_REG
13245 && n_fpr < FP_ARG_V4_MAX_REG)
13246 return;
13248 /* Find the register save area. */
13249 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13250 if (cfun->machine->varargs_save_offset)
13251 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13252 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13253 TREE_SIDE_EFFECTS (t) = 1;
13254 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
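/* Editorial sketch (not part of the original source): for the V.4 ABI
   the expansion above behaves roughly like the C below, where N_GPR,
   N_FPR and WORDS are the counts computed from crtl->args.info, 4 is
   the 32-bit word size, and incoming_args / frame_base are stand-ins
   for the corresponding virtual registers.  */
#if 0
void
v4_va_start (struct __va_list_tag *ap)
{
  ap->gpr = N_GPR;                           /* GPRs used by named args */
  ap->fpr = N_FPR;                           /* FPRs used by named args */
  ap->overflow_arg_area = incoming_args + WORDS * 4;
  ap->reg_save_area = frame_base + varargs_save_offset;
}
#endif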
13257 /* Implement va_arg. */
13259 static tree
13260 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13261 gimple_seq *post_p)
13263 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13264 tree gpr, fpr, ovf, sav, reg, t, u;
13265 int size, rsize, n_reg, sav_ofs, sav_scale;
13266 tree lab_false, lab_over, addr;
13267 int align;
13268 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13269 int regalign = 0;
13270 gimple *stmt;
13272 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13274 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13275 return build_va_arg_indirect_ref (t);
13278 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13279 earlier version of gcc, with the property that it always applied alignment
13280 adjustments to the va-args (even for zero-sized types). The cheapest way
13281 to deal with this is to replicate the effect of the part of
13282 std_gimplify_va_arg_expr that carries out the align adjust, for the
13283 relevant case.
13284 We don't need to check for pass-by-reference because of the test above.
13285 We can return a simplified answer, since we know there's no offset to add. */
13287 if (((TARGET_MACHO
13288 && rs6000_darwin64_abi)
13289 || DEFAULT_ABI == ABI_ELFv2
13290 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13291 && integer_zerop (TYPE_SIZE (type)))
13293 unsigned HOST_WIDE_INT align, boundary;
13294 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13295 align = PARM_BOUNDARY / BITS_PER_UNIT;
13296 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13297 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13298 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13299 boundary /= BITS_PER_UNIT;
13300 if (boundary > align)
13302 tree t;
13303 /* This updates arg ptr by the amount that would be necessary
13304 to align the zero-sized (but not zero-alignment) item. */
13305 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13306 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13307 gimplify_and_add (t, pre_p);
13309 t = fold_convert (sizetype, valist_tmp);
13310 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13311 fold_convert (TREE_TYPE (valist),
13312 fold_build2 (BIT_AND_EXPR, sizetype, t,
13313 size_int (-boundary))));
13314 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13315 gimplify_and_add (t, pre_p);
13317 /* Since it is zero-sized there's no increment for the item itself. */
13318 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13319 return build_va_arg_indirect_ref (valist_tmp);
13322 if (DEFAULT_ABI != ABI_V4)
13324 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13326 tree elem_type = TREE_TYPE (type);
13327 machine_mode elem_mode = TYPE_MODE (elem_type);
13328 int elem_size = GET_MODE_SIZE (elem_mode);
13330 if (elem_size < UNITS_PER_WORD)
13332 tree real_part, imag_part;
13333 gimple_seq post = NULL;
13335 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13336 &post);
13337 /* Copy the value into a temporary, lest the formal temporary
13338 be reused out from under us. */
13339 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13340 gimple_seq_add_seq (pre_p, post);
13342 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13343 post_p);
13345 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13349 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13352 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13353 f_fpr = DECL_CHAIN (f_gpr);
13354 f_res = DECL_CHAIN (f_fpr);
13355 f_ovf = DECL_CHAIN (f_res);
13356 f_sav = DECL_CHAIN (f_ovf);
13358 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13359 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13360 f_fpr, NULL_TREE);
13361 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13362 f_ovf, NULL_TREE);
13363 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13364 f_sav, NULL_TREE);
13366 size = int_size_in_bytes (type);
13367 rsize = (size + 3) / 4;
13368 int pad = 4 * rsize - size;
13369 align = 1;
13371 machine_mode mode = TYPE_MODE (type);
13372 if (abi_v4_pass_in_fpr (mode))
13374 /* FP args go in FP registers, if present. */
13375 reg = fpr;
13376 n_reg = (size + 7) / 8;
13377 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13378 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13379 if (mode != SFmode && mode != SDmode)
13380 align = 8;
13382 else
13384 /* Otherwise into GP registers. */
13385 reg = gpr;
13386 n_reg = rsize;
13387 sav_ofs = 0;
13388 sav_scale = 4;
13389 if (n_reg == 2)
13390 align = 8;
13393 /* Pull the value out of the saved registers.... */
13395 lab_over = NULL;
13396 addr = create_tmp_var (ptr_type_node, "addr");
13398 /* AltiVec vectors never go in registers when -mabi=altivec. */
13399 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13400 align = 16;
13401 else
13403 lab_false = create_artificial_label (input_location);
13404 lab_over = create_artificial_label (input_location);
13406 /* Long long is aligned in the registers, as are any other 2-GPR
13407 items such as complex int, due to a historical mistake. */
13408 u = reg;
13409 if (n_reg == 2 && reg == gpr)
13411 regalign = 1;
13412 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13413 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13414 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13415 unshare_expr (reg), u);
13417 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13418 reg number is 0 for f1, so we want to make it odd. */
13419 else if (reg == fpr && mode == TDmode)
13421 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13422 build_int_cst (TREE_TYPE (reg), 1));
13423 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13426 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13427 t = build2 (GE_EXPR, boolean_type_node, u, t);
13428 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13429 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13430 gimplify_and_add (t, pre_p);
13432 t = sav;
13433 if (sav_ofs)
13434 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13436 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13437 build_int_cst (TREE_TYPE (reg), n_reg));
13438 u = fold_convert (sizetype, u);
13439 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13440 t = fold_build_pointer_plus (t, u);
13442 /* _Decimal32 varargs are located in the second word of the 64-bit
13443 FP register for 32-bit binaries. */
13444 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13445 t = fold_build_pointer_plus_hwi (t, size);
13447 /* Args are passed right-aligned. */
13448 if (BYTES_BIG_ENDIAN)
13449 t = fold_build_pointer_plus_hwi (t, pad);
13451 gimplify_assign (addr, t, pre_p);
13453 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13455 stmt = gimple_build_label (lab_false);
13456 gimple_seq_add_stmt (pre_p, stmt);
13458 if ((n_reg == 2 && !regalign) || n_reg > 2)
13460 /* Ensure that we don't find any more args in regs.
13461 Alignment has been taken care of for special cases. */
13462 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13466 /* ... otherwise out of the overflow area. */
13468 /* Care for on-stack alignment if needed. */
13469 t = ovf;
13470 if (align != 1)
13472 t = fold_build_pointer_plus_hwi (t, align - 1);
13473 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13474 build_int_cst (TREE_TYPE (t), -align));
13477 /* Args are passed right-aligned. */
13478 if (BYTES_BIG_ENDIAN)
13479 t = fold_build_pointer_plus_hwi (t, pad);
13481 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13483 gimplify_assign (unshare_expr (addr), t, pre_p);
13485 t = fold_build_pointer_plus_hwi (t, size);
13486 gimplify_assign (unshare_expr (ovf), t, pre_p);
13488 if (lab_over)
13490 stmt = gimple_build_label (lab_over);
13491 gimple_seq_add_stmt (pre_p, stmt);
13494 if (STRICT_ALIGNMENT
13495 && (TYPE_ALIGN (type)
13496 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13498 /* The value (of type complex double, for example) may not be
13499 aligned in memory in the saved registers, so copy via a
13500 temporary. (This is the same code as used for SPARC.) */
13501 tree tmp = create_tmp_var (type, "va_arg_tmp");
13502 tree dest_addr = build_fold_addr_expr (tmp);
13504 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13505 3, dest_addr, addr, size_int (rsize * 4));
13507 gimplify_and_add (copy, pre_p);
13508 addr = dest_addr;
13511 addr = fold_convert (ptrtype, addr);
13512 return build_va_arg_indirect_ref (addr);
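/* Editorial sketch (not part of the original source): the V.4 expansion
   above amounts to the classic two-area algorithm, shown here for a
   GPR-class argument (the FPR case is analogous with sav_ofs/sav_scale
   as computed above; register-pair alignment and right-alignment
   padding are elided).  */
#if 0
static void *
v4_va_arg_addr (struct __va_list_tag *ap, int n_reg, int size)
{
  if (ap->gpr + n_reg <= 8)                /* still fits in r3..r10 */
    {
      void *p = (char *) ap->reg_save_area + ap->gpr * 4;
      ap->gpr += n_reg;
      return p;
    }
  ap->gpr = 8;                             /* no more args in regs   */
  void *p = ap->overflow_arg_area;         /* take it from the stack */
  ap->overflow_arg_area = (char *) p + size;
  return p;
}
#endif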
13515 /* Builtins. */
13517 static void
13518 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13520 tree t;
13521 unsigned classify = rs6000_builtin_info[(int)code].attr;
13522 const char *attr_string = "";
13524 gcc_assert (name != NULL);
13525 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13527 if (rs6000_builtin_decls[(int)code])
13528 fatal_error (input_location,
13529 "internal error: builtin function %qs already processed",
13530 name);
13532 rs6000_builtin_decls[(int)code] = t =
13533 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13535 /* Set any special attributes. */
13536 if ((classify & RS6000_BTC_CONST) != 0)
13538 /* const function, function only depends on the inputs. */
13539 TREE_READONLY (t) = 1;
13540 TREE_NOTHROW (t) = 1;
13541 attr_string = ", const";
13543 else if ((classify & RS6000_BTC_PURE) != 0)
13545 /* pure function, function can read global memory, but does not set any
13546 external state. */
13547 DECL_PURE_P (t) = 1;
13548 TREE_NOTHROW (t) = 1;
13549 attr_string = ", pure";
13551 else if ((classify & RS6000_BTC_FP) != 0)
13553 /* Function is a math function. If rounding mode is on, then treat the
13554 function as not reading global memory, but it can have arbitrary side
13555 effects. If it is off, then assume the function is a const function.
13556 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13557 builtin-attribute.def that is used for the math functions. */
13558 TREE_NOTHROW (t) = 1;
13559 if (flag_rounding_math)
13561 DECL_PURE_P (t) = 1;
13562 DECL_IS_NOVOPS (t) = 1;
13563 attr_string = ", fp, pure";
13565 else
13567 TREE_READONLY (t) = 1;
13568 attr_string = ", fp, const";
13571 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13572 gcc_unreachable ();
13574 if (TARGET_DEBUG_BUILTIN)
13575 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13576 (int)code, name, attr_string);
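/* Editorial note (not part of the original source): a typical call to
   def_builtin during builtin registration looks like the line below;
   the function-type node shown is illustrative.  */
#if 0
def_builtin ("__builtin_altivec_vaddubm", opaque_ftype_opaque_opaque,
             ALTIVEC_BUILTIN_VADDUBM);
#endif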
13579 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13581 #undef RS6000_BUILTIN_0
13582 #undef RS6000_BUILTIN_1
13583 #undef RS6000_BUILTIN_2
13584 #undef RS6000_BUILTIN_3
13585 #undef RS6000_BUILTIN_A
13586 #undef RS6000_BUILTIN_D
13587 #undef RS6000_BUILTIN_H
13588 #undef RS6000_BUILTIN_P
13589 #undef RS6000_BUILTIN_Q
13590 #undef RS6000_BUILTIN_X
13592 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13593 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13594 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13595 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13596 { MASK, ICODE, NAME, ENUM },
13598 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13599 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13600 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13601 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13602 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13603 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13605 static const struct builtin_description bdesc_3arg[] =
13607 #include "rs6000-builtin.def"
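/* Editorial note (not part of the original source): the #undef/#define
   blocks in this part of the file are the classic X-macro pattern:
   rs6000-builtin.def expands one RS6000_BUILTIN_<class> macro per
   builtin, and each table redefines exactly one class to emit an
   initializer while the others expand to nothing, so the same .def
   file yields one table per builtin class.  A generic sketch with a
   hypothetical colors.def:  */
#if 0
/* colors.def contains:  X(RED) X(GREEN) X(BLUE)  */
#define X(name) name,
enum color {
#include "colors.def"
};
#undef X
#define X(name) #name,
static const char *color_names[] = {
#include "colors.def"
};
#undef X
#endif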
13610 /* DST operations: void foo (void *, const int, const char). */
13612 #undef RS6000_BUILTIN_0
13613 #undef RS6000_BUILTIN_1
13614 #undef RS6000_BUILTIN_2
13615 #undef RS6000_BUILTIN_3
13616 #undef RS6000_BUILTIN_A
13617 #undef RS6000_BUILTIN_D
13618 #undef RS6000_BUILTIN_H
13619 #undef RS6000_BUILTIN_P
13620 #undef RS6000_BUILTIN_Q
13621 #undef RS6000_BUILTIN_X
13623 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13624 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13625 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13626 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13627 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13628 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13629 { MASK, ICODE, NAME, ENUM },
13631 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13634 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13636 static const struct builtin_description bdesc_dst[] =
13638 #include "rs6000-builtin.def"
13641 /* Simple binary operations: VECc = foo (VECa, VECb). */
13643 #undef RS6000_BUILTIN_0
13644 #undef RS6000_BUILTIN_1
13645 #undef RS6000_BUILTIN_2
13646 #undef RS6000_BUILTIN_3
13647 #undef RS6000_BUILTIN_A
13648 #undef RS6000_BUILTIN_D
13649 #undef RS6000_BUILTIN_H
13650 #undef RS6000_BUILTIN_P
13651 #undef RS6000_BUILTIN_Q
13652 #undef RS6000_BUILTIN_X
13654 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13655 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13656 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13657 { MASK, ICODE, NAME, ENUM },
13659 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13660 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13661 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13664 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13665 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13667 static const struct builtin_description bdesc_2arg[] =
13669 #include "rs6000-builtin.def"
13672 #undef RS6000_BUILTIN_0
13673 #undef RS6000_BUILTIN_1
13674 #undef RS6000_BUILTIN_2
13675 #undef RS6000_BUILTIN_3
13676 #undef RS6000_BUILTIN_A
13677 #undef RS6000_BUILTIN_D
13678 #undef RS6000_BUILTIN_H
13679 #undef RS6000_BUILTIN_P
13680 #undef RS6000_BUILTIN_Q
13681 #undef RS6000_BUILTIN_X
13683 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13684 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13685 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13686 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13687 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13688 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13689 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13690 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13691 { MASK, ICODE, NAME, ENUM },
13693 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13696 /* AltiVec predicates. */
13698 static const struct builtin_description bdesc_altivec_preds[] =
13700 #include "rs6000-builtin.def"
13703 /* PAIRED predicates. */
13704 #undef RS6000_BUILTIN_0
13705 #undef RS6000_BUILTIN_1
13706 #undef RS6000_BUILTIN_2
13707 #undef RS6000_BUILTIN_3
13708 #undef RS6000_BUILTIN_A
13709 #undef RS6000_BUILTIN_D
13710 #undef RS6000_BUILTIN_H
13711 #undef RS6000_BUILTIN_P
13712 #undef RS6000_BUILTIN_Q
13713 #undef RS6000_BUILTIN_X
13715 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13716 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13717 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13718 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13719 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13720 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13721 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13722 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13724 { MASK, ICODE, NAME, ENUM },
13726 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13728 static const struct builtin_description bdesc_paired_preds[] =
13730 #include "rs6000-builtin.def"
13733 /* ABS* operations. */
13735 #undef RS6000_BUILTIN_0
13736 #undef RS6000_BUILTIN_1
13737 #undef RS6000_BUILTIN_2
13738 #undef RS6000_BUILTIN_3
13739 #undef RS6000_BUILTIN_A
13740 #undef RS6000_BUILTIN_D
13741 #undef RS6000_BUILTIN_H
13742 #undef RS6000_BUILTIN_P
13743 #undef RS6000_BUILTIN_Q
13744 #undef RS6000_BUILTIN_X
13746 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13747 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13748 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13749 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13750 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13751 { MASK, ICODE, NAME, ENUM },
13753 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13759 static const struct builtin_description bdesc_abs[] =
13761 #include "rs6000-builtin.def"
13764 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13765 foo (VECa). */
13767 #undef RS6000_BUILTIN_0
13768 #undef RS6000_BUILTIN_1
13769 #undef RS6000_BUILTIN_2
13770 #undef RS6000_BUILTIN_3
13771 #undef RS6000_BUILTIN_A
13772 #undef RS6000_BUILTIN_D
13773 #undef RS6000_BUILTIN_H
13774 #undef RS6000_BUILTIN_P
13775 #undef RS6000_BUILTIN_Q
13776 #undef RS6000_BUILTIN_X
13778 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13779 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13780 { MASK, ICODE, NAME, ENUM },
13782 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13783 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13784 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13785 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13786 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13787 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13788 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13789 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13791 static const struct builtin_description bdesc_1arg[] =
13793 #include "rs6000-builtin.def"
13796 /* Simple no-argument operations: result = __builtin_darn_32 () */
13798 #undef RS6000_BUILTIN_0
13799 #undef RS6000_BUILTIN_1
13800 #undef RS6000_BUILTIN_2
13801 #undef RS6000_BUILTIN_3
13802 #undef RS6000_BUILTIN_A
13803 #undef RS6000_BUILTIN_D
13804 #undef RS6000_BUILTIN_H
13805 #undef RS6000_BUILTIN_P
13806 #undef RS6000_BUILTIN_Q
13807 #undef RS6000_BUILTIN_X
13809 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13810 { MASK, ICODE, NAME, ENUM },
13812 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13814 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13815 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13816 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13818 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13819 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13822 static const struct builtin_description bdesc_0arg[] =
13824 #include "rs6000-builtin.def"
13827 /* HTM builtins. */
13828 #undef RS6000_BUILTIN_0
13829 #undef RS6000_BUILTIN_1
13830 #undef RS6000_BUILTIN_2
13831 #undef RS6000_BUILTIN_3
13832 #undef RS6000_BUILTIN_A
13833 #undef RS6000_BUILTIN_D
13834 #undef RS6000_BUILTIN_H
13835 #undef RS6000_BUILTIN_P
13836 #undef RS6000_BUILTIN_Q
13837 #undef RS6000_BUILTIN_X
13839 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13840 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13841 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13842 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13843 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13844 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13845 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13846 { MASK, ICODE, NAME, ENUM },
13848 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13852 static const struct builtin_description bdesc_htm[] =
13854 #include "rs6000-builtin.def"
13857 #undef RS6000_BUILTIN_0
13858 #undef RS6000_BUILTIN_1
13859 #undef RS6000_BUILTIN_2
13860 #undef RS6000_BUILTIN_3
13861 #undef RS6000_BUILTIN_A
13862 #undef RS6000_BUILTIN_D
13863 #undef RS6000_BUILTIN_H
13864 #undef RS6000_BUILTIN_P
13865 #undef RS6000_BUILTIN_Q
13867 /* Return true if a builtin function is overloaded. */
13868 bool
13869 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13871 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13874 const char *
13875 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13877 return rs6000_builtin_info[(int)fncode].name;
13880 /* Expand an expression EXP that calls a builtin without arguments. */
13881 static rtx
13882 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13884 rtx pat;
13885 machine_mode tmode = insn_data[icode].operand[0].mode;
13887 if (icode == CODE_FOR_nothing)
13888 /* Builtin not supported on this processor. */
13889 return 0;
13891 if (target == 0
13892 || GET_MODE (target) != tmode
13893 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13894 target = gen_reg_rtx (tmode);
13896 pat = GEN_FCN (icode) (target);
13897 if (! pat)
13898 return 0;
13899 emit_insn (pat);
13901 return target;
13905 static rtx
13906 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13908 rtx pat;
13909 tree arg0 = CALL_EXPR_ARG (exp, 0);
13910 tree arg1 = CALL_EXPR_ARG (exp, 1);
13911 rtx op0 = expand_normal (arg0);
13912 rtx op1 = expand_normal (arg1);
13913 machine_mode mode0 = insn_data[icode].operand[0].mode;
13914 machine_mode mode1 = insn_data[icode].operand[1].mode;
13916 if (icode == CODE_FOR_nothing)
13917 /* Builtin not supported on this processor. */
13918 return 0;
13920 /* If we got invalid arguments bail out before generating bad rtl. */
13921 if (arg0 == error_mark_node || arg1 == error_mark_node)
13922 return const0_rtx;
13924 if (GET_CODE (op0) != CONST_INT
13925 || INTVAL (op0) > 255
13926 || INTVAL (op0) < 0)
13928 error ("argument 1 must be an 8-bit field value");
13929 return const0_rtx;
13932 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13933 op0 = copy_to_mode_reg (mode0, op0);
13935 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13936 op1 = copy_to_mode_reg (mode1, op1);
13938 pat = GEN_FCN (icode) (op0, op1);
13939 if (! pat)
13940 return const0_rtx;
13941 emit_insn (pat);
13943 return NULL_RTX;
13946 static rtx
13947 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13949 rtx pat;
13950 tree arg0 = CALL_EXPR_ARG (exp, 0);
13951 rtx op0 = expand_normal (arg0);
13952 machine_mode tmode = insn_data[icode].operand[0].mode;
13953 machine_mode mode0 = insn_data[icode].operand[1].mode;
13955 if (icode == CODE_FOR_nothing)
13956 /* Builtin not supported on this processor. */
13957 return 0;
13959 /* If we got invalid arguments bail out before generating bad rtl. */
13960 if (arg0 == error_mark_node)
13961 return const0_rtx;
13963 if (icode == CODE_FOR_altivec_vspltisb
13964 || icode == CODE_FOR_altivec_vspltish
13965 || icode == CODE_FOR_altivec_vspltisw)
13967 /* Only allow 5-bit *signed* literals. */
13968 if (GET_CODE (op0) != CONST_INT
13969 || INTVAL (op0) > 15
13970 || INTVAL (op0) < -16)
13972 error ("argument 1 must be a 5-bit signed literal");
13973 return CONST0_RTX (tmode);
13977 if (target == 0
13978 || GET_MODE (target) != tmode
13979 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13980 target = gen_reg_rtx (tmode);
13982 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13983 op0 = copy_to_mode_reg (mode0, op0);
13985 pat = GEN_FCN (icode) (target, op0);
13986 if (! pat)
13987 return 0;
13988 emit_insn (pat);
13990 return target;
13993 static rtx
13994 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13996 rtx pat, scratch1, scratch2;
13997 tree arg0 = CALL_EXPR_ARG (exp, 0);
13998 rtx op0 = expand_normal (arg0);
13999 machine_mode tmode = insn_data[icode].operand[0].mode;
14000 machine_mode mode0 = insn_data[icode].operand[1].mode;
14002 /* If we have invalid arguments, bail out before generating bad rtl. */
14003 if (arg0 == error_mark_node)
14004 return const0_rtx;
14006 if (target == 0
14007 || GET_MODE (target) != tmode
14008 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14009 target = gen_reg_rtx (tmode);
14011 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14012 op0 = copy_to_mode_reg (mode0, op0);
14014 scratch1 = gen_reg_rtx (mode0);
14015 scratch2 = gen_reg_rtx (mode0);
14017 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14018 if (! pat)
14019 return 0;
14020 emit_insn (pat);
14022 return target;
14025 static rtx
14026 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14028 rtx pat;
14029 tree arg0 = CALL_EXPR_ARG (exp, 0);
14030 tree arg1 = CALL_EXPR_ARG (exp, 1);
14031 rtx op0 = expand_normal (arg0);
14032 rtx op1 = expand_normal (arg1);
14033 machine_mode tmode = insn_data[icode].operand[0].mode;
14034 machine_mode mode0 = insn_data[icode].operand[1].mode;
14035 machine_mode mode1 = insn_data[icode].operand[2].mode;
14037 if (icode == CODE_FOR_nothing)
14038 /* Builtin not supported on this processor. */
14039 return 0;
14041 /* If we got invalid arguments bail out before generating bad rtl. */
14042 if (arg0 == error_mark_node || arg1 == error_mark_node)
14043 return const0_rtx;
14045 if (icode == CODE_FOR_altivec_vcfux
14046 || icode == CODE_FOR_altivec_vcfsx
14047 || icode == CODE_FOR_altivec_vctsxs
14048 || icode == CODE_FOR_altivec_vctuxs
14049 || icode == CODE_FOR_altivec_vspltb
14050 || icode == CODE_FOR_altivec_vsplth
14051 || icode == CODE_FOR_altivec_vspltw)
14053 /* Only allow 5-bit unsigned literals. */
14054 STRIP_NOPS (arg1);
14055 if (TREE_CODE (arg1) != INTEGER_CST
14056 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14058 error ("argument 2 must be a 5-bit unsigned literal");
14059 return CONST0_RTX (tmode);
14062 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14063 || icode == CODE_FOR_dfptstsfi_lt_dd
14064 || icode == CODE_FOR_dfptstsfi_gt_dd
14065 || icode == CODE_FOR_dfptstsfi_unordered_dd
14066 || icode == CODE_FOR_dfptstsfi_eq_td
14067 || icode == CODE_FOR_dfptstsfi_lt_td
14068 || icode == CODE_FOR_dfptstsfi_gt_td
14069 || icode == CODE_FOR_dfptstsfi_unordered_td)
14071 /* Only allow 6-bit unsigned literals. */
14072 STRIP_NOPS (arg0);
14073 if (TREE_CODE (arg0) != INTEGER_CST
14074 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14076 error ("argument 1 must be a 6-bit unsigned literal");
14077 return CONST0_RTX (tmode);
14080 else if (icode == CODE_FOR_xststdcqp
14081 || icode == CODE_FOR_xststdcdp
14082 || icode == CODE_FOR_xststdcsp
14083 || icode == CODE_FOR_xvtstdcdp
14084 || icode == CODE_FOR_xvtstdcsp)
14086 /* Only allow 7-bit unsigned literals. */
14087 STRIP_NOPS (arg1);
14088 if (TREE_CODE (arg1) != INTEGER_CST
14089 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14091 error ("argument 2 must be a 7-bit unsigned literal");
14092 return CONST0_RTX (tmode);
14095 else if (icode == CODE_FOR_unpackv1ti
14096 || icode == CODE_FOR_unpackkf
14097 || icode == CODE_FOR_unpacktf
14098 || icode == CODE_FOR_unpackif
14099 || icode == CODE_FOR_unpacktd)
14101 /* Only allow 1-bit unsigned literals. */
14102 STRIP_NOPS (arg1);
14103 if (TREE_CODE (arg1) != INTEGER_CST
14104 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14106 error ("argument 2 must be a 1-bit unsigned literal");
14107 return CONST0_RTX (tmode);
14111 if (target == 0
14112 || GET_MODE (target) != tmode
14113 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14114 target = gen_reg_rtx (tmode);
14116 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14117 op0 = copy_to_mode_reg (mode0, op0);
14118 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14119 op1 = copy_to_mode_reg (mode1, op1);
14121 pat = GEN_FCN (icode) (target, op0, op1);
14122 if (! pat)
14123 return 0;
14124 emit_insn (pat);
14126 return target;
14129 static rtx
14130 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14132 rtx pat, scratch;
14133 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14134 tree arg0 = CALL_EXPR_ARG (exp, 1);
14135 tree arg1 = CALL_EXPR_ARG (exp, 2);
14136 rtx op0 = expand_normal (arg0);
14137 rtx op1 = expand_normal (arg1);
14138 machine_mode tmode = SImode;
14139 machine_mode mode0 = insn_data[icode].operand[1].mode;
14140 machine_mode mode1 = insn_data[icode].operand[2].mode;
14141 int cr6_form_int;
14143 if (TREE_CODE (cr6_form) != INTEGER_CST)
14145 error ("argument 1 of %qs must be a constant",
14146 "__builtin_altivec_predicate");
14147 return const0_rtx;
14149 else
14150 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14152 gcc_assert (mode0 == mode1);
14154 /* If we have invalid arguments, bail out before generating bad rtl. */
14155 if (arg0 == error_mark_node || arg1 == error_mark_node)
14156 return const0_rtx;
14158 if (target == 0
14159 || GET_MODE (target) != tmode
14160 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14161 target = gen_reg_rtx (tmode);
14163 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14164 op0 = copy_to_mode_reg (mode0, op0);
14165 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14166 op1 = copy_to_mode_reg (mode1, op1);
14168 /* Note that for many of the relevant operations (e.g. cmpne or
14169 cmpeq) with float or double operands, it makes more sense for the
14170 mode of the allocated scratch register to be a vector of
14171 integers. But the choice to copy the mode of operand 0 was made
14172 long ago and there are no plans to change it. */
14173 scratch = gen_reg_rtx (mode0);
14175 pat = GEN_FCN (icode) (scratch, op0, op1);
14176 if (! pat)
14177 return 0;
14178 emit_insn (pat);
14180 /* The vec_any* and vec_all* predicates use the same opcodes for two
14181 different operations, but the bits in CR6 will be different
14182 depending on what information we want. So we have to play tricks
14183 with CR6 to get the right bits out.
14185 If you think this is disgusting, look at the specs for the
14186 AltiVec predicates. */
14188 switch (cr6_form_int)
14190 case 0:
14191 emit_insn (gen_cr6_test_for_zero (target));
14192 break;
14193 case 1:
14194 emit_insn (gen_cr6_test_for_zero_reverse (target));
14195 break;
14196 case 2:
14197 emit_insn (gen_cr6_test_for_lt (target));
14198 break;
14199 case 3:
14200 emit_insn (gen_cr6_test_for_lt_reverse (target));
14201 break;
14202 default:
14203 error ("argument 1 of %qs is out of range",
14204 "__builtin_altivec_predicate");
14205 break;
14208 return target;
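/* Editorial note (not part of the original source): as a concrete
   illustration of the CR6 handling above, a single vcmpequw. compare
   can back four user-level predicates, distinguished only by the
   CR6_FORM argument: form 0 tests "all false", form 1 "not all false",
   form 2 "all true" and form 3 "not all true".  */
#if 0
vector int a, b;
int r1 = vec_all_eq (a, b);   /* vcmpequw. + "all true" test      */
int r2 = vec_any_ne (a, b);   /* same compare, "not all true"     */
#endif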
14211 static rtx
14212 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14214 rtx pat, addr;
14215 tree arg0 = CALL_EXPR_ARG (exp, 0);
14216 tree arg1 = CALL_EXPR_ARG (exp, 1);
14217 machine_mode tmode = insn_data[icode].operand[0].mode;
14218 machine_mode mode0 = Pmode;
14219 machine_mode mode1 = Pmode;
14220 rtx op0 = expand_normal (arg0);
14221 rtx op1 = expand_normal (arg1);
14223 if (icode == CODE_FOR_nothing)
14224 /* Builtin not supported on this processor. */
14225 return 0;
14227 /* If we got invalid arguments bail out before generating bad rtl. */
14228 if (arg0 == error_mark_node || arg1 == error_mark_node)
14229 return const0_rtx;
14231 if (target == 0
14232 || GET_MODE (target) != tmode
14233 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14234 target = gen_reg_rtx (tmode);
14236 op1 = copy_to_mode_reg (mode1, op1);
14238 if (op0 == const0_rtx)
14240 addr = gen_rtx_MEM (tmode, op1);
14242 else
14244 op0 = copy_to_mode_reg (mode0, op0);
14245 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14248 pat = GEN_FCN (icode) (target, addr);
14250 if (! pat)
14251 return 0;
14252 emit_insn (pat);
14254 return target;
14257 /* Return a constant vector for use as a little-endian permute control vector
14258 to reverse the order of elements of the given vector mode. */
14259 static rtx
14260 swap_selector_for_mode (machine_mode mode)
14262 /* These are little endian vectors, so their elements are reversed
14263 from what you would normally expect for a permute control vector. */
14264 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14265 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14266 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14267 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14268 unsigned int *swaparray, i;
14269 rtx perm[16];
14271 switch (mode)
14273 case E_V2DFmode:
14274 case E_V2DImode:
14275 swaparray = swap2;
14276 break;
14277 case E_V4SFmode:
14278 case E_V4SImode:
14279 swaparray = swap4;
14280 break;
14281 case E_V8HImode:
14282 swaparray = swap8;
14283 break;
14284 case E_V16QImode:
14285 swaparray = swap16;
14286 break;
14287 default:
14288 gcc_unreachable ();
14291 for (i = 0; i < 16; ++i)
14292 perm[i] = GEN_INT (swaparray[i]);
14294 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
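/* Editorial note (not part of the original source): a worked example of
   the element reversal mentioned above.  The natural big-endian vperm
   selector that reverses the four words of a V4SI vector is the array
   below; storing that constant as a little-endian vector reverses its
   bytes, which is exactly the swap4 table used above.  */
#if 0
unsigned int swap4_be[16] = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
#endif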
14297 /* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
14298 with -maltivec=be specified. Issue the load followed by an element-
14299 reversing permute. */
14300 void
14301 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14303 rtx tmp = gen_reg_rtx (mode);
14304 rtx load = gen_rtx_SET (tmp, op1);
14305 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14306 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14307 rtx sel = swap_selector_for_mode (mode);
14308 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14310 gcc_assert (REG_P (op0));
14311 emit_insn (par);
14312 emit_insn (gen_rtx_SET (op0, vperm));
14315 /* Generate code for a "stvxl" built-in for a little endian target with
14316 -maltivec=be specified. Issue the store preceded by an element-reversing
14317 permute. */
14318 void
14319 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14321 rtx tmp = gen_reg_rtx (mode);
14322 rtx store = gen_rtx_SET (op0, tmp);
14323 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14324 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14325 rtx sel = swap_selector_for_mode (mode);
14326 rtx vperm;
14328 gcc_assert (REG_P (op1));
14329 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14330 emit_insn (gen_rtx_SET (tmp, vperm));
14331 emit_insn (par);
14334 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14335 specified. Issue the store preceded by an element-reversing permute. */
14336 void
14337 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14339 machine_mode inner_mode = GET_MODE_INNER (mode);
14340 rtx tmp = gen_reg_rtx (mode);
14341 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14342 rtx sel = swap_selector_for_mode (mode);
14343 rtx vperm;
14345 gcc_assert (REG_P (op1));
14346 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14347 emit_insn (gen_rtx_SET (tmp, vperm));
14348 emit_insn (gen_rtx_SET (op0, stvx));
14351 static rtx
14352 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14354 rtx pat, addr;
14355 tree arg0 = CALL_EXPR_ARG (exp, 0);
14356 tree arg1 = CALL_EXPR_ARG (exp, 1);
14357 machine_mode tmode = insn_data[icode].operand[0].mode;
14358 machine_mode mode0 = Pmode;
14359 machine_mode mode1 = Pmode;
14360 rtx op0 = expand_normal (arg0);
14361 rtx op1 = expand_normal (arg1);
14363 if (icode == CODE_FOR_nothing)
14364 /* Builtin not supported on this processor. */
14365 return 0;
14367 /* If we got invalid arguments bail out before generating bad rtl. */
14368 if (arg0 == error_mark_node || arg1 == error_mark_node)
14369 return const0_rtx;
14371 if (target == 0
14372 || GET_MODE (target) != tmode
14373 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14374 target = gen_reg_rtx (tmode);
14376 op1 = copy_to_mode_reg (mode1, op1);
14378 /* For LVX, express the RTL accurately by ANDing the address with -16.
14379 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14380 so the raw address is fine. */
14381 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14382 || icode == CODE_FOR_altivec_lvx_v2di_2op
14383 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14384 || icode == CODE_FOR_altivec_lvx_v4si_2op
14385 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14386 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14388 rtx rawaddr;
14389 if (op0 == const0_rtx)
14390 rawaddr = op1;
14391 else
14393 op0 = copy_to_mode_reg (mode0, op0);
14394 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14396 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14397 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14399 /* For -maltivec=be, emit the load and follow it up with a
14400 permute to swap the elements. */
14401 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14403 rtx temp = gen_reg_rtx (tmode);
14404 emit_insn (gen_rtx_SET (temp, addr));
14406 rtx sel = swap_selector_for_mode (tmode);
14407 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14408 UNSPEC_VPERM);
14409 emit_insn (gen_rtx_SET (target, vperm));
14411 else
14412 emit_insn (gen_rtx_SET (target, addr));
14414 else
14416 if (op0 == const0_rtx)
14417 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14418 else
14420 op0 = copy_to_mode_reg (mode0, op0);
14421 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14422 gen_rtx_PLUS (Pmode, op1, op0));
14425 pat = GEN_FCN (icode) (target, addr);
14426 if (! pat)
14427 return 0;
14428 emit_insn (pat);
14431 return target;
14434 static rtx
14435 altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14437 rtx pat, addr;
14438 tree arg0 = CALL_EXPR_ARG (exp, 0);
14439 tree arg1 = CALL_EXPR_ARG (exp, 1);
14440 machine_mode tmode = insn_data[icode].operand[0].mode;
14441 machine_mode mode0 = Pmode;
14442 machine_mode mode1 = Pmode;
14443 rtx op0 = expand_normal (arg0);
14444 rtx op1 = expand_normal (arg1);
14446 if (icode == CODE_FOR_nothing)
14447 /* Builtin not supported on this processor. */
14448 return 0;
14450 /* If we got invalid arguments bail out before generating bad rtl. */
14451 if (arg0 == error_mark_node || arg1 == error_mark_node)
14452 return const0_rtx;
14454 if (target == 0
14455 || GET_MODE (target) != tmode
14456 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14457 target = gen_reg_rtx (tmode);
14459 op1 = copy_to_mode_reg (mode1, op1);
14461 if (op0 == const0_rtx)
14462 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14463 else
14465 op0 = copy_to_mode_reg (mode0, op0);
14466 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14467 gen_rtx_PLUS (Pmode, op1, op0));
14470 pat = GEN_FCN (icode) (target, addr);
14471 if (!pat)
14472 return 0;
14474 emit_insn (pat);
14475 /* Reverse the order of the elements if in LE mode. */
14476 if (!VECTOR_ELT_ORDER_BIG)
14478 rtx sel = swap_selector_for_mode (tmode);
14479 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
14480 UNSPEC_VPERM);
14481 emit_insn (gen_rtx_SET (target, vperm));
14483 return target;
14486 static rtx
14487 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14489 tree arg0 = CALL_EXPR_ARG (exp, 0);
14490 tree arg1 = CALL_EXPR_ARG (exp, 1);
14491 tree arg2 = CALL_EXPR_ARG (exp, 2);
14492 rtx op0 = expand_normal (arg0);
14493 rtx op1 = expand_normal (arg1);
14494 rtx op2 = expand_normal (arg2);
14495 rtx pat, addr;
14496 machine_mode tmode = insn_data[icode].operand[0].mode;
14497 machine_mode mode1 = Pmode;
14498 machine_mode mode2 = Pmode;
14500 /* Invalid arguments. Bail before doing anything stoopid! */
14501 if (arg0 == error_mark_node
14502 || arg1 == error_mark_node
14503 || arg2 == error_mark_node)
14504 return const0_rtx;
14506 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14507 op0 = copy_to_mode_reg (tmode, op0);
14509 op2 = copy_to_mode_reg (mode2, op2);
14511 if (op1 == const0_rtx)
14513 addr = gen_rtx_MEM (tmode, op2);
14515 else
14517 op1 = copy_to_mode_reg (mode1, op1);
14518 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14521 pat = GEN_FCN (icode) (addr, op0);
14522 if (pat)
14523 emit_insn (pat);
14524 return NULL_RTX;
14527 static rtx
14528 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14530 rtx pat;
14531 tree arg0 = CALL_EXPR_ARG (exp, 0);
14532 tree arg1 = CALL_EXPR_ARG (exp, 1);
14533 tree arg2 = CALL_EXPR_ARG (exp, 2);
14534 rtx op0 = expand_normal (arg0);
14535 rtx op1 = expand_normal (arg1);
14536 rtx op2 = expand_normal (arg2);
14537 machine_mode mode0 = insn_data[icode].operand[0].mode;
14538 machine_mode mode1 = insn_data[icode].operand[1].mode;
14539 machine_mode mode2 = insn_data[icode].operand[2].mode;
14541 if (icode == CODE_FOR_nothing)
14542 /* Builtin not supported on this processor. */
14543 return NULL_RTX;
14545   /* If we got invalid arguments, bail out before generating bad rtl.  */
14546 if (arg0 == error_mark_node
14547 || arg1 == error_mark_node
14548 || arg2 == error_mark_node)
14549 return NULL_RTX;
14551 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14552 op0 = copy_to_mode_reg (mode0, op0);
14553 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14554 op1 = copy_to_mode_reg (mode1, op1);
14555 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14556 op2 = copy_to_mode_reg (mode2, op2);
14558 pat = GEN_FCN (icode) (op0, op1, op2);
14559 if (pat)
14560 emit_insn (pat);
14562 return NULL_RTX;
14565 static rtx
14566 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14568 tree arg0 = CALL_EXPR_ARG (exp, 0);
14569 tree arg1 = CALL_EXPR_ARG (exp, 1);
14570 tree arg2 = CALL_EXPR_ARG (exp, 2);
14571 rtx op0 = expand_normal (arg0);
14572 rtx op1 = expand_normal (arg1);
14573 rtx op2 = expand_normal (arg2);
14574 rtx pat, addr, rawaddr;
14575 machine_mode tmode = insn_data[icode].operand[0].mode;
14576 machine_mode smode = insn_data[icode].operand[1].mode;
14577 machine_mode mode1 = Pmode;
14578 machine_mode mode2 = Pmode;
14580   /* Invalid arguments.  Bail out before generating bad rtl.  */
14581 if (arg0 == error_mark_node
14582 || arg1 == error_mark_node
14583 || arg2 == error_mark_node)
14584 return const0_rtx;
14586 op2 = copy_to_mode_reg (mode2, op2);
14588 /* For STVX, express the RTL accurately by ANDing the address with -16.
14589 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14590 so the raw address is fine. */
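/* For example (hypothetical address): an stvx to address 0x1007 ignores
   the low four address bits and actually stores to
   0x1000 == (0x1007 & -16); the AND emitted below makes that hardware
   behavior explicit in the RTL.  */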
14591 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14592 || icode == CODE_FOR_altivec_stvx_v2di_2op
14593 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14594 || icode == CODE_FOR_altivec_stvx_v4si_2op
14595 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14596 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14598 if (op1 == const0_rtx)
14599 rawaddr = op2;
14600 else
14602 op1 = copy_to_mode_reg (mode1, op1);
14603 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14606 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14607 addr = gen_rtx_MEM (tmode, addr);
14609 op0 = copy_to_mode_reg (tmode, op0);
14611 /* For -maltivec=be, emit a permute to swap the elements, followed
14612 by the store. */
14613 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14615 rtx temp = gen_reg_rtx (tmode);
14616 rtx sel = swap_selector_for_mode (tmode);
14617 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14618 UNSPEC_VPERM);
14619 emit_insn (gen_rtx_SET (temp, vperm));
14620 emit_insn (gen_rtx_SET (addr, temp));
14622 else
14623 emit_insn (gen_rtx_SET (addr, op0));
14625 else
14627 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14628 op0 = copy_to_mode_reg (smode, op0);
14630 if (op1 == const0_rtx)
14631 addr = gen_rtx_MEM (tmode, op2);
14632 else
14634 op1 = copy_to_mode_reg (mode1, op1);
14635 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14638 pat = GEN_FCN (icode) (addr, op0);
14639 if (pat)
14640 emit_insn (pat);
14643 return NULL_RTX;
14646 /* Return the appropriate SPR number associated with the given builtin. */
14647 static inline HOST_WIDE_INT
14648 htm_spr_num (enum rs6000_builtins code)
14650 if (code == HTM_BUILTIN_GET_TFHAR
14651 || code == HTM_BUILTIN_SET_TFHAR)
14652 return TFHAR_SPR;
14653 else if (code == HTM_BUILTIN_GET_TFIAR
14654 || code == HTM_BUILTIN_SET_TFIAR)
14655 return TFIAR_SPR;
14656 else if (code == HTM_BUILTIN_GET_TEXASR
14657 || code == HTM_BUILTIN_SET_TEXASR)
14658 return TEXASR_SPR;
14659 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14660 || code == HTM_BUILTIN_SET_TEXASRU);
14661 return TEXASRU_SPR;
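/* For example (illustrative): __builtin_get_texasr maps to
   HTM_BUILTIN_GET_TEXASR, for which this returns TEXASR_SPR; that SPR
   number becomes an operand of the mfspr pattern chosen by
   rs6000_htm_spr_icode below.  */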
14664 /* Return the appropriate SPR regno associated with the given builtin. */
14665 static inline HOST_WIDE_INT
14666 htm_spr_regno (enum rs6000_builtins code)
14668 if (code == HTM_BUILTIN_GET_TFHAR
14669 || code == HTM_BUILTIN_SET_TFHAR)
14670 return TFHAR_REGNO;
14671 else if (code == HTM_BUILTIN_GET_TFIAR
14672 || code == HTM_BUILTIN_SET_TFIAR)
14673 return TFIAR_REGNO;
14674 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14675 || code == HTM_BUILTIN_SET_TEXASR
14676 || code == HTM_BUILTIN_GET_TEXASRU
14677 || code == HTM_BUILTIN_SET_TEXASRU);
14678 return TEXASR_REGNO;
14681 /* Return the correct ICODE value depending on whether we are
14682 setting or reading the HTM SPRs. */
14683 static inline enum insn_code
14684 rs6000_htm_spr_icode (bool nonvoid)
14686 if (nonvoid)
14687 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14688 else
14689 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14692 /* Expand the HTM builtin in EXP and store the result in TARGET.
14693 Store true in *EXPANDEDP if we found a builtin to expand. */
14694 static rtx
14695 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14697 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14698 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14699 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14700 const struct builtin_description *d;
14701 size_t i;
14703 *expandedp = true;
14705 if (!TARGET_POWERPC64
14706 && (fcode == HTM_BUILTIN_TABORTDC
14707 || fcode == HTM_BUILTIN_TABORTDCI))
14709       size_t uns_fcode = (size_t) fcode;
14710 const char *name = rs6000_builtin_info[uns_fcode].name;
14711 error ("builtin %qs is only valid in 64-bit mode", name);
14712 return const0_rtx;
14715 /* Expand the HTM builtins. */
14716 d = bdesc_htm;
14717 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14718 if (d->code == fcode)
14720 rtx op[MAX_HTM_OPERANDS], pat;
14721 int nopnds = 0;
14722 tree arg;
14723 call_expr_arg_iterator iter;
14724 unsigned attr = rs6000_builtin_info[fcode].attr;
14725 enum insn_code icode = d->icode;
14726 const struct insn_operand_data *insn_op;
14727 bool uses_spr = (attr & RS6000_BTC_SPR);
14728 rtx cr = NULL_RTX;
14730 if (uses_spr)
14731 icode = rs6000_htm_spr_icode (nonvoid);
14732 insn_op = &insn_data[icode].operand[0];
14734 if (nonvoid)
14736 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14737 if (!target
14738 || GET_MODE (target) != tmode
14739 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14740 target = gen_reg_rtx (tmode);
14741 if (uses_spr)
14742 op[nopnds++] = target;
14745 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14747 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14748 return const0_rtx;
14750 insn_op = &insn_data[icode].operand[nopnds];
14752 op[nopnds] = expand_normal (arg);
14754 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14756 if (!strcmp (insn_op->constraint, "n"))
14758 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14759 if (!CONST_INT_P (op[nopnds]))
14760 error ("argument %d must be an unsigned literal", arg_num);
14761 else
14762 error ("argument %d is an unsigned literal that is "
14763 "out of range", arg_num);
14764 return const0_rtx;
14766 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14769 nopnds++;
14772 /* Handle the builtins for extended mnemonics. These accept
14773 no arguments, but map to builtins that take arguments. */
14774 switch (fcode)
14776 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14777 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14778 op[nopnds++] = GEN_INT (1);
14779 if (flag_checking)
14780 attr |= RS6000_BTC_UNARY;
14781 break;
14782 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14783 op[nopnds++] = GEN_INT (0);
14784 if (flag_checking)
14785 attr |= RS6000_BTC_UNARY;
14786 break;
14787 default:
14788 break;
14791 /* If this builtin accesses SPRs, then pass in the appropriate
14792 SPR number and SPR regno as the last two operands. */
14793 if (uses_spr)
14795 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14796 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14797 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14799 /* If this builtin accesses a CR, then pass in a scratch
14800 CR as the last operand. */
14801 else if (attr & RS6000_BTC_CR)
14802 { cr = gen_reg_rtx (CCmode);
14803 op[nopnds++] = cr;
14806 if (flag_checking)
14808 int expected_nopnds = 0;
14809 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14810 expected_nopnds = 1;
14811 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14812 expected_nopnds = 2;
14813 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14814 expected_nopnds = 3;
14815 if (!(attr & RS6000_BTC_VOID))
14816 expected_nopnds += 1;
14817 if (uses_spr)
14818 expected_nopnds += 2;
14820 gcc_assert (nopnds == expected_nopnds
14821 && nopnds <= MAX_HTM_OPERANDS);
14824 switch (nopnds)
14826 case 1:
14827 pat = GEN_FCN (icode) (op[0]);
14828 break;
14829 case 2:
14830 pat = GEN_FCN (icode) (op[0], op[1]);
14831 break;
14832 case 3:
14833 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14834 break;
14835 case 4:
14836 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14837 break;
14838 default:
14839 gcc_unreachable ();
14841 if (!pat)
14842 return NULL_RTX;
14843 emit_insn (pat);
14845 if (attr & RS6000_BTC_CR)
14847 if (fcode == HTM_BUILTIN_TBEGIN)
14849 /* Emit code to set TARGET to true or false depending on
14850 	     whether the tbegin. instruction succeeded or failed
14851 to start a transaction. We do this by placing the 1's
14852 complement of CR's EQ bit into TARGET. */
14853 rtx scratch = gen_reg_rtx (SImode);
14854 emit_insn (gen_rtx_SET (scratch,
14855 gen_rtx_EQ (SImode, cr,
14856 const0_rtx)));
14857 emit_insn (gen_rtx_SET (target,
14858 gen_rtx_XOR (SImode, scratch,
14859 GEN_INT (1))));
14861 else
14863 /* Emit code to copy the 4-bit condition register field
14864 CR into the least significant end of register TARGET. */
14865 rtx scratch1 = gen_reg_rtx (SImode);
14866 rtx scratch2 = gen_reg_rtx (SImode);
14867 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14868 emit_insn (gen_movcc (subreg, cr));
14869 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14870 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14874 if (nonvoid)
14875 return target;
14876 return const0_rtx;
14879 *expandedp = false;
14880 return NULL_RTX;
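/* A usage sketch for the tbegin handling above (illustrative, assumes
   -mhtm): __builtin_tbegin returns nonzero exactly when the transaction
   starts, i.e. the complement of CR0's EQ bit computed above:

     if (__builtin_tbegin (0))
       {
         ... transactional code ...
         __builtin_tend (0);
       }
     else
       ... fallback path: transaction failed to start or aborted ...
*/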
14883 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14885 static rtx
14886 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14887 rtx target)
14889 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14890 if (fcode == RS6000_BUILTIN_CPU_INIT)
14891 return const0_rtx;
14893 if (target == 0 || GET_MODE (target) != SImode)
14894 target = gen_reg_rtx (SImode);
14896 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14897 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14898   /* The target_clones attribute creates an ARRAY_REF instead of a STRING_CST;
14899      strip that back down to the underlying STRING_CST.  */
14900 if (TREE_CODE (arg) == ARRAY_REF
14901 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14902 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14903 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14904 arg = TREE_OPERAND (arg, 0);
14906 if (TREE_CODE (arg) != STRING_CST)
14908 error ("builtin %qs only accepts a string argument",
14909 rs6000_builtin_info[(size_t) fcode].name);
14910 return const0_rtx;
14913 if (fcode == RS6000_BUILTIN_CPU_IS)
14915 const char *cpu = TREE_STRING_POINTER (arg);
14916 rtx cpuid = NULL_RTX;
14917 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14918 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14920 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14921 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14922 break;
14924 if (cpuid == NULL_RTX)
14926 /* Invalid CPU argument. */
14927 error ("cpu %qs is an invalid argument to builtin %qs",
14928 cpu, rs6000_builtin_info[(size_t) fcode].name);
14929 return const0_rtx;
14932 rtx platform = gen_reg_rtx (SImode);
14933 rtx tcbmem = gen_const_mem (SImode,
14934 gen_rtx_PLUS (Pmode,
14935 gen_rtx_REG (Pmode, TLS_REGNUM),
14936 GEN_INT (TCB_PLATFORM_OFFSET)));
14937 emit_move_insn (platform, tcbmem);
14938 emit_insn (gen_eqsi3 (target, platform, cpuid));
14940 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14942 const char *hwcap = TREE_STRING_POINTER (arg);
14943 rtx mask = NULL_RTX;
14944 int hwcap_offset;
14945 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14946 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14948 mask = GEN_INT (cpu_supports_info[i].mask);
14949 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14950 break;
14952 if (mask == NULL_RTX)
14954 /* Invalid HWCAP argument. */
14955 error ("%s %qs is an invalid argument to builtin %qs",
14956 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14957 return const0_rtx;
14960 rtx tcb_hwcap = gen_reg_rtx (SImode);
14961 rtx tcbmem = gen_const_mem (SImode,
14962 gen_rtx_PLUS (Pmode,
14963 gen_rtx_REG (Pmode, TLS_REGNUM),
14964 GEN_INT (hwcap_offset)));
14965 emit_move_insn (tcb_hwcap, tcbmem);
14966 rtx scratch1 = gen_reg_rtx (SImode);
14967 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14968 rtx scratch2 = gen_reg_rtx (SImode);
14969 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14970 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14972 else
14973 gcc_unreachable ();
14975 /* Record that we have expanded a CPU builtin, so that we can later
14976 emit a reference to the special symbol exported by LIBC to ensure we
14977 do not link against an old LIBC that doesn't support this feature. */
14978 cpu_builtin_p = true;
14980 #else
14981 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14982 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14984 /* For old LIBCs, always return FALSE. */
14985 emit_move_insn (target, GEN_INT (0));
14986 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14988 return target;
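/* A usage sketch (illustrative; needs the GLIBC support described
   above): the arguments must be string literals, which is why only
   STRING_CSTs are accepted here.

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       ... POWER9-specific path ...
     if (__builtin_cpu_supports ("vsx"))
       ... VSX path ...
*/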
14991 static rtx
14992 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14994 rtx pat;
14995 tree arg0 = CALL_EXPR_ARG (exp, 0);
14996 tree arg1 = CALL_EXPR_ARG (exp, 1);
14997 tree arg2 = CALL_EXPR_ARG (exp, 2);
14998 rtx op0 = expand_normal (arg0);
14999 rtx op1 = expand_normal (arg1);
15000 rtx op2 = expand_normal (arg2);
15001 machine_mode tmode = insn_data[icode].operand[0].mode;
15002 machine_mode mode0 = insn_data[icode].operand[1].mode;
15003 machine_mode mode1 = insn_data[icode].operand[2].mode;
15004 machine_mode mode2 = insn_data[icode].operand[3].mode;
15006 if (icode == CODE_FOR_nothing)
15007 /* Builtin not supported on this processor. */
15008 return 0;
15010   /* If we got invalid arguments, bail out before generating bad rtl.  */
15011 if (arg0 == error_mark_node
15012 || arg1 == error_mark_node
15013 || arg2 == error_mark_node)
15014 return const0_rtx;
15016 /* Check and prepare argument depending on the instruction code.
15018      Note that a switch statement instead of this sequence of tests
15019      would not compile: many of the CODE_FOR values could be
15020      CODE_FOR_nothing, which would yield multiple case labels with
15021      identical values.  (We never reach this point at runtime in that
15022      situation, since we return early above for CODE_FOR_nothing.)  */
15023 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15024 || icode == CODE_FOR_altivec_vsldoi_v2df
15025 || icode == CODE_FOR_altivec_vsldoi_v4si
15026 || icode == CODE_FOR_altivec_vsldoi_v8hi
15027 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15029 /* Only allow 4-bit unsigned literals. */
15030 STRIP_NOPS (arg2);
15031 if (TREE_CODE (arg2) != INTEGER_CST
15032 || TREE_INT_CST_LOW (arg2) & ~0xf)
15034 error ("argument 3 must be a 4-bit unsigned literal");
15035 return CONST0_RTX (tmode);
15038 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15039 || icode == CODE_FOR_vsx_xxpermdi_v2di
15040 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15041 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15042 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15043 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15044 || icode == CODE_FOR_vsx_xxpermdi_v4si
15045 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15046 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15047 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15048 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15049 || icode == CODE_FOR_vsx_xxsldwi_v4si
15050 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15051 || icode == CODE_FOR_vsx_xxsldwi_v2di
15052 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15054 /* Only allow 2-bit unsigned literals. */
15055 STRIP_NOPS (arg2);
15056 if (TREE_CODE (arg2) != INTEGER_CST
15057 || TREE_INT_CST_LOW (arg2) & ~0x3)
15059 error ("argument 3 must be a 2-bit unsigned literal");
15060 return CONST0_RTX (tmode);
15063 else if (icode == CODE_FOR_vsx_set_v2df
15064 || icode == CODE_FOR_vsx_set_v2di
15065 || icode == CODE_FOR_bcdadd
15066 || icode == CODE_FOR_bcdadd_lt
15067 || icode == CODE_FOR_bcdadd_eq
15068 || icode == CODE_FOR_bcdadd_gt
15069 || icode == CODE_FOR_bcdsub
15070 || icode == CODE_FOR_bcdsub_lt
15071 || icode == CODE_FOR_bcdsub_eq
15072 || icode == CODE_FOR_bcdsub_gt)
15074 /* Only allow 1-bit unsigned literals. */
15075 STRIP_NOPS (arg2);
15076 if (TREE_CODE (arg2) != INTEGER_CST
15077 || TREE_INT_CST_LOW (arg2) & ~0x1)
15079 error ("argument 3 must be a 1-bit unsigned literal");
15080 return CONST0_RTX (tmode);
15083 else if (icode == CODE_FOR_dfp_ddedpd_dd
15084 || icode == CODE_FOR_dfp_ddedpd_td)
15086 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15087 STRIP_NOPS (arg0);
15088 if (TREE_CODE (arg0) != INTEGER_CST
15089 	  || TREE_INT_CST_LOW (arg0) & ~0x3)
15091 error ("argument 1 must be 0 or 2");
15092 return CONST0_RTX (tmode);
15095 else if (icode == CODE_FOR_dfp_denbcd_dd
15096 || icode == CODE_FOR_dfp_denbcd_td)
15098 /* Only allow 1-bit unsigned literals. */
15099 STRIP_NOPS (arg0);
15100 if (TREE_CODE (arg0) != INTEGER_CST
15101 || TREE_INT_CST_LOW (arg0) & ~0x1)
15103 error ("argument 1 must be a 1-bit unsigned literal");
15104 return CONST0_RTX (tmode);
15107 else if (icode == CODE_FOR_dfp_dscli_dd
15108 || icode == CODE_FOR_dfp_dscli_td
15109 || icode == CODE_FOR_dfp_dscri_dd
15110 || icode == CODE_FOR_dfp_dscri_td)
15112 /* Only allow 6-bit unsigned literals. */
15113 STRIP_NOPS (arg1);
15114 if (TREE_CODE (arg1) != INTEGER_CST
15115 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15117 error ("argument 2 must be a 6-bit unsigned literal");
15118 return CONST0_RTX (tmode);
15121 else if (icode == CODE_FOR_crypto_vshasigmaw
15122 || icode == CODE_FOR_crypto_vshasigmad)
15124 /* Check whether the 2nd and 3rd arguments are integer constants and in
15125 range and prepare arguments. */
15126 STRIP_NOPS (arg1);
15127 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15129 error ("argument 2 must be 0 or 1");
15130 return CONST0_RTX (tmode);
15133 STRIP_NOPS (arg2);
15134 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15136 error ("argument 3 must be in the range 0..15");
15137 return CONST0_RTX (tmode);
15141 if (target == 0
15142 || GET_MODE (target) != tmode
15143 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15144 target = gen_reg_rtx (tmode);
15146 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15147 op0 = copy_to_mode_reg (mode0, op0);
15148 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15149 op1 = copy_to_mode_reg (mode1, op1);
15150 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15151 op2 = copy_to_mode_reg (mode2, op2);
15153 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15154 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15155 else
15156 pat = GEN_FCN (icode) (target, op0, op1, op2);
15157 if (! pat)
15158 return 0;
15159 emit_insn (pat);
15161 return target;
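/* For example (illustrative): the vsldoi checks above mean that

     vector int r = vec_sld (a, b, 3);   // 4-bit literal: accepted

   expands normally, while a non-constant or out-of-range third argument
   is rejected at compile time with "argument 3 must be a 4-bit unsigned
   literal".  */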
15164 /* Expand the lvx builtins. */
15165 static rtx
15166 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15168 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15169 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15170 tree arg0;
15171 machine_mode tmode, mode0;
15172 rtx pat, op0;
15173 enum insn_code icode;
15175 switch (fcode)
15177 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15178 icode = CODE_FOR_vector_altivec_load_v16qi;
15179 break;
15180 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15181 icode = CODE_FOR_vector_altivec_load_v8hi;
15182 break;
15183 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15184 icode = CODE_FOR_vector_altivec_load_v4si;
15185 break;
15186 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15187 icode = CODE_FOR_vector_altivec_load_v4sf;
15188 break;
15189 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15190 icode = CODE_FOR_vector_altivec_load_v2df;
15191 break;
15192 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15193 icode = CODE_FOR_vector_altivec_load_v2di;
15194 break;
15195 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15196 icode = CODE_FOR_vector_altivec_load_v1ti;
15197 break;
15198 default:
15199 *expandedp = false;
15200 return NULL_RTX;
15203 *expandedp = true;
15205 arg0 = CALL_EXPR_ARG (exp, 0);
15206 op0 = expand_normal (arg0);
15207 tmode = insn_data[icode].operand[0].mode;
15208 mode0 = insn_data[icode].operand[1].mode;
15210 if (target == 0
15211 || GET_MODE (target) != tmode
15212 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15213 target = gen_reg_rtx (tmode);
15215 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15216 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15218 pat = GEN_FCN (icode) (target, op0);
15219 if (! pat)
15220 return 0;
15221 emit_insn (pat);
15222 return target;
15225 /* Expand the stvx builtins. */
15226 static rtx
15227 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15228 bool *expandedp)
15230 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15231 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15232 tree arg0, arg1;
15233 machine_mode mode0, mode1;
15234 rtx pat, op0, op1;
15235 enum insn_code icode;
15237 switch (fcode)
15239 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15240 icode = CODE_FOR_vector_altivec_store_v16qi;
15241 break;
15242 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15243 icode = CODE_FOR_vector_altivec_store_v8hi;
15244 break;
15245 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15246 icode = CODE_FOR_vector_altivec_store_v4si;
15247 break;
15248 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15249 icode = CODE_FOR_vector_altivec_store_v4sf;
15250 break;
15251 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15252 icode = CODE_FOR_vector_altivec_store_v2df;
15253 break;
15254 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15255 icode = CODE_FOR_vector_altivec_store_v2di;
15256 break;
15257 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15258 icode = CODE_FOR_vector_altivec_store_v1ti;
15259 break;
15260 default:
15261 *expandedp = false;
15262 return NULL_RTX;
15265 arg0 = CALL_EXPR_ARG (exp, 0);
15266 arg1 = CALL_EXPR_ARG (exp, 1);
15267 op0 = expand_normal (arg0);
15268 op1 = expand_normal (arg1);
15269 mode0 = insn_data[icode].operand[0].mode;
15270 mode1 = insn_data[icode].operand[1].mode;
15272 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15273 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15274 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15275 op1 = copy_to_mode_reg (mode1, op1);
15277 pat = GEN_FCN (icode) (op0, op1);
15278 if (pat)
15279 emit_insn (pat);
15281 *expandedp = true;
15282 return NULL_RTX;
15285 /* Expand the dst builtins. */
15286 static rtx
15287 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15288 bool *expandedp)
15290 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15291 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15292 tree arg0, arg1, arg2;
15293 machine_mode mode0, mode1;
15294 rtx pat, op0, op1, op2;
15295 const struct builtin_description *d;
15296 size_t i;
15298 *expandedp = false;
15300 /* Handle DST variants. */
15301 d = bdesc_dst;
15302 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15303 if (d->code == fcode)
15305 arg0 = CALL_EXPR_ARG (exp, 0);
15306 arg1 = CALL_EXPR_ARG (exp, 1);
15307 arg2 = CALL_EXPR_ARG (exp, 2);
15308 op0 = expand_normal (arg0);
15309 op1 = expand_normal (arg1);
15310 op2 = expand_normal (arg2);
15311 mode0 = insn_data[d->icode].operand[0].mode;
15312 mode1 = insn_data[d->icode].operand[1].mode;
15314 /* Invalid arguments, bail out before generating bad rtl. */
15315 if (arg0 == error_mark_node
15316 || arg1 == error_mark_node
15317 || arg2 == error_mark_node)
15318 return const0_rtx;
15320 *expandedp = true;
15321 STRIP_NOPS (arg2);
15322 if (TREE_CODE (arg2) != INTEGER_CST
15323 || TREE_INT_CST_LOW (arg2) & ~0x3)
15325 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15326 return const0_rtx;
15329 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15330 op0 = copy_to_mode_reg (Pmode, op0);
15331 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15332 op1 = copy_to_mode_reg (mode1, op1);
15334 pat = GEN_FCN (d->icode) (op0, op1, op2);
15335 if (pat != 0)
15336 emit_insn (pat);
15338 return NULL_RTX;
15341 return NULL_RTX;
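/* A usage sketch (illustrative): the third argument of the dst family
   selects one of four tag streams and is the 2-bit literal validated
   above:

     vec_dst (ptr, ctl, 0);    // accepted: literal in 0..3
     vec_dst (ptr, ctl, tag);  // rejected unless TAG folds to 0..3
*/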
15344 /* Expand vec_init builtin. */
15345 static rtx
15346 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15348 machine_mode tmode = TYPE_MODE (type);
15349 machine_mode inner_mode = GET_MODE_INNER (tmode);
15350 int i, n_elt = GET_MODE_NUNITS (tmode);
15352 gcc_assert (VECTOR_MODE_P (tmode));
15353 gcc_assert (n_elt == call_expr_nargs (exp));
15355 if (!target || !register_operand (target, tmode))
15356 target = gen_reg_rtx (tmode);
15358   /* If we have a vector composed of a single element, such as V1TImode, do
15359 the initialization directly. */
15360 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15362 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15363 emit_move_insn (target, gen_lowpart (tmode, x));
15365 else
15367 rtvec v = rtvec_alloc (n_elt);
15369 for (i = 0; i < n_elt; ++i)
15371 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15372 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15375 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15378 return target;
15381 /* Return the integer constant in ARG. Constrain it to be in the range
15382 of the subparts of VEC_TYPE; issue an error if not. */
15384 static int
15385 get_element_number (tree vec_type, tree arg)
15387 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15389 if (!tree_fits_uhwi_p (arg)
15390 || (elt = tree_to_uhwi (arg), elt > max))
15392 error ("selector must be an integer constant in the range 0..%wi", max);
15393 return 0;
15396 return elt;
15399 /* Expand vec_set builtin. */
15400 static rtx
15401 altivec_expand_vec_set_builtin (tree exp)
15403 machine_mode tmode, mode1;
15404 tree arg0, arg1, arg2;
15405 int elt;
15406 rtx op0, op1;
15408 arg0 = CALL_EXPR_ARG (exp, 0);
15409 arg1 = CALL_EXPR_ARG (exp, 1);
15410 arg2 = CALL_EXPR_ARG (exp, 2);
15412 tmode = TYPE_MODE (TREE_TYPE (arg0));
15413 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15414 gcc_assert (VECTOR_MODE_P (tmode));
15416 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15417 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15418 elt = get_element_number (TREE_TYPE (arg0), arg2);
15420 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15421 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15423 op0 = force_reg (tmode, op0);
15424 op1 = force_reg (mode1, op1);
15426 rs6000_expand_vector_set (op0, op1, elt);
15428 return op0;
15431 /* Expand vec_ext builtin. */
15432 static rtx
15433 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15435 machine_mode tmode, mode0;
15436 tree arg0, arg1;
15437 rtx op0;
15438 rtx op1;
15440 arg0 = CALL_EXPR_ARG (exp, 0);
15441 arg1 = CALL_EXPR_ARG (exp, 1);
15443 op0 = expand_normal (arg0);
15444 op1 = expand_normal (arg1);
15446 /* Call get_element_number to validate arg1 if it is a constant. */
15447 if (TREE_CODE (arg1) == INTEGER_CST)
15448 (void) get_element_number (TREE_TYPE (arg0), arg1);
15450 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15451 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15452 gcc_assert (VECTOR_MODE_P (mode0));
15454 op0 = force_reg (mode0, op0);
15456 if (optimize || !target || !register_operand (target, tmode))
15457 target = gen_reg_rtx (tmode);
15459 rs6000_expand_vector_extract (target, op0, op1);
15461 return target;
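/* A usage sketch (illustrative): the vec_set and vec_ext expanders
   above back the vec_insert and vec_extract intrinsics:

     vector int v = { 1, 2, 3, 4 };
     int x = vec_extract (v, 2);      // selector validated if constant
     v = vec_insert (x + 1, v, 3);

   A constant selector outside 0..3 for a V4SImode vector triggers the
   "selector must be an integer constant" error from
   get_element_number.  */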
15464 /* Expand the builtin in EXP and store the result in TARGET. Store
15465 true in *EXPANDEDP if we found a builtin to expand. */
15466 static rtx
15467 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15469 const struct builtin_description *d;
15470 size_t i;
15471 enum insn_code icode;
15472 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15473 tree arg0, arg1, arg2;
15474 rtx op0, pat;
15475 machine_mode tmode, mode0;
15476 enum rs6000_builtins fcode
15477 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15479 if (rs6000_overloaded_builtin_p (fcode))
15481 *expandedp = true;
15482 error ("unresolved overload for Altivec builtin %qF", fndecl);
15484       /* The call is invalid anyway; just generate a normal call as a
	 fallback.  */
15485 return expand_call (exp, target, false);
15488 target = altivec_expand_ld_builtin (exp, target, expandedp);
15489 if (*expandedp)
15490 return target;
15492 target = altivec_expand_st_builtin (exp, target, expandedp);
15493 if (*expandedp)
15494 return target;
15496 target = altivec_expand_dst_builtin (exp, target, expandedp);
15497 if (*expandedp)
15498 return target;
15500 *expandedp = true;
15502 switch (fcode)
15504 case ALTIVEC_BUILTIN_STVX_V2DF:
15505 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15506 case ALTIVEC_BUILTIN_STVX_V2DI:
15507 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15508 case ALTIVEC_BUILTIN_STVX_V4SF:
15509 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15510 case ALTIVEC_BUILTIN_STVX:
15511 case ALTIVEC_BUILTIN_STVX_V4SI:
15512 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15513 case ALTIVEC_BUILTIN_STVX_V8HI:
15514 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15515 case ALTIVEC_BUILTIN_STVX_V16QI:
15516 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15517 case ALTIVEC_BUILTIN_STVEBX:
15518 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15519 case ALTIVEC_BUILTIN_STVEHX:
15520 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15521 case ALTIVEC_BUILTIN_STVEWX:
15522 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15523 case ALTIVEC_BUILTIN_STVXL_V2DF:
15524 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15525 case ALTIVEC_BUILTIN_STVXL_V2DI:
15526 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15527 case ALTIVEC_BUILTIN_STVXL_V4SF:
15528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15529 case ALTIVEC_BUILTIN_STVXL:
15530 case ALTIVEC_BUILTIN_STVXL_V4SI:
15531 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15532 case ALTIVEC_BUILTIN_STVXL_V8HI:
15533 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15534 case ALTIVEC_BUILTIN_STVXL_V16QI:
15535 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15537 case ALTIVEC_BUILTIN_STVLX:
15538 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15539 case ALTIVEC_BUILTIN_STVLXL:
15540 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15541 case ALTIVEC_BUILTIN_STVRX:
15542 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15543 case ALTIVEC_BUILTIN_STVRXL:
15544 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15546 case P9V_BUILTIN_STXVL:
15547 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15549 case VSX_BUILTIN_STXVD2X_V1TI:
15550 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15551 case VSX_BUILTIN_STXVD2X_V2DF:
15552 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15553 case VSX_BUILTIN_STXVD2X_V2DI:
15554 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15555 case VSX_BUILTIN_STXVW4X_V4SF:
15556 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15557 case VSX_BUILTIN_STXVW4X_V4SI:
15558 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15559 case VSX_BUILTIN_STXVW4X_V8HI:
15560 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15561 case VSX_BUILTIN_STXVW4X_V16QI:
15562 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15564 /* For the following on big endian, it's ok to use any appropriate
15565 unaligned-supporting store, so use a generic expander. For
15566 little-endian, the exact element-reversing instruction must
15567 be used. */
15568 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15570 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15571 : CODE_FOR_vsx_st_elemrev_v2df);
15572 return altivec_expand_stv_builtin (code, exp);
15574 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15576 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15577 : CODE_FOR_vsx_st_elemrev_v2di);
15578 return altivec_expand_stv_builtin (code, exp);
15580 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15582 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15583 : CODE_FOR_vsx_st_elemrev_v4sf);
15584 return altivec_expand_stv_builtin (code, exp);
15586 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15588 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15589 : CODE_FOR_vsx_st_elemrev_v4si);
15590 return altivec_expand_stv_builtin (code, exp);
15592 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15594 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15595 : CODE_FOR_vsx_st_elemrev_v8hi);
15596 return altivec_expand_stv_builtin (code, exp);
15598 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15600 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15601 : CODE_FOR_vsx_st_elemrev_v16qi);
15602 return altivec_expand_stv_builtin (code, exp);
15605 case ALTIVEC_BUILTIN_MFVSCR:
15606 icode = CODE_FOR_altivec_mfvscr;
15607 tmode = insn_data[icode].operand[0].mode;
15609 if (target == 0
15610 || GET_MODE (target) != tmode
15611 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15612 target = gen_reg_rtx (tmode);
15614 pat = GEN_FCN (icode) (target);
15615 if (! pat)
15616 return 0;
15617 emit_insn (pat);
15618 return target;
15620 case ALTIVEC_BUILTIN_MTVSCR:
15621 icode = CODE_FOR_altivec_mtvscr;
15622 arg0 = CALL_EXPR_ARG (exp, 0);
15623 op0 = expand_normal (arg0);
15624 mode0 = insn_data[icode].operand[0].mode;
15626       /* If we got invalid arguments, bail out before generating bad rtl.  */
15627 if (arg0 == error_mark_node)
15628 return const0_rtx;
15630 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15631 op0 = copy_to_mode_reg (mode0, op0);
15633 pat = GEN_FCN (icode) (op0);
15634 if (pat)
15635 emit_insn (pat);
15636 return NULL_RTX;
15638 case ALTIVEC_BUILTIN_DSSALL:
15639 emit_insn (gen_altivec_dssall ());
15640 return NULL_RTX;
15642 case ALTIVEC_BUILTIN_DSS:
15643 icode = CODE_FOR_altivec_dss;
15644 arg0 = CALL_EXPR_ARG (exp, 0);
15645 STRIP_NOPS (arg0);
15646 op0 = expand_normal (arg0);
15647 mode0 = insn_data[icode].operand[0].mode;
15649       /* If we got invalid arguments, bail out before generating bad rtl.  */
15650 if (arg0 == error_mark_node)
15651 return const0_rtx;
15653 if (TREE_CODE (arg0) != INTEGER_CST
15654 || TREE_INT_CST_LOW (arg0) & ~0x3)
15656 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15657 return const0_rtx;
15660 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15661 op0 = copy_to_mode_reg (mode0, op0);
15663 emit_insn (gen_altivec_dss (op0));
15664 return NULL_RTX;
15666 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15667 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15668 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15669 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15670 case VSX_BUILTIN_VEC_INIT_V2DF:
15671 case VSX_BUILTIN_VEC_INIT_V2DI:
15672 case VSX_BUILTIN_VEC_INIT_V1TI:
15673 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15675 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15676 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15677 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15678 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15679 case VSX_BUILTIN_VEC_SET_V2DF:
15680 case VSX_BUILTIN_VEC_SET_V2DI:
15681 case VSX_BUILTIN_VEC_SET_V1TI:
15682 return altivec_expand_vec_set_builtin (exp);
15684 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15685 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15686 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15687 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15688 case VSX_BUILTIN_VEC_EXT_V2DF:
15689 case VSX_BUILTIN_VEC_EXT_V2DI:
15690 case VSX_BUILTIN_VEC_EXT_V1TI:
15691 return altivec_expand_vec_ext_builtin (exp, target);
15693 case P9V_BUILTIN_VEXTRACT4B:
15694 case P9V_BUILTIN_VEC_VEXTRACT4B:
15695 arg1 = CALL_EXPR_ARG (exp, 1);
15696 STRIP_NOPS (arg1);
15698 /* Generate a normal call if it is invalid. */
15699 if (arg1 == error_mark_node)
15700 return expand_call (exp, target, false);
15702 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15704 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15705 return expand_call (exp, target, false);
15707 break;
15709 case P9V_BUILTIN_VINSERT4B:
15710 case P9V_BUILTIN_VINSERT4B_DI:
15711 case P9V_BUILTIN_VEC_VINSERT4B:
15712 arg2 = CALL_EXPR_ARG (exp, 2);
15713 STRIP_NOPS (arg2);
15715 /* Generate a normal call if it is invalid. */
15716 if (arg2 == error_mark_node)
15717 return expand_call (exp, target, false);
15719 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15721 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15722 return expand_call (exp, target, false);
15724 break;
15726 default:
15727 break;
15728       /* Fall through to the expansion code below.  */
15731 /* Expand abs* operations. */
15732 d = bdesc_abs;
15733 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15734 if (d->code == fcode)
15735 return altivec_expand_abs_builtin (d->icode, exp, target);
15737 /* Expand the AltiVec predicates. */
15738 d = bdesc_altivec_preds;
15739 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15740 if (d->code == fcode)
15741 return altivec_expand_predicate_builtin (d->icode, exp, target);
15743   /* The LV* builtins were initialized differently from the others, so
     handle them separately here.  */
15744 switch (fcode)
15746 case ALTIVEC_BUILTIN_LVSL:
15747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15748 exp, target, false);
15749 case ALTIVEC_BUILTIN_LVSR:
15750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15751 exp, target, false);
15752 case ALTIVEC_BUILTIN_LVEBX:
15753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15754 exp, target, false);
15755 case ALTIVEC_BUILTIN_LVEHX:
15756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15757 exp, target, false);
15758 case ALTIVEC_BUILTIN_LVEWX:
15759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15760 exp, target, false);
15761 case ALTIVEC_BUILTIN_LVXL_V2DF:
15762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15763 exp, target, false);
15764 case ALTIVEC_BUILTIN_LVXL_V2DI:
15765 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15766 exp, target, false);
15767 case ALTIVEC_BUILTIN_LVXL_V4SF:
15768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15769 exp, target, false);
15770 case ALTIVEC_BUILTIN_LVXL:
15771 case ALTIVEC_BUILTIN_LVXL_V4SI:
15772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15773 exp, target, false);
15774 case ALTIVEC_BUILTIN_LVXL_V8HI:
15775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15776 exp, target, false);
15777 case ALTIVEC_BUILTIN_LVXL_V16QI:
15778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15779 exp, target, false);
15780 case ALTIVEC_BUILTIN_LVX_V2DF:
15781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15782 exp, target, false);
15783 case ALTIVEC_BUILTIN_LVX_V2DI:
15784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15785 exp, target, false);
15786 case ALTIVEC_BUILTIN_LVX_V4SF:
15787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15788 exp, target, false);
15789 case ALTIVEC_BUILTIN_LVX:
15790 case ALTIVEC_BUILTIN_LVX_V4SI:
15791 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15792 exp, target, false);
15793 case ALTIVEC_BUILTIN_LVX_V8HI:
15794 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15795 exp, target, false);
15796 case ALTIVEC_BUILTIN_LVX_V16QI:
15797 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15798 exp, target, false);
15799 case ALTIVEC_BUILTIN_LVLX:
15800 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15801 exp, target, true);
15802 case ALTIVEC_BUILTIN_LVLXL:
15803 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15804 exp, target, true);
15805 case ALTIVEC_BUILTIN_LVRX:
15806 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15807 exp, target, true);
15808 case ALTIVEC_BUILTIN_LVRXL:
15809 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15810 exp, target, true);
15811 case VSX_BUILTIN_LXVD2X_V1TI:
15812 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15813 exp, target, false);
15814 case VSX_BUILTIN_LXVD2X_V2DF:
15815 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15816 exp, target, false);
15817 case VSX_BUILTIN_LXVD2X_V2DI:
15818 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15819 exp, target, false);
15820 case VSX_BUILTIN_LXVW4X_V4SF:
15821 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15822 exp, target, false);
15823 case VSX_BUILTIN_LXVW4X_V4SI:
15824 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15825 exp, target, false);
15826 case VSX_BUILTIN_LXVW4X_V8HI:
15827 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15828 exp, target, false);
15829 case VSX_BUILTIN_LXVW4X_V16QI:
15830 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15831 exp, target, false);
15832 /* For the following on big endian, it's ok to use any appropriate
15833 unaligned-supporting load, so use a generic expander. For
15834 little-endian, the exact element-reversing instruction must
15835 be used. */
15836 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15838 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15839 : CODE_FOR_vsx_ld_elemrev_v2df);
15840 return altivec_expand_lv_builtin (code, exp, target, false);
15842 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15844 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15845 : CODE_FOR_vsx_ld_elemrev_v2di);
15846 return altivec_expand_lv_builtin (code, exp, target, false);
15848 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15850 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15851 : CODE_FOR_vsx_ld_elemrev_v4sf);
15852 return altivec_expand_lv_builtin (code, exp, target, false);
15854 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15856 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15857 : CODE_FOR_vsx_ld_elemrev_v4si);
15858 return altivec_expand_lv_builtin (code, exp, target, false);
15860 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15862 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15863 : CODE_FOR_vsx_ld_elemrev_v8hi);
15864 return altivec_expand_lv_builtin (code, exp, target, false);
15866 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15868 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15869 : CODE_FOR_vsx_ld_elemrev_v16qi);
15870 return altivec_expand_lv_builtin (code, exp, target, false);
15872 break;
15873 default:
15874 break;
15875       /* Fall through to the code below.  */
15878   /* The XL_BE builtins were initialized to always load in big-endian
     element order.  */
15879 switch (fcode)
15881 case VSX_BUILTIN_XL_BE_V2DI:
15883 enum insn_code code = CODE_FOR_vsx_load_v2di;
15884 return altivec_expand_xl_be_builtin (code, exp, target, false);
15886 break;
15887 case VSX_BUILTIN_XL_BE_V4SI:
15889 enum insn_code code = CODE_FOR_vsx_load_v4si;
15890 return altivec_expand_xl_be_builtin (code, exp, target, false);
15892 break;
15893 case VSX_BUILTIN_XL_BE_V8HI:
15895 enum insn_code code = CODE_FOR_vsx_load_v8hi;
15896 return altivec_expand_xl_be_builtin (code, exp, target, false);
15898 break;
15899 case VSX_BUILTIN_XL_BE_V16QI:
15901 enum insn_code code = CODE_FOR_vsx_load_v16qi;
15902 return altivec_expand_xl_be_builtin (code, exp, target, false);
15904 break;
15905 case VSX_BUILTIN_XL_BE_V2DF:
15907 enum insn_code code = CODE_FOR_vsx_load_v2df;
15908 return altivec_expand_xl_be_builtin (code, exp, target, false);
15910 break;
15911 case VSX_BUILTIN_XL_BE_V4SF:
15913 enum insn_code code = CODE_FOR_vsx_load_v4sf;
15914 return altivec_expand_xl_be_builtin (code, exp, target, false);
15916 break;
15917 default:
15918 break;
15919       /* Fall through to the code below.  */
15922 *expandedp = false;
15923 return NULL_RTX;
15926 /* Expand the builtin in EXP and store the result in TARGET. Store
15927 true in *EXPANDEDP if we found a builtin to expand. */
15928 static rtx
15929 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15931 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15932 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15933 const struct builtin_description *d;
15934 size_t i;
15936 *expandedp = true;
15938 switch (fcode)
15940 case PAIRED_BUILTIN_STX:
15941 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15942 case PAIRED_BUILTIN_LX:
15943 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15944 default:
15945 break;
15946       /* Fall through to the code below.  */
15949 /* Expand the paired predicates. */
15950 d = bdesc_paired_preds;
15951 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15952 if (d->code == fcode)
15953 return paired_expand_predicate_builtin (d->icode, exp, target);
15955 *expandedp = false;
15956 return NULL_RTX;
15959 static rtx
15960 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15962 rtx pat, scratch, tmp;
15963 tree form = CALL_EXPR_ARG (exp, 0);
15964 tree arg0 = CALL_EXPR_ARG (exp, 1);
15965 tree arg1 = CALL_EXPR_ARG (exp, 2);
15966 rtx op0 = expand_normal (arg0);
15967 rtx op1 = expand_normal (arg1);
15968 machine_mode mode0 = insn_data[icode].operand[1].mode;
15969 machine_mode mode1 = insn_data[icode].operand[2].mode;
15970 int form_int;
15971 enum rtx_code code;
15973 if (TREE_CODE (form) != INTEGER_CST)
15975       error ("argument 1 of %qs must be a constant",
15976 "__builtin_paired_predicate");
15977 return const0_rtx;
15979 else
15980 form_int = TREE_INT_CST_LOW (form);
15982 gcc_assert (mode0 == mode1);
15984 if (arg0 == error_mark_node || arg1 == error_mark_node)
15985 return const0_rtx;
15987 if (target == 0
15988 || GET_MODE (target) != SImode
15989 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15990 target = gen_reg_rtx (SImode);
15991 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15992 op0 = copy_to_mode_reg (mode0, op0);
15993 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15994 op1 = copy_to_mode_reg (mode1, op1);
15996 scratch = gen_reg_rtx (CCFPmode);
15998 pat = GEN_FCN (icode) (scratch, op0, op1);
15999 if (!pat)
16000 return const0_rtx;
16002 emit_insn (pat);
16004 switch (form_int)
16006 /* LT bit. */
16007 case 0:
16008 code = LT;
16009 break;
16010 /* GT bit. */
16011 case 1:
16012 code = GT;
16013 break;
16014 /* EQ bit. */
16015 case 2:
16016 code = EQ;
16017 break;
16018 /* UN bit. */
16019 case 3:
16020 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16021 return target;
16022 default:
16023 error ("argument 1 of %qs is out of range",
16024 "__builtin_paired_predicate");
16025 return const0_rtx;
16028 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16029 emit_move_insn (target, tmp);
16030 return target;
16033 /* Issue an error for a builtin function that is called without the
16034    appropriate target options being enabled.  */
16036 static void
16037 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16039 size_t uns_fncode = (size_t) fncode;
16040 const char *name = rs6000_builtin_info[uns_fncode].name;
16041 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16043 gcc_assert (name != NULL);
16044 if ((fnmask & RS6000_BTM_CELL) != 0)
16045 error ("builtin function %qs is only valid for the cell processor", name);
16046 else if ((fnmask & RS6000_BTM_VSX) != 0)
16047 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16048 else if ((fnmask & RS6000_BTM_HTM) != 0)
16049 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16050 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16051 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16052 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16053 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16054 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16055 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16056 error ("builtin function %qs requires the %qs and %qs options",
16057 name, "-mhard-dfp", "-mpower8-vector");
16058 else if ((fnmask & RS6000_BTM_DFP) != 0)
16059 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16060 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16061 error ("builtin function %qs requires the %qs option", name,
16062 "-mpower8-vector");
16063 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16064 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16065 error ("builtin function %qs requires the %qs and %qs options",
16066 name, "-mcpu=power9", "-m64");
16067 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16068 error ("builtin function %qs requires the %qs option", name,
16069 "-mcpu=power9");
16070 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16071 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16072 error ("builtin function %qs requires the %qs and %qs options",
16073 name, "-mcpu=power9", "-m64");
16074 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16075 error ("builtin function %qs requires the %qs option", name,
16076 "-mcpu=power9");
16077 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16078 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16079 error ("builtin function %qs requires the %qs and %qs options",
16080 name, "-mhard-float", "-mlong-double-128");
16081 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16082 error ("builtin function %qs requires the %qs option", name,
16083 "-mhard-float");
16084 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16085 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16086 else
16087 error ("builtin function %qs is not supported with the current options",
16088 name);
16091 /* Target hook for early folding of built-ins, shamelessly stolen
16092 from ia64.c. */
16094 static tree
16095 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16096 tree *args, bool ignore ATTRIBUTE_UNUSED)
16098 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16100 enum rs6000_builtins fn_code
16101 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16102 switch (fn_code)
16104 case RS6000_BUILTIN_NANQ:
16105 case RS6000_BUILTIN_NANSQ:
16107 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16108 const char *str = c_getstr (*args);
16109 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16110 REAL_VALUE_TYPE real;
16112 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16113 return build_real (type, real);
16114 return NULL_TREE;
16116 case RS6000_BUILTIN_INFQ:
16117 case RS6000_BUILTIN_HUGE_VALQ:
16119 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16120 REAL_VALUE_TYPE inf;
16121 real_inf (&inf);
16122 return build_real (type, inf);
16124 default:
16125 break;
16128 #ifdef SUBTARGET_FOLD_BUILTIN
16129 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16130 #else
16131 return NULL_TREE;
16132 #endif
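/* For example (illustrative): with this hook,

     __float128 x = __builtin_infq ();

   folds at compile time into a REAL_CST holding +Inf rather than
   surviving as a call.  */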
16135 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16136 a constant, use rs6000_fold_builtin.) */
16138 bool
16139 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16141 gimple *stmt = gsi_stmt (*gsi);
16142 tree fndecl = gimple_call_fndecl (stmt);
16143 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16144 enum rs6000_builtins fn_code
16145 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16146 tree arg0, arg1, lhs;
16148 size_t uns_fncode = (size_t) fn_code;
16149 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16150 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16151 const char *fn_name2 = (icode != CODE_FOR_nothing)
16152 ? get_insn_name ((int) icode)
16153 : "nothing";
16155 if (TARGET_DEBUG_BUILTIN)
16156 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16157 fn_code, fn_name1, fn_name2);
16159 if (!rs6000_fold_gimple)
16160 return false;
16162   /* Do not gimple-fold calls that have no lhs; there is nothing to
     assign the folded result to.  */
16163 if (!gimple_call_lhs (stmt))
16164 return false;
16166 switch (fn_code)
16168 /* Flavors of vec_add. We deliberately don't expand
16169 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16170 TImode, resulting in much poorer code generation. */
16171 case ALTIVEC_BUILTIN_VADDUBM:
16172 case ALTIVEC_BUILTIN_VADDUHM:
16173 case ALTIVEC_BUILTIN_VADDUWM:
16174 case P8V_BUILTIN_VADDUDM:
16175 case ALTIVEC_BUILTIN_VADDFP:
16176 case VSX_BUILTIN_XVADDDP:
16178 arg0 = gimple_call_arg (stmt, 0);
16179 arg1 = gimple_call_arg (stmt, 1);
16180 lhs = gimple_call_lhs (stmt);
16181 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16182 gimple_set_location (g, gimple_location (stmt));
16183 gsi_replace (gsi, g, true);
16184 return true;
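/* As an illustration, the replacement just made rewrites GIMPLE such as

     _5 = __builtin_altivec_vadduwm (a_1, b_2);

   into

     _5 = a_1 + b_2;

   so later passes see an ordinary vector addition.  */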
16186 /* Flavors of vec_sub. We deliberately don't expand
16187 P8V_BUILTIN_VSUBUQM. */
16188 case ALTIVEC_BUILTIN_VSUBUBM:
16189 case ALTIVEC_BUILTIN_VSUBUHM:
16190 case ALTIVEC_BUILTIN_VSUBUWM:
16191 case P8V_BUILTIN_VSUBUDM:
16192 case ALTIVEC_BUILTIN_VSUBFP:
16193 case VSX_BUILTIN_XVSUBDP:
16195 arg0 = gimple_call_arg (stmt, 0);
16196 arg1 = gimple_call_arg (stmt, 1);
16197 lhs = gimple_call_lhs (stmt);
16198 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16199 gimple_set_location (g, gimple_location (stmt));
16200 gsi_replace (gsi, g, true);
16201 return true;
16203 case VSX_BUILTIN_XVMULSP:
16204 case VSX_BUILTIN_XVMULDP:
16206 arg0 = gimple_call_arg (stmt, 0);
16207 arg1 = gimple_call_arg (stmt, 1);
16208 lhs = gimple_call_lhs (stmt);
16209 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16210 gimple_set_location (g, gimple_location (stmt));
16211 gsi_replace (gsi, g, true);
16212 return true;
16214 /* Even element flavors of vec_mul (signed). */
16215 case ALTIVEC_BUILTIN_VMULESB:
16216 case ALTIVEC_BUILTIN_VMULESH:
16217 /* Even element flavors of vec_mul (unsigned). */
16218 case ALTIVEC_BUILTIN_VMULEUB:
16219 case ALTIVEC_BUILTIN_VMULEUH:
16221 arg0 = gimple_call_arg (stmt, 0);
16222 arg1 = gimple_call_arg (stmt, 1);
16223 lhs = gimple_call_lhs (stmt);
16224 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16225 gimple_set_location (g, gimple_location (stmt));
16226 gsi_replace (gsi, g, true);
16227 return true;
16229 /* Odd element flavors of vec_mul (signed). */
16230 case ALTIVEC_BUILTIN_VMULOSB:
16231 case ALTIVEC_BUILTIN_VMULOSH:
16232 /* Odd element flavors of vec_mul (unsigned). */
16233 case ALTIVEC_BUILTIN_VMULOUB:
16234 case ALTIVEC_BUILTIN_VMULOUH:
16236 arg0 = gimple_call_arg (stmt, 0);
16237 arg1 = gimple_call_arg (stmt, 1);
16238 lhs = gimple_call_lhs (stmt);
16239 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16240 gimple_set_location (g, gimple_location (stmt));
16241 gsi_replace (gsi, g, true);
16242 return true;
16244 /* Flavors of vec_div (Integer). */
16245 case VSX_BUILTIN_DIV_V2DI:
16246 case VSX_BUILTIN_UDIV_V2DI:
16248 arg0 = gimple_call_arg (stmt, 0);
16249 arg1 = gimple_call_arg (stmt, 1);
16250 lhs = gimple_call_lhs (stmt);
16251 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16252 gimple_set_location (g, gimple_location (stmt));
16253 gsi_replace (gsi, g, true);
16254 return true;
16256 /* Flavors of vec_div (Float). */
16257 case VSX_BUILTIN_XVDIVSP:
16258 case VSX_BUILTIN_XVDIVDP:
16260 arg0 = gimple_call_arg (stmt, 0);
16261 arg1 = gimple_call_arg (stmt, 1);
16262 lhs = gimple_call_lhs (stmt);
16263 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16264 gimple_set_location (g, gimple_location (stmt));
16265 gsi_replace (gsi, g, true);
16266 return true;
16268 /* Flavors of vec_and. */
16269 case ALTIVEC_BUILTIN_VAND:
16271 arg0 = gimple_call_arg (stmt, 0);
16272 arg1 = gimple_call_arg (stmt, 1);
16273 lhs = gimple_call_lhs (stmt);
16274 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16275 gimple_set_location (g, gimple_location (stmt));
16276 gsi_replace (gsi, g, true);
16277 return true;
16279 /* Flavors of vec_andc. */
16280 case ALTIVEC_BUILTIN_VANDC:
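/* Editorial note: GIMPLE has no single and-with-complement operator,
   so the fold builds two statements: temp = ~arg1; lhs = arg0 & temp.  */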
16282 arg0 = gimple_call_arg (stmt, 0);
16283 arg1 = gimple_call_arg (stmt, 1);
16284 lhs = gimple_call_lhs (stmt);
16285 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16286 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16287 gimple_set_location (g, gimple_location (stmt));
16288 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16289 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16290 gimple_set_location (g, gimple_location (stmt));
16291 gsi_replace (gsi, g, true);
16292 return true;
16294 /* Flavors of vec_nand. */
16295 case P8V_BUILTIN_VEC_NAND:
16296 case P8V_BUILTIN_NAND_V16QI:
16297 case P8V_BUILTIN_NAND_V8HI:
16298 case P8V_BUILTIN_NAND_V4SI:
16299 case P8V_BUILTIN_NAND_V4SF:
16300 case P8V_BUILTIN_NAND_V2DF:
16301 case P8V_BUILTIN_NAND_V2DI:
16303 arg0 = gimple_call_arg (stmt, 0);
16304 arg1 = gimple_call_arg (stmt, 1);
16305 lhs = gimple_call_lhs (stmt);
16306 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16307 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16308 gimple_set_location (g, gimple_location (stmt));
16309 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16310 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16311 gimple_set_location (g, gimple_location (stmt));
16312 gsi_replace (gsi, g, true);
16313 return true;
16315 /* Flavors of vec_or. */
16316 case ALTIVEC_BUILTIN_VOR:
16318 arg0 = gimple_call_arg (stmt, 0);
16319 arg1 = gimple_call_arg (stmt, 1);
16320 lhs = gimple_call_lhs (stmt);
16321 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16322 gimple_set_location (g, gimple_location (stmt));
16323 gsi_replace (gsi, g, true);
16324 return true;
16326 /* Flavors of vec_orc. */
16327 case P8V_BUILTIN_ORC_V16QI:
16328 case P8V_BUILTIN_ORC_V8HI:
16329 case P8V_BUILTIN_ORC_V4SI:
16330 case P8V_BUILTIN_ORC_V4SF:
16331 case P8V_BUILTIN_ORC_V2DF:
16332 case P8V_BUILTIN_ORC_V2DI:
16334 arg0 = gimple_call_arg (stmt, 0);
16335 arg1 = gimple_call_arg (stmt, 1);
16336 lhs = gimple_call_lhs (stmt);
16337 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16338 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16339 gimple_set_location (g, gimple_location (stmt));
16340 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16341 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16342 gimple_set_location (g, gimple_location (stmt));
16343 gsi_replace (gsi, g, true);
16344 return true;
16346 /* Flavors of vec_xor. */
16347 case ALTIVEC_BUILTIN_VXOR:
16349 arg0 = gimple_call_arg (stmt, 0);
16350 arg1 = gimple_call_arg (stmt, 1);
16351 lhs = gimple_call_lhs (stmt);
16352 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16353 gimple_set_location (g, gimple_location (stmt));
16354 gsi_replace (gsi, g, true);
16355 return true;
16357 /* Flavors of vec_nor. */
16358 case ALTIVEC_BUILTIN_VNOR:
16360 arg0 = gimple_call_arg (stmt, 0);
16361 arg1 = gimple_call_arg (stmt, 1);
16362 lhs = gimple_call_lhs (stmt);
16363 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16364 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16365 gimple_set_location (g, gimple_location (stmt));
16366 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16367 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16368 gimple_set_location (g, gimple_location (stmt));
16369 gsi_replace (gsi, g, true);
16370 return true;
16372 /* Flavors of vec_abs. */
16373 case ALTIVEC_BUILTIN_ABS_V16QI:
16374 case ALTIVEC_BUILTIN_ABS_V8HI:
16375 case ALTIVEC_BUILTIN_ABS_V4SI:
16376 case ALTIVEC_BUILTIN_ABS_V4SF:
16377 case P8V_BUILTIN_ABS_V2DI:
16378 case VSX_BUILTIN_XVABSDP:
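/* Editorial note: punt for signed integer vectors unless overflow
   wraps, since folding to ABS_EXPR would introduce undefined
   behavior on the most negative element that the builtin itself
   does not have.  */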
16380 arg0 = gimple_call_arg (stmt, 0);
16381 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16382 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16383 return false;
16384 lhs = gimple_call_lhs (stmt);
16385 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16386 gimple_set_location (g, gimple_location (stmt));
16387 gsi_replace (gsi, g, true);
16388 return true;
16390 /* Flavors of vec_min. */
16391 case VSX_BUILTIN_XVMINDP:
16392 case P8V_BUILTIN_VMINSD:
16393 case P8V_BUILTIN_VMINUD:
16394 case ALTIVEC_BUILTIN_VMINSB:
16395 case ALTIVEC_BUILTIN_VMINSH:
16396 case ALTIVEC_BUILTIN_VMINSW:
16397 case ALTIVEC_BUILTIN_VMINUB:
16398 case ALTIVEC_BUILTIN_VMINUH:
16399 case ALTIVEC_BUILTIN_VMINUW:
16400 case ALTIVEC_BUILTIN_VMINFP:
16402 arg0 = gimple_call_arg (stmt, 0);
16403 arg1 = gimple_call_arg (stmt, 1);
16404 lhs = gimple_call_lhs (stmt);
16405 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16406 gimple_set_location (g, gimple_location (stmt));
16407 gsi_replace (gsi, g, true);
16408 return true;
16410 /* Flavors of vec_max. */
16411 case VSX_BUILTIN_XVMAXDP:
16412 case P8V_BUILTIN_VMAXSD:
16413 case P8V_BUILTIN_VMAXUD:
16414 case ALTIVEC_BUILTIN_VMAXSB:
16415 case ALTIVEC_BUILTIN_VMAXSH:
16416 case ALTIVEC_BUILTIN_VMAXSW:
16417 case ALTIVEC_BUILTIN_VMAXUB:
16418 case ALTIVEC_BUILTIN_VMAXUH:
16419 case ALTIVEC_BUILTIN_VMAXUW:
16420 case ALTIVEC_BUILTIN_VMAXFP:
16422 arg0 = gimple_call_arg (stmt, 0);
16423 arg1 = gimple_call_arg (stmt, 1);
16424 lhs = gimple_call_lhs (stmt);
16425 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16426 gimple_set_location (g, gimple_location (stmt));
16427 gsi_replace (gsi, g, true);
16428 return true;
16430 /* Flavors of vec_eqv. */
16431 case P8V_BUILTIN_EQV_V16QI:
16432 case P8V_BUILTIN_EQV_V8HI:
16433 case P8V_BUILTIN_EQV_V4SI:
16434 case P8V_BUILTIN_EQV_V4SF:
16435 case P8V_BUILTIN_EQV_V2DF:
16436 case P8V_BUILTIN_EQV_V2DI:
16438 arg0 = gimple_call_arg (stmt, 0);
16439 arg1 = gimple_call_arg (stmt, 1);
16440 lhs = gimple_call_lhs (stmt);
16441 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16442 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16443 gimple_set_location (g, gimple_location (stmt));
16444 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16445 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16446 gimple_set_location (g, gimple_location (stmt));
16447 gsi_replace (gsi, g, true);
16448 return true;
16450 /* Flavors of vec_rotate_left. */
16451 case ALTIVEC_BUILTIN_VRLB:
16452 case ALTIVEC_BUILTIN_VRLH:
16453 case ALTIVEC_BUILTIN_VRLW:
16454 case P8V_BUILTIN_VRLD:
16456 arg0 = gimple_call_arg (stmt, 0);
16457 arg1 = gimple_call_arg (stmt, 1);
16458 lhs = gimple_call_lhs (stmt);
16459 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16460 gimple_set_location (g, gimple_location (stmt));
16461 gsi_replace (gsi, g, true);
16462 return true;
16464 /* Flavors of vector shift right algebraic.
16465 vec_sra{b,h,w} -> vsra{b,h,w}. */
16466 case ALTIVEC_BUILTIN_VSRAB:
16467 case ALTIVEC_BUILTIN_VSRAH:
16468 case ALTIVEC_BUILTIN_VSRAW:
16469 case P8V_BUILTIN_VSRAD:
16471 arg0 = gimple_call_arg (stmt, 0);
16472 arg1 = gimple_call_arg (stmt, 1);
16473 lhs = gimple_call_lhs (stmt);
16474 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16475 gimple_set_location (g, gimple_location (stmt));
16476 gsi_replace (gsi, g, true);
16477 return true;
16479 /* Flavors of vector shift left.
16480 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16481 case ALTIVEC_BUILTIN_VSLB:
16482 case ALTIVEC_BUILTIN_VSLH:
16483 case ALTIVEC_BUILTIN_VSLW:
16484 case P8V_BUILTIN_VSLD:
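/* Editorial note: as with vec_abs above, punt for signed integer
   vectors unless overflow wraps; the hardware shift simply discards
   overflowed bits, while LSHIFT_EXPR on such a type could be
   undefined.  */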
16486 arg0 = gimple_call_arg (stmt, 0);
16487 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16488 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16489 return false;
16490 arg1 = gimple_call_arg (stmt, 1);
16491 lhs = gimple_call_lhs (stmt);
16492 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16493 gimple_set_location (g, gimple_location (stmt));
16494 gsi_replace (gsi, g, true);
16495 return true;
16497 /* Flavors of vector shift right. */
16498 case ALTIVEC_BUILTIN_VSRB:
16499 case ALTIVEC_BUILTIN_VSRH:
16500 case ALTIVEC_BUILTIN_VSRW:
16501 case P8V_BUILTIN_VSRD:
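/* Editorial note: vsr is a logical shift, but RSHIFT_EXPR on a
   signed type is an arithmetic shift; view-convert the operand to
   unsigned so the generic shift is logical, then convert the result
   back to the lhs type.  */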
16503 arg0 = gimple_call_arg (stmt, 0);
16504 arg1 = gimple_call_arg (stmt, 1);
16505 lhs = gimple_call_lhs (stmt);
16506 gimple_seq stmts = NULL;
16507 /* Convert arg0 to unsigned. */
16508 tree arg0_unsigned
16509 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16510 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16511 tree res
16512 = gimple_build (&stmts, RSHIFT_EXPR,
16513 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16514 /* Convert result back to the lhs type. */
16515 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16516 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16517 update_call_from_tree (gsi, res);
16518 return true;
16520 default:
16521 if (TARGET_DEBUG_BUILTIN)
16522 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16523 fn_code, fn_name1, fn_name2);
16524 break;
16527 return false;
16530 /* Expand an expression EXP that calls a built-in function,
16531 with result going to TARGET if that's convenient
16532 (and in mode MODE if that's convenient).
16533 SUBTARGET may be used as the target for computing one of EXP's operands.
16534 IGNORE is nonzero if the value is to be ignored. */
16536 static rtx
16537 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16538 machine_mode mode ATTRIBUTE_UNUSED,
16539 int ignore ATTRIBUTE_UNUSED)
16541 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16542 enum rs6000_builtins fcode
16543 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16544 size_t uns_fcode = (size_t)fcode;
16545 const struct builtin_description *d;
16546 size_t i;
16547 rtx ret;
16548 bool success;
16549 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16550 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
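/* Editorial note: a builtin is usable only if every ISA feature bit
   in its mask is enabled in the current rs6000_builtin_mask.  */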
16552 if (TARGET_DEBUG_BUILTIN)
16554 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16555 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16556 const char *name2 = (icode != CODE_FOR_nothing)
16557 ? get_insn_name ((int) icode)
16558 : "nothing";
16559 const char *name3;
16561 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16563 default: name3 = "unknown"; break;
16564 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16565 case RS6000_BTC_UNARY: name3 = "unary"; break;
16566 case RS6000_BTC_BINARY: name3 = "binary"; break;
16567 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16568 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16569 case RS6000_BTC_ABS: name3 = "abs"; break;
16570 case RS6000_BTC_DST: name3 = "dst"; break;
16574 fprintf (stderr,
16575 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16576 (name1) ? name1 : "---", fcode,
16577 (name2) ? name2 : "---", (int) icode,
16578 name3,
16579 func_valid_p ? "" : ", not valid");
16582 if (!func_valid_p)
16584 rs6000_invalid_builtin (fcode);
16586 /* Given it is invalid, just generate a normal call. */
16587 return expand_call (exp, target, ignore);
16590 switch (fcode)
16592 case RS6000_BUILTIN_RECIP:
16593 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16595 case RS6000_BUILTIN_RECIPF:
16596 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16598 case RS6000_BUILTIN_RSQRTF:
16599 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16601 case RS6000_BUILTIN_RSQRT:
16602 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16604 case POWER7_BUILTIN_BPERMD:
16605 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16606 ? CODE_FOR_bpermd_di
16607 : CODE_FOR_bpermd_si), exp, target);
16609 case RS6000_BUILTIN_GET_TB:
16610 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16611 target);
16613 case RS6000_BUILTIN_MFTB:
16614 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16615 ? CODE_FOR_rs6000_mftb_di
16616 : CODE_FOR_rs6000_mftb_si),
16617 target);
16619 case RS6000_BUILTIN_MFFS:
16620 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16622 case RS6000_BUILTIN_MTFSF:
16623 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16625 case RS6000_BUILTIN_CPU_INIT:
16626 case RS6000_BUILTIN_CPU_IS:
16627 case RS6000_BUILTIN_CPU_SUPPORTS:
16628 return cpu_expand_builtin (fcode, exp, target);
16630 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16631 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16633 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16634 : (int) CODE_FOR_altivec_lvsl_direct);
16635 machine_mode tmode = insn_data[icode].operand[0].mode;
16636 machine_mode mode = insn_data[icode].operand[1].mode;
16637 tree arg;
16638 rtx op, addr, pat;
16640 gcc_assert (TARGET_ALTIVEC);
16642 arg = CALL_EXPR_ARG (exp, 0);
16643 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16644 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16645 addr = memory_address (mode, op);
16646 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16647 op = addr;
16648 else
16650 /* For the load case we need to negate the address. */
16651 op = gen_reg_rtx (GET_MODE (addr));
16652 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16654 op = gen_rtx_MEM (mode, op);
16656 if (target == 0
16657 || GET_MODE (target) != tmode
16658 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16659 target = gen_reg_rtx (tmode);
16661 pat = GEN_FCN (icode) (target, op);
16662 if (!pat)
16663 return 0;
16664 emit_insn (pat);
16666 return target;
16669 case ALTIVEC_BUILTIN_VCFUX:
16670 case ALTIVEC_BUILTIN_VCFSX:
16671 case ALTIVEC_BUILTIN_VCTUXS:
16672 case ALTIVEC_BUILTIN_VCTSXS:
16673 /* FIXME: There's got to be a nicer way to handle this case than
16674 constructing a new CALL_EXPR. */
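/* Editorial note: these builtins take an optional scale operand, so
   a one-argument call is normalized here by supplying the default
   scale of zero.  */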
16675 if (call_expr_nargs (exp) == 1)
16677 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16678 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16680 break;
16682 default:
16683 break;
16686 if (TARGET_ALTIVEC)
16688 ret = altivec_expand_builtin (exp, target, &success);
16690 if (success)
16691 return ret;
16693 if (TARGET_PAIRED_FLOAT)
16695 ret = paired_expand_builtin (exp, target, &success);
16697 if (success)
16698 return ret;
16700 if (TARGET_HTM)
16702 ret = htm_expand_builtin (exp, target, &success);
16704 if (success)
16705 return ret;
16708 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16709 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16710 gcc_assert (attr == RS6000_BTC_UNARY
16711 || attr == RS6000_BTC_BINARY
16712 || attr == RS6000_BTC_TERNARY
16713 || attr == RS6000_BTC_SPECIAL);
16715 /* Handle simple unary operations. */
16716 d = bdesc_1arg;
16717 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16718 if (d->code == fcode)
16719 return rs6000_expand_unop_builtin (d->icode, exp, target);
16721 /* Handle simple binary operations. */
16722 d = bdesc_2arg;
16723 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16724 if (d->code == fcode)
16725 return rs6000_expand_binop_builtin (d->icode, exp, target);
16727 /* Handle simple ternary operations. */
16728 d = bdesc_3arg;
16729 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16730 if (d->code == fcode)
16731 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16733 /* Handle simple no-argument operations. */
16734 d = bdesc_0arg;
16735 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16736 if (d->code == fcode)
16737 return rs6000_expand_zeroop_builtin (d->icode, target);
16739 gcc_unreachable ();
16742 /* Create a builtin vector type with a name, taking care not to give
16743 the canonical type a name. */
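/* For example, rs6000_init_builtins below calls
     rs6000_vector_type ("__vector signed int", intSI_type_node, 4)
   to create the user-visible V4SI type.  */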
16745 static tree
16746 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16748 tree result = build_vector_type (elt_type, num_elts);
16750 /* Copy so we don't give the canonical type a name. */
16751 result = build_variant_type_copy (result);
16753 add_builtin_type (name, result);
16755 return result;
16758 static void
16759 rs6000_init_builtins (void)
16761 tree tdecl;
16762 tree ftype;
16763 machine_mode mode;
16765 if (TARGET_DEBUG_BUILTIN)
16766 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16767 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16768 (TARGET_ALTIVEC) ? ", altivec" : "",
16769 (TARGET_VSX) ? ", vsx" : "");
16771 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16772 V2SF_type_node = build_vector_type (float_type_node, 2);
16773 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16774 : "__vector long long",
16775 intDI_type_node, 2);
16776 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16777 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16778 intSI_type_node, 4);
16779 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16780 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16781 intHI_type_node, 8);
16782 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16783 intQI_type_node, 16);
16785 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16786 unsigned_intQI_type_node, 16);
16787 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16788 unsigned_intHI_type_node, 8);
16789 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16790 unsigned_intSI_type_node, 4);
16791 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16792 ? "__vector unsigned long"
16793 : "__vector unsigned long long",
16794 unsigned_intDI_type_node, 2);
16796 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16797 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16798 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16799 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16801 const_str_type_node
16802 = build_pointer_type (build_qualified_type (char_type_node,
16803 TYPE_QUAL_CONST));
16805 /* We use V1TI mode as a special container to hold __int128_t items that
16806 must live in VSX registers. */
16807 if (intTI_type_node)
16809 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16810 intTI_type_node, 1);
16811 unsigned_V1TI_type_node
16812 = rs6000_vector_type ("__vector unsigned __int128",
16813 unsigned_intTI_type_node, 1);
16816 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16817 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16818 'vector unsigned short'. */
16820 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16821 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16822 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16823 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16824 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16826 long_integer_type_internal_node = long_integer_type_node;
16827 long_unsigned_type_internal_node = long_unsigned_type_node;
16828 long_long_integer_type_internal_node = long_long_integer_type_node;
16829 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16830 intQI_type_internal_node = intQI_type_node;
16831 uintQI_type_internal_node = unsigned_intQI_type_node;
16832 intHI_type_internal_node = intHI_type_node;
16833 uintHI_type_internal_node = unsigned_intHI_type_node;
16834 intSI_type_internal_node = intSI_type_node;
16835 uintSI_type_internal_node = unsigned_intSI_type_node;
16836 intDI_type_internal_node = intDI_type_node;
16837 uintDI_type_internal_node = unsigned_intDI_type_node;
16838 intTI_type_internal_node = intTI_type_node;
16839 uintTI_type_internal_node = unsigned_intTI_type_node;
16840 float_type_internal_node = float_type_node;
16841 double_type_internal_node = double_type_node;
16842 long_double_type_internal_node = long_double_type_node;
16843 dfloat64_type_internal_node = dfloat64_type_node;
16844 dfloat128_type_internal_node = dfloat128_type_node;
16845 void_type_internal_node = void_type_node;
16847 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16848 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16849 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16850 format that uses a pair of doubles, depending on the switches and
16851 defaults.
16853 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16854 floating point, we need to make sure the type is non-zero or else
16855 self-test fails during bootstrap.
16857 We don't register a built-in type for __ibm128 if the type is the same as
16858 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
16859 __ibm128 to long double.
16861 For IEEE 128-bit floating point, always create the type __ieee128. If the
16862 user passed -mfloat128, rs6000-c.c will create a #define from __float128 to
16863 __ieee128. */
16864 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16866 ibm128_float_type_node = make_node (REAL_TYPE);
16867 TYPE_PRECISION (ibm128_float_type_node) = 128;
16868 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16869 layout_type (ibm128_float_type_node);
16871 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16872 "__ibm128");
16874 else
16875 ibm128_float_type_node = long_double_type_node;
16877 if (TARGET_FLOAT128_TYPE)
16879 ieee128_float_type_node = float128_type_node;
16880 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16881 "__ieee128");
16884 else
16885 ieee128_float_type_node = long_double_type_node;
16887 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16888 tree type node. */
16889 builtin_mode_to_type[QImode][0] = integer_type_node;
16890 builtin_mode_to_type[HImode][0] = integer_type_node;
16891 builtin_mode_to_type[SImode][0] = intSI_type_node;
16892 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16893 builtin_mode_to_type[DImode][0] = intDI_type_node;
16894 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16895 builtin_mode_to_type[TImode][0] = intTI_type_node;
16896 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16897 builtin_mode_to_type[SFmode][0] = float_type_node;
16898 builtin_mode_to_type[DFmode][0] = double_type_node;
16899 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16900 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16901 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16902 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16903 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16904 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16905 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16906 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16907 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16908 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16909 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16910 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16911 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16912 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16913 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16914 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16915 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16916 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16917 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16919 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16920 TYPE_NAME (bool_char_type_node) = tdecl;
16922 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16923 TYPE_NAME (bool_short_type_node) = tdecl;
16925 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16926 TYPE_NAME (bool_int_type_node) = tdecl;
16928 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16929 TYPE_NAME (pixel_type_node) = tdecl;
16931 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16932 bool_char_type_node, 16);
16933 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16934 bool_short_type_node, 8);
16935 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16936 bool_int_type_node, 4);
16937 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16938 ? "__vector __bool long"
16939 : "__vector __bool long long",
16940 bool_long_type_node, 2);
16941 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16942 pixel_type_node, 8);
16944 /* Paired builtins are only available if the compiler was configured with
16945 the appropriate options, so only create them when those options are
16946 enabled. Create AltiVec and VSX builtins on machines with at least the
16947 general purpose extensions (970 and newer) to allow the use of the
16948 target attribute. */
16949 if (TARGET_PAIRED_FLOAT)
16950 paired_init_builtins ();
16951 if (TARGET_EXTRA_BUILTINS)
16952 altivec_init_builtins ();
16953 if (TARGET_HTM)
16954 htm_init_builtins ();
16956 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
16957 rs6000_common_init_builtins ();
16959 ftype = build_function_type_list (ieee128_float_type_node,
16960 const_str_type_node, NULL_TREE);
16961 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16962 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16964 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16965 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16966 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16968 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16969 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16970 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16972 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16973 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16974 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16976 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16977 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16978 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16980 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16981 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16982 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16984 mode = (TARGET_64BIT) ? DImode : SImode;
16985 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16986 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16987 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16989 ftype = build_function_type_list (unsigned_intDI_type_node,
16990 NULL_TREE);
16991 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16993 if (TARGET_64BIT)
16994 ftype = build_function_type_list (unsigned_intDI_type_node,
16995 NULL_TREE);
16996 else
16997 ftype = build_function_type_list (unsigned_intSI_type_node,
16998 NULL_TREE);
16999 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17001 ftype = build_function_type_list (double_type_node, NULL_TREE);
17002 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17004 ftype = build_function_type_list (void_type_node,
17005 intSI_type_node, double_type_node,
17006 NULL_TREE);
17007 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17009 ftype = build_function_type_list (void_type_node, NULL_TREE);
17010 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17012 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17013 NULL_TREE);
17014 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17015 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17017 /* AIX libm provides clog as __clog. */
17018 if (TARGET_XCOFF &&
17019 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17020 set_user_assembler_name (tdecl, "__clog");
17022 #ifdef SUBTARGET_INIT_BUILTINS
17023 SUBTARGET_INIT_BUILTINS;
17024 #endif
17027 /* Returns the rs6000 builtin decl for CODE. */
17029 static tree
17030 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17032 HOST_WIDE_INT fnmask;
17034 if (code >= RS6000_BUILTIN_COUNT)
17035 return error_mark_node;
17037 fnmask = rs6000_builtin_info[code].mask;
17038 if ((fnmask & rs6000_builtin_mask) != fnmask)
17040 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17041 return error_mark_node;
17044 return rs6000_builtin_decls[code];
17047 static void
17048 paired_init_builtins (void)
17050 const struct builtin_description *d;
17051 size_t i;
17052 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17054 tree int_ftype_int_v2sf_v2sf
17055 = build_function_type_list (integer_type_node,
17056 integer_type_node,
17057 V2SF_type_node,
17058 V2SF_type_node,
17059 NULL_TREE);
17060 tree pcfloat_type_node
17061 = build_pointer_type (build_qualified_type
17062 (float_type_node, TYPE_QUAL_CONST));
17064 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17065 long_integer_type_node,
17066 pcfloat_type_node,
17067 NULL_TREE);
17068 tree void_ftype_v2sf_long_pcfloat
17069 = build_function_type_list (void_type_node,
17070 V2SF_type_node,
17071 long_integer_type_node,
17072 pcfloat_type_node,
17073 NULL_TREE);
17076 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17077 PAIRED_BUILTIN_LX);
17080 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17081 PAIRED_BUILTIN_STX);
17083 /* Predicates. */
17084 d = bdesc_paired_preds;
17085 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17087 tree type;
17088 HOST_WIDE_INT mask = d->mask;
17090 if ((mask & builtin_mask) != mask)
17092 if (TARGET_DEBUG_BUILTIN)
17093 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17094 d->name);
17095 continue;
17098 /* Cannot define builtin if the instruction is disabled. */
17099 gcc_assert (d->icode != CODE_FOR_nothing);
17101 if (TARGET_DEBUG_BUILTIN)
17102 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17103 (int)i, get_insn_name (d->icode), (int)d->icode,
17104 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17106 switch (insn_data[d->icode].operand[1].mode)
17108 case E_V2SFmode:
17109 type = int_ftype_int_v2sf_v2sf;
17110 break;
17111 default:
17112 gcc_unreachable ();
17115 def_builtin (d->name, type, d->code);
17119 static void
17120 altivec_init_builtins (void)
17122 const struct builtin_description *d;
17123 size_t i;
17124 tree ftype;
17125 tree decl;
17126 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17128 tree pvoid_type_node = build_pointer_type (void_type_node);
17130 tree pcvoid_type_node
17131 = build_pointer_type (build_qualified_type (void_type_node,
17132 TYPE_QUAL_CONST));
17134 tree int_ftype_opaque
17135 = build_function_type_list (integer_type_node,
17136 opaque_V4SI_type_node, NULL_TREE);
17137 tree opaque_ftype_opaque
17138 = build_function_type_list (integer_type_node, NULL_TREE);
17139 tree opaque_ftype_opaque_int
17140 = build_function_type_list (opaque_V4SI_type_node,
17141 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17142 tree opaque_ftype_opaque_opaque_int
17143 = build_function_type_list (opaque_V4SI_type_node,
17144 opaque_V4SI_type_node, opaque_V4SI_type_node,
17145 integer_type_node, NULL_TREE);
17146 tree opaque_ftype_opaque_opaque_opaque
17147 = build_function_type_list (opaque_V4SI_type_node,
17148 opaque_V4SI_type_node, opaque_V4SI_type_node,
17149 opaque_V4SI_type_node, NULL_TREE);
17150 tree opaque_ftype_opaque_opaque
17151 = build_function_type_list (opaque_V4SI_type_node,
17152 opaque_V4SI_type_node, opaque_V4SI_type_node,
17153 NULL_TREE);
17154 tree int_ftype_int_opaque_opaque
17155 = build_function_type_list (integer_type_node,
17156 integer_type_node, opaque_V4SI_type_node,
17157 opaque_V4SI_type_node, NULL_TREE);
17158 tree int_ftype_int_v4si_v4si
17159 = build_function_type_list (integer_type_node,
17160 integer_type_node, V4SI_type_node,
17161 V4SI_type_node, NULL_TREE);
17162 tree int_ftype_int_v2di_v2di
17163 = build_function_type_list (integer_type_node,
17164 integer_type_node, V2DI_type_node,
17165 V2DI_type_node, NULL_TREE);
17166 tree void_ftype_v4si
17167 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17168 tree v8hi_ftype_void
17169 = build_function_type_list (V8HI_type_node, NULL_TREE);
17170 tree void_ftype_void
17171 = build_function_type_list (void_type_node, NULL_TREE);
17172 tree void_ftype_int
17173 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17175 tree opaque_ftype_long_pcvoid
17176 = build_function_type_list (opaque_V4SI_type_node,
17177 long_integer_type_node, pcvoid_type_node,
17178 NULL_TREE);
17179 tree v16qi_ftype_long_pcvoid
17180 = build_function_type_list (V16QI_type_node,
17181 long_integer_type_node, pcvoid_type_node,
17182 NULL_TREE);
17183 tree v8hi_ftype_long_pcvoid
17184 = build_function_type_list (V8HI_type_node,
17185 long_integer_type_node, pcvoid_type_node,
17186 NULL_TREE);
17187 tree v4si_ftype_long_pcvoid
17188 = build_function_type_list (V4SI_type_node,
17189 long_integer_type_node, pcvoid_type_node,
17190 NULL_TREE);
17191 tree v4sf_ftype_long_pcvoid
17192 = build_function_type_list (V4SF_type_node,
17193 long_integer_type_node, pcvoid_type_node,
17194 NULL_TREE);
17195 tree v2df_ftype_long_pcvoid
17196 = build_function_type_list (V2DF_type_node,
17197 long_integer_type_node, pcvoid_type_node,
17198 NULL_TREE);
17199 tree v2di_ftype_long_pcvoid
17200 = build_function_type_list (V2DI_type_node,
17201 long_integer_type_node, pcvoid_type_node,
17202 NULL_TREE);
17204 tree void_ftype_opaque_long_pvoid
17205 = build_function_type_list (void_type_node,
17206 opaque_V4SI_type_node, long_integer_type_node,
17207 pvoid_type_node, NULL_TREE);
17208 tree void_ftype_v4si_long_pvoid
17209 = build_function_type_list (void_type_node,
17210 V4SI_type_node, long_integer_type_node,
17211 pvoid_type_node, NULL_TREE);
17212 tree void_ftype_v16qi_long_pvoid
17213 = build_function_type_list (void_type_node,
17214 V16QI_type_node, long_integer_type_node,
17215 pvoid_type_node, NULL_TREE);
17217 tree void_ftype_v16qi_pvoid_long
17218 = build_function_type_list (void_type_node,
17219 V16QI_type_node, pvoid_type_node,
17220 long_integer_type_node, NULL_TREE);
17222 tree void_ftype_v8hi_long_pvoid
17223 = build_function_type_list (void_type_node,
17224 V8HI_type_node, long_integer_type_node,
17225 pvoid_type_node, NULL_TREE);
17226 tree void_ftype_v4sf_long_pvoid
17227 = build_function_type_list (void_type_node,
17228 V4SF_type_node, long_integer_type_node,
17229 pvoid_type_node, NULL_TREE);
17230 tree void_ftype_v2df_long_pvoid
17231 = build_function_type_list (void_type_node,
17232 V2DF_type_node, long_integer_type_node,
17233 pvoid_type_node, NULL_TREE);
17234 tree void_ftype_v2di_long_pvoid
17235 = build_function_type_list (void_type_node,
17236 V2DI_type_node, long_integer_type_node,
17237 pvoid_type_node, NULL_TREE);
17238 tree int_ftype_int_v8hi_v8hi
17239 = build_function_type_list (integer_type_node,
17240 integer_type_node, V8HI_type_node,
17241 V8HI_type_node, NULL_TREE);
17242 tree int_ftype_int_v16qi_v16qi
17243 = build_function_type_list (integer_type_node,
17244 integer_type_node, V16QI_type_node,
17245 V16QI_type_node, NULL_TREE);
17246 tree int_ftype_int_v4sf_v4sf
17247 = build_function_type_list (integer_type_node,
17248 integer_type_node, V4SF_type_node,
17249 V4SF_type_node, NULL_TREE);
17250 tree int_ftype_int_v2df_v2df
17251 = build_function_type_list (integer_type_node,
17252 integer_type_node, V2DF_type_node,
17253 V2DF_type_node, NULL_TREE);
17254 tree v2di_ftype_v2di
17255 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17256 tree v4si_ftype_v4si
17257 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17258 tree v8hi_ftype_v8hi
17259 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17260 tree v16qi_ftype_v16qi
17261 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17262 tree v4sf_ftype_v4sf
17263 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17264 tree v2df_ftype_v2df
17265 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17266 tree void_ftype_pcvoid_int_int
17267 = build_function_type_list (void_type_node,
17268 pcvoid_type_node, integer_type_node,
17269 integer_type_node, NULL_TREE);
17271 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17272 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17273 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17274 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17275 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17276 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17277 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17278 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17279 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17280 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17281 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17282 ALTIVEC_BUILTIN_LVXL_V2DF);
17283 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17284 ALTIVEC_BUILTIN_LVXL_V2DI);
17285 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17286 ALTIVEC_BUILTIN_LVXL_V4SF);
17287 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17288 ALTIVEC_BUILTIN_LVXL_V4SI);
17289 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17290 ALTIVEC_BUILTIN_LVXL_V8HI);
17291 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17292 ALTIVEC_BUILTIN_LVXL_V16QI);
17293 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17294 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17295 ALTIVEC_BUILTIN_LVX_V2DF);
17296 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17297 ALTIVEC_BUILTIN_LVX_V2DI);
17298 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17299 ALTIVEC_BUILTIN_LVX_V4SF);
17300 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17301 ALTIVEC_BUILTIN_LVX_V4SI);
17302 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17303 ALTIVEC_BUILTIN_LVX_V8HI);
17304 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17305 ALTIVEC_BUILTIN_LVX_V16QI);
17306 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17307 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17308 ALTIVEC_BUILTIN_STVX_V2DF);
17309 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17310 ALTIVEC_BUILTIN_STVX_V2DI);
17311 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17312 ALTIVEC_BUILTIN_STVX_V4SF);
17313 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17314 ALTIVEC_BUILTIN_STVX_V4SI);
17315 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17316 ALTIVEC_BUILTIN_STVX_V8HI);
17317 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17318 ALTIVEC_BUILTIN_STVX_V16QI);
17319 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17320 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17321 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17322 ALTIVEC_BUILTIN_STVXL_V2DF);
17323 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17324 ALTIVEC_BUILTIN_STVXL_V2DI);
17325 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17326 ALTIVEC_BUILTIN_STVXL_V4SF);
17327 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17328 ALTIVEC_BUILTIN_STVXL_V4SI);
17329 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17330 ALTIVEC_BUILTIN_STVXL_V8HI);
17331 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17332 ALTIVEC_BUILTIN_STVXL_V16QI);
17333 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17334 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17335 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17336 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17337 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17338 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17339 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17340 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17341 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17342 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17343 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17344 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17345 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17346 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17347 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17348 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17350 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17351 VSX_BUILTIN_LXVD2X_V2DF);
17352 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17353 VSX_BUILTIN_LXVD2X_V2DI);
17354 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17355 VSX_BUILTIN_LXVW4X_V4SF);
17356 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17357 VSX_BUILTIN_LXVW4X_V4SI);
17358 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17359 VSX_BUILTIN_LXVW4X_V8HI);
17360 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17361 VSX_BUILTIN_LXVW4X_V16QI);
17362 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17363 VSX_BUILTIN_STXVD2X_V2DF);
17364 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17365 VSX_BUILTIN_STXVD2X_V2DI);
17366 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17367 VSX_BUILTIN_STXVW4X_V4SF);
17368 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17369 VSX_BUILTIN_STXVW4X_V4SI);
17370 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17371 VSX_BUILTIN_STXVW4X_V8HI);
17372 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17373 VSX_BUILTIN_STXVW4X_V16QI);
17375 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17376 VSX_BUILTIN_LD_ELEMREV_V2DF);
17377 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17378 VSX_BUILTIN_LD_ELEMREV_V2DI);
17379 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17380 VSX_BUILTIN_LD_ELEMREV_V4SF);
17381 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17382 VSX_BUILTIN_LD_ELEMREV_V4SI);
17383 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17384 VSX_BUILTIN_ST_ELEMREV_V2DF);
17385 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17386 VSX_BUILTIN_ST_ELEMREV_V2DI);
17387 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17388 VSX_BUILTIN_ST_ELEMREV_V4SF);
17389 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17390 VSX_BUILTIN_ST_ELEMREV_V4SI);
17392 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
17393 VSX_BUILTIN_XL_BE_V8HI);
17394 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
17395 VSX_BUILTIN_XL_BE_V4SI);
17396 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
17397 VSX_BUILTIN_XL_BE_V2DI);
17398 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
17399 VSX_BUILTIN_XL_BE_V4SF);
17400 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
17401 VSX_BUILTIN_XL_BE_V2DF);
17402 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
17403 VSX_BUILTIN_XL_BE_V16QI);
17405 if (TARGET_P9_VECTOR)
17407 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17408 VSX_BUILTIN_LD_ELEMREV_V8HI);
17409 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17410 VSX_BUILTIN_LD_ELEMREV_V16QI);
17411 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17412 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17413 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17414 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17416 else
17418 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17419 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17420 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17421 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17422 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17423 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17424 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17425 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
17428 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17429 VSX_BUILTIN_VEC_LD);
17430 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17431 VSX_BUILTIN_VEC_ST);
17432 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17433 VSX_BUILTIN_VEC_XL);
17434 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17435 VSX_BUILTIN_VEC_XL_BE);
17436 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17437 VSX_BUILTIN_VEC_XST);
17439 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17440 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17441 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17443 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17444 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17445 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17446 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17447 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17448 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17449 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17450 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17451 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17452 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17453 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17454 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17456 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17457 ALTIVEC_BUILTIN_VEC_ADDE);
17458 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17459 ALTIVEC_BUILTIN_VEC_ADDEC);
17460 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17461 ALTIVEC_BUILTIN_VEC_CMPNE);
17462 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17463 ALTIVEC_BUILTIN_VEC_MUL);
17464 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17465 ALTIVEC_BUILTIN_VEC_SUBE);
17466 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17467 ALTIVEC_BUILTIN_VEC_SUBEC);
17469 /* Cell builtins. */
17470 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17471 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17472 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17473 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17475 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17476 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17477 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17478 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17480 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17481 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17482 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17483 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17485 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17486 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17487 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17488 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17490 if (TARGET_P9_VECTOR)
17491 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17492 P9V_BUILTIN_STXVL);
17494 /* Add the DST variants. */
17495 d = bdesc_dst;
17496 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17498 HOST_WIDE_INT mask = d->mask;
17500 /* It is expected that these dst built-in functions may have
17501 d->icode equal to CODE_FOR_nothing. */
17502 if ((mask & builtin_mask) != mask)
17504 if (TARGET_DEBUG_BUILTIN)
17505 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17506 d->name);
17507 continue;
17509 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17512 /* Initialize the predicates. */
17513 d = bdesc_altivec_preds;
17514 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17516 machine_mode mode1;
17517 tree type;
17518 HOST_WIDE_INT mask = d->mask;
17520 if ((mask & builtin_mask) != mask)
17522 if (TARGET_DEBUG_BUILTIN)
17523 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17524 d->name);
17525 continue;
17528 if (rs6000_overloaded_builtin_p (d->code))
17529 mode1 = VOIDmode;
17530 else
17532 /* Cannot define builtin if the instruction is disabled. */
17533 gcc_assert (d->icode != CODE_FOR_nothing);
17534 mode1 = insn_data[d->icode].operand[1].mode;
17537 switch (mode1)
17539 case E_VOIDmode:
17540 type = int_ftype_int_opaque_opaque;
17541 break;
17542 case E_V2DImode:
17543 type = int_ftype_int_v2di_v2di;
17544 break;
17545 case E_V4SImode:
17546 type = int_ftype_int_v4si_v4si;
17547 break;
17548 case E_V8HImode:
17549 type = int_ftype_int_v8hi_v8hi;
17550 break;
17551 case E_V16QImode:
17552 type = int_ftype_int_v16qi_v16qi;
17553 break;
17554 case E_V4SFmode:
17555 type = int_ftype_int_v4sf_v4sf;
17556 break;
17557 case E_V2DFmode:
17558 type = int_ftype_int_v2df_v2df;
17559 break;
17560 default:
17561 gcc_unreachable ();
17564 def_builtin (d->name, type, d->code);
17567 /* Initialize the abs* operators. */
17568 d = bdesc_abs;
17569 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17571 machine_mode mode0;
17572 tree type;
17573 HOST_WIDE_INT mask = d->mask;
17575 if ((mask & builtin_mask) != mask)
17577 if (TARGET_DEBUG_BUILTIN)
17578 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17579 d->name);
17580 continue;
17583 /* Cannot define builtin if the instruction is disabled. */
17584 gcc_assert (d->icode != CODE_FOR_nothing);
17585 mode0 = insn_data[d->icode].operand[0].mode;
17587 switch (mode0)
17589 case E_V2DImode:
17590 type = v2di_ftype_v2di;
17591 break;
17592 case E_V4SImode:
17593 type = v4si_ftype_v4si;
17594 break;
17595 case E_V8HImode:
17596 type = v8hi_ftype_v8hi;
17597 break;
17598 case E_V16QImode:
17599 type = v16qi_ftype_v16qi;
17600 break;
17601 case E_V4SFmode:
17602 type = v4sf_ftype_v4sf;
17603 break;
17604 case E_V2DFmode:
17605 type = v2df_ftype_v2df;
17606 break;
17607 default:
17608 gcc_unreachable ();
17611 def_builtin (d->name, type, d->code);
17614 /* Initialize target builtin that implements
17615 targetm.vectorize.builtin_mask_for_load. */
17617 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17618 v16qi_ftype_long_pcvoid,
17619 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17620 BUILT_IN_MD, NULL, NULL_TREE);
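/* Editorial note: TREE_READONLY on a FUNCTION_DECL marks it "const":
   the result depends only on the arguments, so identical calls can
   be CSEd.  */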
17621 TREE_READONLY (decl) = 1;
17622 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17623 altivec_builtin_mask_for_load = decl;
17625 /* Access to the vec_init patterns. */
17626 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17627 integer_type_node, integer_type_node,
17628 integer_type_node, NULL_TREE);
17629 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17631 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17632 short_integer_type_node,
17633 short_integer_type_node,
17634 short_integer_type_node,
17635 short_integer_type_node,
17636 short_integer_type_node,
17637 short_integer_type_node,
17638 short_integer_type_node, NULL_TREE);
17639 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17641 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17642 char_type_node, char_type_node,
17643 char_type_node, char_type_node,
17644 char_type_node, char_type_node,
17645 char_type_node, char_type_node,
17646 char_type_node, char_type_node,
17647 char_type_node, char_type_node,
17648 char_type_node, char_type_node,
17649 char_type_node, NULL_TREE);
17650 def_builtin ("__builtin_vec_init_v16qi", ftype,
17651 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17653 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17654 float_type_node, float_type_node,
17655 float_type_node, NULL_TREE);
17656 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17658 /* VSX builtins. */
17659 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17660 double_type_node, NULL_TREE);
17661 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17663 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17664 intDI_type_node, NULL_TREE);
17665 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17667 /* Access to the vec_set patterns. */
17668 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17669 intSI_type_node,
17670 integer_type_node, NULL_TREE);
17671 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17673 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17674 intHI_type_node,
17675 integer_type_node, NULL_TREE);
17676 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17678 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17679 intQI_type_node,
17680 integer_type_node, NULL_TREE);
17681 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17683 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17684 float_type_node,
17685 integer_type_node, NULL_TREE);
17686 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17688 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17689 double_type_node,
17690 integer_type_node, NULL_TREE);
17691 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17693 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17694 intDI_type_node,
17695 integer_type_node, NULL_TREE);
17696 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17698 /* Access to the vec_extract patterns. */
17699 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17700 integer_type_node, NULL_TREE);
17701 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17703 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17704 integer_type_node, NULL_TREE);
17705 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17707 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17708 integer_type_node, NULL_TREE);
17709 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17711 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17712 integer_type_node, NULL_TREE);
17713 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17715 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17716 integer_type_node, NULL_TREE);
17717 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17719 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17720 integer_type_node, NULL_TREE);
17721 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17724 if (V1TI_type_node)
17726 tree v1ti_ftype_long_pcvoid
17727 = build_function_type_list (V1TI_type_node,
17728 long_integer_type_node, pcvoid_type_node,
17729 NULL_TREE);
17730 tree void_ftype_v1ti_long_pvoid
17731 = build_function_type_list (void_type_node,
17732 V1TI_type_node, long_integer_type_node,
17733 pvoid_type_node, NULL_TREE);
17734 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17735 VSX_BUILTIN_LXVD2X_V1TI);
17736 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17737 VSX_BUILTIN_STXVD2X_V1TI);
17738 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17739 NULL_TREE);
17740 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17741 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17742 intTI_type_node,
17743 integer_type_node, NULL_TREE);
17744 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17745 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17746 integer_type_node, NULL_TREE);
17747 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17752 static void
17753 htm_init_builtins (void)
17755 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17756 const struct builtin_description *d;
17757 size_t i;
17759 d = bdesc_htm;
17760 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17762 tree op[MAX_HTM_OPERANDS], type;
17763 HOST_WIDE_INT mask = d->mask;
17764 unsigned attr = rs6000_builtin_info[d->code].attr;
17765 bool void_func = (attr & RS6000_BTC_VOID);
17766 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17767 int nopnds = 0;
17768 tree gpr_type_node;
17769 tree rettype;
17770 tree argtype;
17772 /* It is expected that these htm built-in functions may have
17773 d->icode equal to CODE_FOR_nothing. */
17775 if (TARGET_32BIT && TARGET_POWERPC64)
17776 gpr_type_node = long_long_unsigned_type_node;
17777 else
17778 gpr_type_node = long_unsigned_type_node;
17780 if (attr & RS6000_BTC_SPR)
17782 rettype = gpr_type_node;
17783 argtype = gpr_type_node;
17785 else if (d->code == HTM_BUILTIN_TABORTDC
17786 || d->code == HTM_BUILTIN_TABORTDCI)
17788 rettype = unsigned_type_node;
17789 argtype = gpr_type_node;
17791 else
17793 rettype = unsigned_type_node;
17794 argtype = unsigned_type_node;
17797 if ((mask & builtin_mask) != mask)
17799 if (TARGET_DEBUG_BUILTIN)
17800 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17801 continue;
17804 if (d->name == 0)
17806 if (TARGET_DEBUG_BUILTIN)
17807 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17808 (long unsigned) i);
17809 continue;
17812 op[nopnds++] = (void_func) ? void_type_node : rettype;
17814 if (attr_args == RS6000_BTC_UNARY)
17815 op[nopnds++] = argtype;
17816 else if (attr_args == RS6000_BTC_BINARY)
17818 op[nopnds++] = argtype;
17819 op[nopnds++] = argtype;
17821 else if (attr_args == RS6000_BTC_TERNARY)
17823 op[nopnds++] = argtype;
17824 op[nopnds++] = argtype;
17825 op[nopnds++] = argtype;
17828 switch (nopnds)
17830 case 1:
17831 type = build_function_type_list (op[0], NULL_TREE);
17832 break;
17833 case 2:
17834 type = build_function_type_list (op[0], op[1], NULL_TREE);
17835 break;
17836 case 3:
17837 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17838 break;
17839 case 4:
17840 type = build_function_type_list (op[0], op[1], op[2], op[3],
17841 NULL_TREE);
17842 break;
17843 default:
17844 gcc_unreachable ();
17847 def_builtin (d->name, type, d->code);
17851 /* Hash function for builtin functions with up to 3 arguments and a return
17852 type. */
17853 hashval_t
17854 builtin_hasher::hash (builtin_hash_struct *bh)
17856 unsigned ret = 0;
17857 int i;
17859 for (i = 0; i < 4; i++)
17861 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17862 ret = (ret * 2) + bh->uns_p[i];
17865 return ret;
17868 /* Compare builtin hash entries H1 and H2 for equivalence. */
17869 bool
17870 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17872 return ((p1->mode[0] == p2->mode[0])
17873 && (p1->mode[1] == p2->mode[1])
17874 && (p1->mode[2] == p2->mode[2])
17875 && (p1->mode[3] == p2->mode[3])
17876 && (p1->uns_p[0] == p2->uns_p[0])
17877 && (p1->uns_p[1] == p2->uns_p[1])
17878 && (p1->uns_p[2] == p2->uns_p[2])
17879 && (p1->uns_p[3] == p2->uns_p[3]));
17882 /* Map types for builtin functions with an explicit return type and up to 3
17883 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17884 of the missing arguments. */
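/* For example (illustrative): for ALTIVEC_BUILTIN_VMAXUW, which is listed in
   the switch below, builtin_function_type (V4SImode, V4SImode, V4SImode,
   VOIDmode, ...) marks positions 0..2 as unsigned, so the resulting type is
   unsigned V4SI (unsigned V4SI, unsigned V4SI).  */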
17885 static tree
17886 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17887 machine_mode mode_arg1, machine_mode mode_arg2,
17888 enum rs6000_builtins builtin, const char *name)
17890 struct builtin_hash_struct h;
17891 struct builtin_hash_struct *h2;
17892 int num_args = 3;
17893 int i;
17894 tree ret_type = NULL_TREE;
17895 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17897 /* Create builtin_hash_table. */
17898 if (builtin_hash_table == NULL)
17899 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17901 h.type = NULL_TREE;
17902 h.mode[0] = mode_ret;
17903 h.mode[1] = mode_arg0;
17904 h.mode[2] = mode_arg1;
17905 h.mode[3] = mode_arg2;
17906 h.uns_p[0] = 0;
17907 h.uns_p[1] = 0;
17908 h.uns_p[2] = 0;
17909 h.uns_p[3] = 0;
17911 /* If the builtin is a type that produces unsigned results or takes unsigned
17912 arguments, and it is returned as a decl for the vectorizer (such as
17913 widening multiplies, permute), make sure the arguments and return value
17914 are type correct. */
17915 switch (builtin)
17917 /* unsigned 1 argument functions. */
17918 case CRYPTO_BUILTIN_VSBOX:
17919 case P8V_BUILTIN_VGBBD:
17920 case MISC_BUILTIN_CDTBCD:
17921 case MISC_BUILTIN_CBCDTD:
17922 h.uns_p[0] = 1;
17923 h.uns_p[1] = 1;
17924 break;
17926 /* unsigned 2 argument functions. */
17927 case ALTIVEC_BUILTIN_VMULEUB:
17928 case ALTIVEC_BUILTIN_VMULEUH:
17929 case ALTIVEC_BUILTIN_VMULEUW:
17930 case ALTIVEC_BUILTIN_VMULOUB:
17931 case ALTIVEC_BUILTIN_VMULOUH:
17932 case ALTIVEC_BUILTIN_VMULOUW:
17933 case CRYPTO_BUILTIN_VCIPHER:
17934 case CRYPTO_BUILTIN_VCIPHERLAST:
17935 case CRYPTO_BUILTIN_VNCIPHER:
17936 case CRYPTO_BUILTIN_VNCIPHERLAST:
17937 case CRYPTO_BUILTIN_VPMSUMB:
17938 case CRYPTO_BUILTIN_VPMSUMH:
17939 case CRYPTO_BUILTIN_VPMSUMW:
17940 case CRYPTO_BUILTIN_VPMSUMD:
17941 case CRYPTO_BUILTIN_VPMSUM:
17942 case MISC_BUILTIN_ADDG6S:
17943 case MISC_BUILTIN_DIVWEU:
17944 case MISC_BUILTIN_DIVWEUO:
17945 case MISC_BUILTIN_DIVDEU:
17946 case MISC_BUILTIN_DIVDEUO:
17947 case VSX_BUILTIN_UDIV_V2DI:
17948 case ALTIVEC_BUILTIN_VMAXUB:
17949 case ALTIVEC_BUILTIN_VMINUB:
17950 case ALTIVEC_BUILTIN_VMAXUH:
17951 case ALTIVEC_BUILTIN_VMINUH:
17952 case ALTIVEC_BUILTIN_VMAXUW:
17953 case ALTIVEC_BUILTIN_VMINUW:
17954 case P8V_BUILTIN_VMAXUD:
17955 case P8V_BUILTIN_VMINUD:
17956 h.uns_p[0] = 1;
17957 h.uns_p[1] = 1;
17958 h.uns_p[2] = 1;
17959 break;
17961 /* unsigned 3 argument functions. */
17962 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17963 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17964 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17965 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17966 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17967 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17968 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17969 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17970 case VSX_BUILTIN_VPERM_16QI_UNS:
17971 case VSX_BUILTIN_VPERM_8HI_UNS:
17972 case VSX_BUILTIN_VPERM_4SI_UNS:
17973 case VSX_BUILTIN_VPERM_2DI_UNS:
17974 case VSX_BUILTIN_XXSEL_16QI_UNS:
17975 case VSX_BUILTIN_XXSEL_8HI_UNS:
17976 case VSX_BUILTIN_XXSEL_4SI_UNS:
17977 case VSX_BUILTIN_XXSEL_2DI_UNS:
17978 case CRYPTO_BUILTIN_VPERMXOR:
17979 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17980 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17981 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17982 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17983 case CRYPTO_BUILTIN_VSHASIGMAW:
17984 case CRYPTO_BUILTIN_VSHASIGMAD:
17985 case CRYPTO_BUILTIN_VSHASIGMA:
17986 h.uns_p[0] = 1;
17987 h.uns_p[1] = 1;
17988 h.uns_p[2] = 1;
17989 h.uns_p[3] = 1;
17990 break;
17992 /* signed permute functions with unsigned char mask. */
17993 case ALTIVEC_BUILTIN_VPERM_16QI:
17994 case ALTIVEC_BUILTIN_VPERM_8HI:
17995 case ALTIVEC_BUILTIN_VPERM_4SI:
17996 case ALTIVEC_BUILTIN_VPERM_4SF:
17997 case ALTIVEC_BUILTIN_VPERM_2DI:
17998 case ALTIVEC_BUILTIN_VPERM_2DF:
17999 case VSX_BUILTIN_VPERM_16QI:
18000 case VSX_BUILTIN_VPERM_8HI:
18001 case VSX_BUILTIN_VPERM_4SI:
18002 case VSX_BUILTIN_VPERM_4SF:
18003 case VSX_BUILTIN_VPERM_2DI:
18004 case VSX_BUILTIN_VPERM_2DF:
18005 h.uns_p[3] = 1;
18006 break;
18008 /* unsigned args, signed return. */
18009 case VSX_BUILTIN_XVCVUXDSP:
18010 case VSX_BUILTIN_XVCVUXDDP_UNS:
18011 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18012 h.uns_p[1] = 1;
18013 break;
18015 /* signed args, unsigned return. */
18016 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18017 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18018 case MISC_BUILTIN_UNPACK_TD:
18019 case MISC_BUILTIN_UNPACK_V1TI:
18020 h.uns_p[0] = 1;
18021 break;
18023 /* unsigned arguments for 128-bit pack instructions. */
18024 case MISC_BUILTIN_PACK_TD:
18025 case MISC_BUILTIN_PACK_V1TI:
18026 h.uns_p[1] = 1;
18027 h.uns_p[2] = 1;
18028 break;
18030 /* unsigned second arguments (vector shift right). */
18031 case ALTIVEC_BUILTIN_VSRB:
18032 case ALTIVEC_BUILTIN_VSRH:
18033 case ALTIVEC_BUILTIN_VSRW:
18034 case P8V_BUILTIN_VSRD:
18035 h.uns_p[2] = 1;
18036 break;
18038 default:
18039 break;
18042 /* Figure out how many args are present. */
18043 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18044 num_args--;
18046 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18047 if (!ret_type && h.uns_p[0])
18048 ret_type = builtin_mode_to_type[h.mode[0]][0];
18050 if (!ret_type)
18051 fatal_error (input_location,
18052 "internal error: builtin function %qs had an unexpected "
18053 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18055 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18056 arg_type[i] = NULL_TREE;
18058 for (i = 0; i < num_args; i++)
18060 int m = (int) h.mode[i+1];
18061 int uns_p = h.uns_p[i+1];
18063 arg_type[i] = builtin_mode_to_type[m][uns_p];
18064 if (!arg_type[i] && uns_p)
18065 arg_type[i] = builtin_mode_to_type[m][0];
18067 if (!arg_type[i])
18068 fatal_error (input_location,
18069 "internal error: builtin function %qs, argument %d "
18070 "had unexpected argument type %qs", name, i,
18071 GET_MODE_NAME (m));
18074 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18075 if (*found == NULL)
18077 h2 = ggc_alloc<builtin_hash_struct> ();
18078 *h2 = h;
18079 *found = h2;
18081 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18082 arg_type[2], NULL_TREE);
18085 return (*found)->type;
18088 static void
18089 rs6000_common_init_builtins (void)
18091 const struct builtin_description *d;
18092 size_t i;
18094 tree opaque_ftype_opaque = NULL_TREE;
18095 tree opaque_ftype_opaque_opaque = NULL_TREE;
18096 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18097 tree v2si_ftype = NULL_TREE;
18098 tree v2si_ftype_qi = NULL_TREE;
18099 tree v2si_ftype_v2si_qi = NULL_TREE;
18100 tree v2si_ftype_int_qi = NULL_TREE;
18101 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18103 if (TARGET_PAIRED_FLOAT)
18105 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18106 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18109 /* Paired builtins are only available if you build a compiler with the
18110 appropriate options, so only create those builtins with the appropriate
18111 compiler option. Create Altivec and VSX builtins on machines with at
18112 least the general purpose extensions (970 and newer) to allow the use of
18113 the target attribute. */
18115 if (TARGET_EXTRA_BUILTINS)
18116 builtin_mask |= RS6000_BTM_COMMON;
18118 /* Add the ternary operators. */
18119 d = bdesc_3arg;
18120 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18122 tree type;
18123 HOST_WIDE_INT mask = d->mask;
18125 if ((mask & builtin_mask) != mask)
18127 if (TARGET_DEBUG_BUILTIN)
18128 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18129 continue;
18132 if (rs6000_overloaded_builtin_p (d->code))
18134 if (! (type = opaque_ftype_opaque_opaque_opaque))
18135 type = opaque_ftype_opaque_opaque_opaque
18136 = build_function_type_list (opaque_V4SI_type_node,
18137 opaque_V4SI_type_node,
18138 opaque_V4SI_type_node,
18139 opaque_V4SI_type_node,
18140 NULL_TREE);
18142 else
18144 enum insn_code icode = d->icode;
18145 if (d->name == 0)
18147 if (TARGET_DEBUG_BUILTIN)
18148 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18149 (long unsigned)i);
18151 continue;
18154 if (icode == CODE_FOR_nothing)
18156 if (TARGET_DEBUG_BUILTIN)
18157 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18158 d->name);
18160 continue;
18163 type = builtin_function_type (insn_data[icode].operand[0].mode,
18164 insn_data[icode].operand[1].mode,
18165 insn_data[icode].operand[2].mode,
18166 insn_data[icode].operand[3].mode,
18167 d->code, d->name);
18170 def_builtin (d->name, type, d->code);
18173 /* Add the binary operators. */
18174 d = bdesc_2arg;
18175 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18177 machine_mode mode0, mode1, mode2;
18178 tree type;
18179 HOST_WIDE_INT mask = d->mask;
18181 if ((mask & builtin_mask) != mask)
18183 if (TARGET_DEBUG_BUILTIN)
18184 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18185 continue;
18188 if (rs6000_overloaded_builtin_p (d->code))
18190 if (! (type = opaque_ftype_opaque_opaque))
18191 type = opaque_ftype_opaque_opaque
18192 = build_function_type_list (opaque_V4SI_type_node,
18193 opaque_V4SI_type_node,
18194 opaque_V4SI_type_node,
18195 NULL_TREE);
18197 else
18199 enum insn_code icode = d->icode;
18200 if (d->name == 0)
18202 if (TARGET_DEBUG_BUILTIN)
18203 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18204 (long unsigned)i);
18206 continue;
18209 if (icode == CODE_FOR_nothing)
18211 if (TARGET_DEBUG_BUILTIN)
18212 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18213 d->name);
18215 continue;
18218 mode0 = insn_data[icode].operand[0].mode;
18219 mode1 = insn_data[icode].operand[1].mode;
18220 mode2 = insn_data[icode].operand[2].mode;
18222 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18224 if (! (type = v2si_ftype_v2si_qi))
18225 type = v2si_ftype_v2si_qi
18226 = build_function_type_list (opaque_V2SI_type_node,
18227 opaque_V2SI_type_node,
18228 char_type_node,
18229 NULL_TREE);
18232 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18233 && mode2 == QImode)
18235 if (! (type = v2si_ftype_int_qi))
18236 type = v2si_ftype_int_qi
18237 = build_function_type_list (opaque_V2SI_type_node,
18238 integer_type_node,
18239 char_type_node,
18240 NULL_TREE);
18243 else
18244 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18245 d->code, d->name);
18248 def_builtin (d->name, type, d->code);
18251 /* Add the simple unary operators. */
18252 d = bdesc_1arg;
18253 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18255 machine_mode mode0, mode1;
18256 tree type;
18257 HOST_WIDE_INT mask = d->mask;
18259 if ((mask & builtin_mask) != mask)
18261 if (TARGET_DEBUG_BUILTIN)
18262 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18263 continue;
18266 if (rs6000_overloaded_builtin_p (d->code))
18268 if (! (type = opaque_ftype_opaque))
18269 type = opaque_ftype_opaque
18270 = build_function_type_list (opaque_V4SI_type_node,
18271 opaque_V4SI_type_node,
18272 NULL_TREE);
18274 else
18276 enum insn_code icode = d->icode;
18277 if (d->name == 0)
18279 if (TARGET_DEBUG_BUILTIN)
18280 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18281 (long unsigned)i);
18283 continue;
18286 if (icode == CODE_FOR_nothing)
18288 if (TARGET_DEBUG_BUILTIN)
18289 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18290 d->name);
18292 continue;
18295 mode0 = insn_data[icode].operand[0].mode;
18296 mode1 = insn_data[icode].operand[1].mode;
18298 if (mode0 == V2SImode && mode1 == QImode)
18300 if (! (type = v2si_ftype_qi))
18301 type = v2si_ftype_qi
18302 = build_function_type_list (opaque_V2SI_type_node,
18303 char_type_node,
18304 NULL_TREE);
18307 else
18308 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18309 d->code, d->name);
18312 def_builtin (d->name, type, d->code);
18315 /* Add the simple no-argument operators. */
18316 d = bdesc_0arg;
18317 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18319 machine_mode mode0;
18320 tree type;
18321 HOST_WIDE_INT mask = d->mask;
18323 if ((mask & builtin_mask) != mask)
18325 if (TARGET_DEBUG_BUILTIN)
18326 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18327 continue;
18329 if (rs6000_overloaded_builtin_p (d->code))
18331 if (!opaque_ftype_opaque)
18332 opaque_ftype_opaque
18333 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18334 type = opaque_ftype_opaque;
18336 else
18338 enum insn_code icode = d->icode;
18339 if (d->name == 0)
18341 if (TARGET_DEBUG_BUILTIN)
18342 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18343 (long unsigned) i);
18344 continue;
18346 if (icode == CODE_FOR_nothing)
18348 if (TARGET_DEBUG_BUILTIN)
18349 fprintf (stderr,
18350 "rs6000_builtin, skip no-argument %s (no code)\n",
18351 d->name);
18352 continue;
18354 mode0 = insn_data[icode].operand[0].mode;
18355 if (mode0 == V2SImode)
18357 /* Code for paired single. */
18358 if (! (type = v2si_ftype))
18360 v2si_ftype
18361 = build_function_type_list (opaque_V2SI_type_node,
18362 NULL_TREE);
18363 type = v2si_ftype;
18366 else
18367 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18368 d->code, d->name);
18370 def_builtin (d->name, type, d->code);
18374 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18375 static void
18376 init_float128_ibm (machine_mode mode)
18378 if (!TARGET_XL_COMPAT)
18380 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18381 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18382 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18383 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18385 if (!TARGET_HARD_FLOAT)
18387 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18388 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18389 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18390 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18391 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18392 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18393 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18394 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18396 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18397 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18398 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18399 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18400 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18401 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18402 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18403 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18406 else
18408 set_optab_libfunc (add_optab, mode, "_xlqadd");
18409 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18410 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18411 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18414 /* Add various conversions for IFmode to use the traditional TFmode
18415 names. */
18416 if (mode == IFmode)
18418 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18419 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18420 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18421 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18422 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18423 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18425 if (TARGET_POWERPC64)
18427 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18428 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18429 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18430 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18435 /* Set up IEEE 128-bit floating point routines. Use different names if the
18436 arguments can be passed in a vector register. The historical PowerPC
18437 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18438 continue to use that if we aren't using vector registers to pass IEEE
18439 128-bit floating point. */
18441 static void
18442 init_float128_ieee (machine_mode mode)
18444 if (FLOAT128_VECTOR_P (mode))
18446 set_optab_libfunc (add_optab, mode, "__addkf3");
18447 set_optab_libfunc (sub_optab, mode, "__subkf3");
18448 set_optab_libfunc (neg_optab, mode, "__negkf2");
18449 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18450 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18451 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18452 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18454 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18455 set_optab_libfunc (ne_optab, mode, "__nekf2");
18456 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18457 set_optab_libfunc (ge_optab, mode, "__gekf2");
18458 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18459 set_optab_libfunc (le_optab, mode, "__lekf2");
18460 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18462 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18463 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18464 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18465 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18467 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18468 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18469 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18471 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18472 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18473 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18475 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18476 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18477 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18478 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18479 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18480 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18482 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18483 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18484 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18485 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18487 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18488 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18489 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18490 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18492 if (TARGET_POWERPC64)
18494 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18495 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18496 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18497 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18501 else
18503 set_optab_libfunc (add_optab, mode, "_q_add");
18504 set_optab_libfunc (sub_optab, mode, "_q_sub");
18505 set_optab_libfunc (neg_optab, mode, "_q_neg");
18506 set_optab_libfunc (smul_optab, mode, "_q_mul");
18507 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18508 if (TARGET_PPC_GPOPT)
18509 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18511 set_optab_libfunc (eq_optab, mode, "_q_feq");
18512 set_optab_libfunc (ne_optab, mode, "_q_fne");
18513 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18514 set_optab_libfunc (ge_optab, mode, "_q_fge");
18515 set_optab_libfunc (lt_optab, mode, "_q_flt");
18516 set_optab_libfunc (le_optab, mode, "_q_fle");
18518 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18519 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18520 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18521 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18522 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18523 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18524 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18525 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18529 static void
18530 rs6000_init_libfuncs (void)
18532 /* __float128 support. */
18533 if (TARGET_FLOAT128_TYPE)
18535 init_float128_ibm (IFmode);
18536 init_float128_ieee (KFmode);
18539 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18540 if (TARGET_LONG_DOUBLE_128)
18542 if (!TARGET_IEEEQUAD)
18543 init_float128_ibm (TFmode);
18545 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18546 else
18547 init_float128_ieee (TFmode);
18551 /* Emit a potentially record-form instruction, setting DST from SRC.
18552 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18553 signed comparison of DST with zero. If DOT is 1, the generated RTL
18554 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18555 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18556 a separate COMPARE. */
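/* For example (illustrative), with CCREG == CR0 and DOT == 1 this emits
      (parallel [(set cr0 (compare:CC src (const_int 0)))
                 (clobber dst)])
   while with DOT == 2 it emits
      (parallel [(set cr0 (compare:CC src (const_int 0)))
                 (set dst src)]).  */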
18558 void
18559 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18561 if (dot == 0)
18563 emit_move_insn (dst, src);
18564 return;
18567 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18569 emit_move_insn (dst, src);
18570 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18571 return;
18574 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18575 if (dot == 1)
18577 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18578 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18580 else
18582 rtx set = gen_rtx_SET (dst, src);
18583 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18588 /* A validation routine: say whether CODE, a condition code, and MODE
18589 match. The other alternatives either don't make sense or should
18590 never be generated. */
18592 void
18593 validate_condition_mode (enum rtx_code code, machine_mode mode)
18595 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18596 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18597 && GET_MODE_CLASS (mode) == MODE_CC);
18599 /* These don't make sense. */
18600 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18601 || mode != CCUNSmode);
18603 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18604 || mode == CCUNSmode);
18606 gcc_assert (mode == CCFPmode
18607 || (code != ORDERED && code != UNORDERED
18608 && code != UNEQ && code != LTGT
18609 && code != UNGT && code != UNLT
18610 && code != UNGE && code != UNLE));
18612 /* These should never be generated except for
18613 flag_finite_math_only. */
18614 gcc_assert (mode != CCFPmode
18615 || flag_finite_math_only
18616 || (code != LE && code != GE
18617 && code != UNEQ && code != LTGT
18618 && code != UNGT && code != UNLT));
18620 /* These are invalid; the information is not there. */
18621 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18625 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18626 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18627 not zero, store there the bit offset (counted from the right) where
18628 the single stretch of 1 bits begins; and similarly for B, the bit
18629 offset where it ends. */
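/* For example (illustrative): MASK == 0x0ff0 in SImode has its single
   stretch of ones at bits 4..11, so *E is set to 4 and *B to 11;
   0xf000000f is a wrap-around mask (*E == 28, *B == 3); 0x0f0f has two
   stretches of ones and is rejected.  */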
18631 bool
18632 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18634 unsigned HOST_WIDE_INT val = INTVAL (mask);
18635 unsigned HOST_WIDE_INT bit;
18636 int nb, ne;
18637 int n = GET_MODE_PRECISION (mode);
18639 if (mode != DImode && mode != SImode)
18640 return false;
18642 if (INTVAL (mask) >= 0)
18644 bit = val & -val;
18645 ne = exact_log2 (bit);
18646 nb = exact_log2 (val + bit);
18648 else if (val + 1 == 0)
18650 nb = n;
18651 ne = 0;
18653 else if (val & 1)
18655 val = ~val;
18656 bit = val & -val;
18657 nb = exact_log2 (bit);
18658 ne = exact_log2 (val + bit);
18660 else
18662 bit = val & -val;
18663 ne = exact_log2 (bit);
18664 if (val + bit == 0)
18665 nb = n;
18666 else
18667 nb = 0;
18670 nb--;
18672 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18673 return false;
18675 if (b)
18676 *b = nb;
18677 if (e)
18678 *e = ne;
18680 return true;
18683 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18684 or rldicr instruction, to implement an AND with it in mode MODE. */
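/* For example (illustrative): in DImode, 0xffffffffffffff00 can be done
   with rldicr (the ones run up to bit 63), and 0x000000000ffffff0 with
   rlwinm (a non-wrapping mask within the low 32 bits), but
   0x00ffff0000000000 needs more than one instruction.  */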
18686 bool
18687 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18689 int nb, ne;
18691 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18692 return false;
18694 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18695 does not wrap. */
18696 if (mode == DImode)
18697 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18699 /* For SImode, rlwinm can do everything. */
18700 if (mode == SImode)
18701 return (nb < 32 && ne < 32);
18703 return false;
18706 /* Return the instruction template for an AND with mask in mode MODE, with
18707 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18709 const char *
18710 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18712 int nb, ne;
18714 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18715 gcc_unreachable ();
18717 if (mode == DImode && ne == 0)
18719 operands[3] = GEN_INT (63 - nb);
18720 if (dot)
18721 return "rldicl. %0,%1,0,%3";
18722 return "rldicl %0,%1,0,%3";
18725 if (mode == DImode && nb == 63)
18727 operands[3] = GEN_INT (63 - ne);
18728 if (dot)
18729 return "rldicr. %0,%1,0,%3";
18730 return "rldicr %0,%1,0,%3";
18733 if (nb < 32 && ne < 32)
18735 operands[3] = GEN_INT (31 - nb);
18736 operands[4] = GEN_INT (31 - ne);
18737 if (dot)
18738 return "rlwinm. %0,%1,0,%3,%4";
18739 return "rlwinm %0,%1,0,%3,%4";
18742 gcc_unreachable ();
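/* For example (illustrative): operands[2] == 0xff00 in SImode yields
   "rlwinm %0,%1,0,16,23", while 0xfffffffffffffffc in DImode yields
   "rldicr %0,%1,0,61".  */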
18745 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18746 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18747 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18749 bool
18750 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18752 int nb, ne;
18754 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18755 return false;
18757 int n = GET_MODE_PRECISION (mode);
18758 int sh = -1;
18760 if (CONST_INT_P (XEXP (shift, 1)))
18762 sh = INTVAL (XEXP (shift, 1));
18763 if (sh < 0 || sh >= n)
18764 return false;
18767 rtx_code code = GET_CODE (shift);
18769 /* Convert any shift by 0 to a rotate, to simplify below code. */
18770 if (sh == 0)
18771 code = ROTATE;
18773 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18774 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18775 code = ASHIFT;
18776 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18778 code = LSHIFTRT;
18779 sh = n - sh;
18782 /* DImode rotates need rld*. */
18783 if (mode == DImode && code == ROTATE)
18784 return (nb == 63 || ne == 0 || ne == sh);
18786 /* SImode rotates need rlw*. */
18787 if (mode == SImode && code == ROTATE)
18788 return (nb < 32 && ne < 32 && sh < 32);
18790 /* Wrap-around masks are only okay for rotates. */
18791 if (ne > nb)
18792 return false;
18794 /* Variable shifts are only okay for rotates. */
18795 if (sh < 0)
18796 return false;
18798 /* Don't allow ASHIFT if the mask is wrong for that. */
18799 if (code == ASHIFT && ne < sh)
18800 return false;
18802 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18803 if the mask is wrong for that. */
18804 if (nb < 32 && ne < 32 && sh < 32
18805 && !(code == LSHIFTRT && nb >= 32 - sh))
18806 return true;
18808 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18809 if the mask is wrong for that. */
18810 if (code == LSHIFTRT)
18811 sh = 64 - sh;
18812 if (nb == 63 || ne == 0 || ne == sh)
18813 return !(code == LSHIFTRT && nb >= sh);
18815 return false;
18818 /* Return the instruction template for a shift with mask in mode MODE, with
18819 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18821 const char *
18822 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18824 int nb, ne;
18826 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18827 gcc_unreachable ();
18829 if (mode == DImode && ne == 0)
18831 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18832 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18833 operands[3] = GEN_INT (63 - nb);
18834 if (dot)
18835 return "rld%I2cl. %0,%1,%2,%3";
18836 return "rld%I2cl %0,%1,%2,%3";
18839 if (mode == DImode && nb == 63)
18841 operands[3] = GEN_INT (63 - ne);
18842 if (dot)
18843 return "rld%I2cr. %0,%1,%2,%3";
18844 return "rld%I2cr %0,%1,%2,%3";
18847 if (mode == DImode
18848 && GET_CODE (operands[4]) != LSHIFTRT
18849 && CONST_INT_P (operands[2])
18850 && ne == INTVAL (operands[2]))
18852 operands[3] = GEN_INT (63 - nb);
18853 if (dot)
18854 return "rld%I2c. %0,%1,%2,%3";
18855 return "rld%I2c %0,%1,%2,%3";
18858 if (nb < 32 && ne < 32)
18860 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18861 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18862 operands[3] = GEN_INT (31 - nb);
18863 operands[4] = GEN_INT (31 - ne);
18864 /* This insn can also be a 64-bit rotate with mask that really makes
18865 it just a shift right (with mask); the %h below are to adjust for
18866 that situation (shift count is >= 32 in that case). */
18867 if (dot)
18868 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18869 return "rlw%I2nm %0,%1,%h2,%3,%4";
18872 gcc_unreachable ();
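/* For example (illustrative): an SImode ASHIFT by 8 under the mask
   0xffffff00 becomes "rlwinm %0,%1,8,0,23".  */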
18875 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18876 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18877 ASHIFT, or LSHIFTRT) in mode MODE. */
18879 bool
18880 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18882 int nb, ne;
18884 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18885 return false;
18887 int n = GET_MODE_PRECISION (mode);
18889 int sh = INTVAL (XEXP (shift, 1));
18890 if (sh < 0 || sh >= n)
18891 return false;
18893 rtx_code code = GET_CODE (shift);
18895 /* Convert any shift by 0 to a rotate, to simplify below code. */
18896 if (sh == 0)
18897 code = ROTATE;
18899 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18900 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18901 code = ASHIFT;
18902 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18904 code = LSHIFTRT;
18905 sh = n - sh;
18908 /* DImode rotates need rldimi. */
18909 if (mode == DImode && code == ROTATE)
18910 return (ne == sh);
18912 /* SImode rotates need rlwimi. */
18913 if (mode == SImode && code == ROTATE)
18914 return (nb < 32 && ne < 32 && sh < 32);
18916 /* Wrap-around masks are only okay for rotates. */
18917 if (ne > nb)
18918 return false;
18920 /* Don't allow ASHIFT if the mask is wrong for that. */
18921 if (code == ASHIFT && ne < sh)
18922 return false;
18924 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18925 if the mask is wrong for that. */
18926 if (nb < 32 && ne < 32 && sh < 32
18927 && !(code == LSHIFTRT && nb >= 32 - sh))
18928 return true;
18930 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18931 if the mask is wrong for that. */
18932 if (code == LSHIFTRT)
18933 sh = 64 - sh;
18934 if (ne == sh)
18935 return !(code == LSHIFTRT && nb >= sh);
18937 return false;
18940 /* Return the instruction template for an insert with mask in mode MODE, with
18941 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18943 const char *
18944 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18946 int nb, ne;
18948 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18949 gcc_unreachable ();
18951 /* Prefer rldimi because rlwimi is cracked. */
18952 if (TARGET_POWERPC64
18953 && (!dot || mode == DImode)
18954 && GET_CODE (operands[4]) != LSHIFTRT
18955 && ne == INTVAL (operands[2]))
18957 operands[3] = GEN_INT (63 - nb);
18958 if (dot)
18959 return "rldimi. %0,%1,%2,%3";
18960 return "rldimi %0,%1,%2,%3";
18963 if (nb < 32 && ne < 32)
18965 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18966 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18967 operands[3] = GEN_INT (31 - nb);
18968 operands[4] = GEN_INT (31 - ne);
18969 if (dot)
18970 return "rlwimi. %0,%1,%2,%3,%4";
18971 return "rlwimi %0,%1,%2,%3,%4";
18974 gcc_unreachable ();
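/* For example (illustrative): on a 32-bit target, a ROTATE by 8 under the
   mask 0x0000ff00 (so ne == 8 == the shift amount) becomes
   "rlwimi %0,%1,8,16,23", inserting into bits 8..15 of the destination;
   a 64-bit target prefers the rldimi form above.  */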
18977 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18978 using two machine instructions. */
18980 bool
18981 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18983 /* There are two kinds of AND we can handle with two insns:
18984 1) those we can do with two rl* insn;
18985 2) ori[s];xori[s].
18987 We do not handle that last case yet. */
18989 /* If there is just one stretch of ones, we can do it. */
18990 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18991 return true;
18993 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18994 one insn, we can do the whole thing with two. */
18995 unsigned HOST_WIDE_INT val = INTVAL (c);
18996 unsigned HOST_WIDE_INT bit1 = val & -val;
18997 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18998 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18999 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19000 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
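/* For example (illustrative): for C == 0x00ff0ff0 in SImode, bit1 == 0x10,
   bit2 == 0x1000, and bit3 == 0x10000; filling the lowest hole (bits
   12..15) gives 0x00fffff0, which a single rlwinm can do, so two insns
   suffice.  */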
19003 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19004 If EXPAND is true, split rotate-and-mask instructions we generate to
19005 their constituent parts as well (this is used during expand); if DOT
19006 is 1, make the last insn a record-form instruction clobbering the
19007 destination GPR and setting the CC reg (from operands[3]); if 2, set
19008 that GPR as well as the CC reg. */
19010 void
19011 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19013 gcc_assert (!(expand && dot));
19015 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19017 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19018 shift right. This generates better code than doing the masks without
19019 shifts, or shifting first right and then left. */
19020 int nb, ne;
19021 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19023 gcc_assert (mode == DImode);
19025 int shift = 63 - nb;
19026 if (expand)
19028 rtx tmp1 = gen_reg_rtx (DImode);
19029 rtx tmp2 = gen_reg_rtx (DImode);
19030 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19031 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19032 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19034 else
19036 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19037 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19038 emit_move_insn (operands[0], tmp);
19039 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19040 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19042 return;
19045 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19046 that does the rest. */
19047 unsigned HOST_WIDE_INT bit1 = val & -val;
19048 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19049 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19050 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19052 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19053 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19055 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19057 /* Two "no-rotate"-and-mask instructions, for SImode. */
19058 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19060 gcc_assert (mode == SImode);
19062 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19063 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19064 emit_move_insn (reg, tmp);
19065 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19066 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19067 return;
19070 gcc_assert (mode == DImode);
19072 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19073 insns; we have to do the first in SImode, because it wraps. */
19074 if (mask2 <= 0xffffffff
19075 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19077 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19078 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19079 GEN_INT (mask1));
19080 rtx reg_low = gen_lowpart (SImode, reg);
19081 emit_move_insn (reg_low, tmp);
19082 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19083 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19084 return;
19087 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19088 at the top end), rotate back and clear the other hole. */
19089 int right = exact_log2 (bit3);
19090 int left = 64 - right;
19092 /* Rotate the mask too. */
19093 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19095 if (expand)
19097 rtx tmp1 = gen_reg_rtx (DImode);
19098 rtx tmp2 = gen_reg_rtx (DImode);
19099 rtx tmp3 = gen_reg_rtx (DImode);
19100 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19101 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19102 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19103 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19105 else
19107 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19108 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19109 emit_move_insn (operands[0], tmp);
19110 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19111 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19112 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
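/* For example (illustrative): for operands[2] == 0x00ff0ff0 in SImode the
   split is mask1 == 0xffff0fff and mask2 == 0x00fffff0, and (with DOT == 0)
   the two insns are "rlwinm %0,%1,0,20,15" followed by
   "rlwinm %0,%0,0,8,27".  */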
19116 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19117 for lfq and stfq insns iff the registers are hard registers. */
19119 int
19120 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19122 /* We might have been passed a SUBREG. */
19123 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19124 return 0;
19126 /* We might have been passed non floating point registers. */
19127 if (!FP_REGNO_P (REGNO (reg1))
19128 || !FP_REGNO_P (REGNO (reg2)))
19129 return 0;
19131 return (REGNO (reg1) == REGNO (reg2) - 1);
19134 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19135 addr1 and addr2 must be in consecutive memory locations
19136 (addr2 == addr1 + 8). */
19138 int
19139 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19141 rtx addr1, addr2;
19142 unsigned int reg1, reg2;
19143 int offset1, offset2;
19145 /* The mems cannot be volatile. */
19146 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19147 return 0;
19149 addr1 = XEXP (mem1, 0);
19150 addr2 = XEXP (mem2, 0);
19152 /* Extract an offset (if used) from the first addr. */
19153 if (GET_CODE (addr1) == PLUS)
19155 /* If not a REG, return zero. */
19156 if (GET_CODE (XEXP (addr1, 0)) != REG)
19157 return 0;
19158 else
19160 reg1 = REGNO (XEXP (addr1, 0));
19161 /* The offset must be constant! */
19162 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19163 return 0;
19164 offset1 = INTVAL (XEXP (addr1, 1));
19167 else if (GET_CODE (addr1) != REG)
19168 return 0;
19169 else
19171 reg1 = REGNO (addr1);
19172 /* This was a simple (mem (reg)) expression. Offset is 0. */
19173 offset1 = 0;
19176 /* And now for the second addr. */
19177 if (GET_CODE (addr2) == PLUS)
19179 /* If not a REG, return zero. */
19180 if (GET_CODE (XEXP (addr2, 0)) != REG)
19181 return 0;
19182 else
19184 reg2 = REGNO (XEXP (addr2, 0));
19185 /* The offset must be constant. */
19186 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19187 return 0;
19188 offset2 = INTVAL (XEXP (addr2, 1));
19191 else if (GET_CODE (addr2) != REG)
19192 return 0;
19193 else
19195 reg2 = REGNO (addr2);
19196 /* This was a simple (mem (reg)) expression. Offset is 0. */
19197 offset2 = 0;
19200 /* Both of these must have the same base register. */
19201 if (reg1 != reg2)
19202 return 0;
19204 /* The offset for the second addr must be 8 more than the first addr. */
19205 if (offset2 != offset1 + 8)
19206 return 0;
19208 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19209 instructions. */
19210 return 1;
19213 /* Return the mode to be used for memory when a secondary memory
19214 location is needed. For SDmode values we need to use DDmode, in
19215 all other cases we can use the same mode. */
19216 machine_mode
19217 rs6000_secondary_memory_needed_mode (machine_mode mode)
19219 if (lra_in_progress && mode == SDmode)
19220 return DDmode;
19221 return mode;
19224 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19225 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19226 only work on the traditional altivec registers, note if an altivec register
19227 was chosen. */
19229 static enum rs6000_reg_type
19230 register_to_reg_type (rtx reg, bool *is_altivec)
19232 HOST_WIDE_INT regno;
19233 enum reg_class rclass;
19235 if (GET_CODE (reg) == SUBREG)
19236 reg = SUBREG_REG (reg);
19238 if (!REG_P (reg))
19239 return NO_REG_TYPE;
19241 regno = REGNO (reg);
19242 if (regno >= FIRST_PSEUDO_REGISTER)
19244 if (!lra_in_progress && !reload_completed)
19245 return PSEUDO_REG_TYPE;
19247 regno = true_regnum (reg);
19248 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19249 return PSEUDO_REG_TYPE;
19252 gcc_assert (regno >= 0);
19254 if (is_altivec && ALTIVEC_REGNO_P (regno))
19255 *is_altivec = true;
19257 rclass = rs6000_regno_regclass[regno];
19258 return reg_class_to_reg_type[(int)rclass];
19261 /* Helper function to return the cost of adding a TOC entry address. */
19263 static inline int
19264 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19266 int ret;
19268 if (TARGET_CMODEL != CMODEL_SMALL)
19269 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19271 else
19272 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19274 return ret;
19277 /* Helper function for rs6000_secondary_reload to determine whether the memory
19278 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19279 needs reloading. Return negative if the memory is not handled by the memory
19280 helper functions and to try a different reload method, 0 if no additional
19281 instructions are needed, and positive to give the extra cost for the
19282 memory. */
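/* For example (illustrative): a (pre_inc (reg)) address for a register
   class without update forms returns 1 (one extra add to materialize the
   new address), while a PRE_INC whose inner operand is not a base register
   returns -1 so that reload falls back to another strategy.  */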
19284 static int
19285 rs6000_secondary_reload_memory (rtx addr,
19286 enum reg_class rclass,
19287 machine_mode mode)
19289 int extra_cost = 0;
19290 rtx reg, and_arg, plus_arg0, plus_arg1;
19291 addr_mask_type addr_mask;
19292 const char *type = NULL;
19293 const char *fail_msg = NULL;
19295 if (GPR_REG_CLASS_P (rclass))
19296 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19298 else if (rclass == FLOAT_REGS)
19299 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19301 else if (rclass == ALTIVEC_REGS)
19302 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19304 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19305 else if (rclass == VSX_REGS)
19306 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19307 & ~RELOAD_REG_AND_M16);
19309 /* If the register allocator hasn't made up its mind yet on the register
19310 class to use, settle on defaults to use. */
19311 else if (rclass == NO_REGS)
19313 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19314 & ~RELOAD_REG_AND_M16);
19316 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19317 addr_mask &= ~(RELOAD_REG_INDEXED
19318 | RELOAD_REG_PRE_INCDEC
19319 | RELOAD_REG_PRE_MODIFY);
19322 else
19323 addr_mask = 0;
19325 /* If the register isn't valid in this register class, just return now. */
19326 if ((addr_mask & RELOAD_REG_VALID) == 0)
19328 if (TARGET_DEBUG_ADDR)
19330 fprintf (stderr,
19331 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19332 "not valid in class\n",
19333 GET_MODE_NAME (mode), reg_class_names[rclass]);
19334 debug_rtx (addr);
19337 return -1;
19340 switch (GET_CODE (addr))
19342 /* Does the register class support auto update forms for this mode? We
19343 don't need a scratch register, since the powerpc only supports
19344 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19345 case PRE_INC:
19346 case PRE_DEC:
19347 reg = XEXP (addr, 0);
19348 if (!base_reg_operand (reg, GET_MODE (reg)))
19350 fail_msg = "no base register #1";
19351 extra_cost = -1;
19354 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19356 extra_cost = 1;
19357 type = "update";
19359 break;
19361 case PRE_MODIFY:
19362 reg = XEXP (addr, 0);
19363 plus_arg1 = XEXP (addr, 1);
19364 if (!base_reg_operand (reg, GET_MODE (reg))
19365 || GET_CODE (plus_arg1) != PLUS
19366 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19368 fail_msg = "bad PRE_MODIFY";
19369 extra_cost = -1;
19372 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19374 extra_cost = 1;
19375 type = "update";
19377 break;
19379 /* Do we need to simulate AND -16 to clear the bottom address bits used
19380 in VMX load/stores? Only allow the AND for vector sizes. */
19381 case AND:
19382 and_arg = XEXP (addr, 0);
19383 if (GET_MODE_SIZE (mode) != 16
19384 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19385 || INTVAL (XEXP (addr, 1)) != -16)
19387 fail_msg = "bad Altivec AND #1";
19388 extra_cost = -1;
19391 if (rclass != ALTIVEC_REGS)
19393 if (legitimate_indirect_address_p (and_arg, false))
19394 extra_cost = 1;
19396 else if (legitimate_indexed_address_p (and_arg, false))
19397 extra_cost = 2;
19399 else
19401 fail_msg = "bad Altivec AND #2";
19402 extra_cost = -1;
19405 type = "and";
19407 break;
19409 /* If this is an indirect address, make sure it is a base register. */
19410 case REG:
19411 case SUBREG:
19412 if (!legitimate_indirect_address_p (addr, false))
19414 extra_cost = 1;
19415 type = "move";
19417 break;
19419 /* If this is an indexed address, make sure the register class can handle
19420 indexed addresses for this mode. */
19421 case PLUS:
19422 plus_arg0 = XEXP (addr, 0);
19423 plus_arg1 = XEXP (addr, 1);
19425 /* (plus (plus (reg) (constant)) (constant)) is generated during
19426 push_reload processing, so handle it now. */
19427 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19429 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19431 extra_cost = 1;
19432 type = "offset";
19436 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19437 push_reload processing, so handle it now. */
19438 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19440 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19442 extra_cost = 1;
19443 type = "indexed #2";
19447 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19449 fail_msg = "no base register #2";
19450 extra_cost = -1;
19453 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19455 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19456 || !legitimate_indexed_address_p (addr, false))
19458 extra_cost = 1;
19459 type = "indexed";
19463 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19464 && CONST_INT_P (plus_arg1))
19466 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19468 extra_cost = 1;
19469 type = "vector d-form offset";
19473 /* Make sure the register class can handle offset addresses. */
19474 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19476 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19478 extra_cost = 1;
19479 type = "offset #2";
19483 else
19485 fail_msg = "bad PLUS";
19486 extra_cost = -1;
19489 break;
19491 case LO_SUM:
19492 /* Quad offsets are restricted and can't handle normal addresses. */
19493 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19495 extra_cost = -1;
19496 type = "vector d-form lo_sum";
19499 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19501 fail_msg = "bad LO_SUM";
19502 extra_cost = -1;
19505 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19507 extra_cost = 1;
19508 type = "lo_sum";
19510 break;
19512 /* Static addresses need to create a TOC entry. */
19513 case CONST:
19514 case SYMBOL_REF:
19515 case LABEL_REF:
19516 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19518 extra_cost = -1;
19519 type = "vector d-form lo_sum #2";
19522 else
19524 type = "address";
19525 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19527 break;
19529 /* TOC references look like offsettable memory. */
19530 case UNSPEC:
19531 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19533 fail_msg = "bad UNSPEC";
19534 extra_cost = -1;
19537 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19539 extra_cost = -1;
19540 type = "vector d-form lo_sum #3";
19543 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19545 extra_cost = 1;
19546 type = "toc reference";
19548 break;
19550 default:
19552 fail_msg = "bad address";
19553 extra_cost = -1;
19557 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19559 if (extra_cost < 0)
19560 fprintf (stderr,
19561 "rs6000_secondary_reload_memory error: mode = %s, "
19562 "class = %s, addr_mask = '%s', %s\n",
19563 GET_MODE_NAME (mode),
19564 reg_class_names[rclass],
19565 rs6000_debug_addr_mask (addr_mask, false),
19566 (fail_msg != NULL) ? fail_msg : "<bad address>");
19568 else
19569 fprintf (stderr,
19570 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19571 "addr_mask = '%s', extra cost = %d, %s\n",
19572 GET_MODE_NAME (mode),
19573 reg_class_names[rclass],
19574 rs6000_debug_addr_mask (addr_mask, false),
19575 extra_cost,
19576 (type) ? type : "<none>");
19578 debug_rtx (addr);
19581 return extra_cost;
19584 /* Helper function for rs6000_secondary_reload to return true if a move to a
19585 different register class is really a simple move. */
19587 static bool
19588 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19589 enum rs6000_reg_type from_type,
19590 machine_mode mode)
19592 int size = GET_MODE_SIZE (mode);
19594 /* Add support for various direct moves available. In this function, we only
19595 look at cases where we don't need any extra registers, and one or more
19596 simple move insns are issued. Originally, small integers were not allowed
19597 in FPR/VSX registers. Single precision binary floating point is not a
19598 simple move because we need to convert to the single precision memory layout.
19599 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19600 need special direct move handling, which we do not support yet. */
19601 if (TARGET_DIRECT_MOVE
19602 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19603 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19605 if (TARGET_POWERPC64)
19607 /* ISA 2.07: MTVSRD or MFVSRD. */
19608 if (size == 8)
19609 return true;
19611 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19612 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19613 return true;
19616 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19617 if (TARGET_P8_VECTOR)
19619 if (mode == SImode)
19620 return true;
19622 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19623 return true;
19626 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19627 if (mode == SDmode)
19628 return true;
19631 /* Power6+: MFTGPR or MFFGPR. */
19632 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19633 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19634 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19635 return true;
19637 /* Move to/from SPR. */
19638 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19639 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19640 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19641 return true;
19643 return false;
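/* For example (illustrative): with TARGET_DIRECT_MOVE on a 64-bit ISA 2.07
   target, a DImode move between a GPR and a VSX register is a single
   mtvsrd or mfvsrd, so the function above returns true for it.  */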
19646 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19647 special direct moves that involve allocating an extra register. Return
19648 true (and fill in SRI with the insn code and cost of the helper) if there
19649 is such a function, or false if not. */
19651 static bool
19652 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19653 enum rs6000_reg_type from_type,
19654 machine_mode mode,
19655 secondary_reload_info *sri,
19656 bool altivec_p)
19658 bool ret = false;
19659 enum insn_code icode = CODE_FOR_nothing;
19660 int cost = 0;
19661 int size = GET_MODE_SIZE (mode);
19663 if (TARGET_POWERPC64 && size == 16)
19665 /* Handle moving 128-bit values from GPRs to VSX registers on
19666 ISA 2.07 (power8, power9) when running in 64-bit mode using
19667 XXPERMDI to glue the two 64-bit values back together. */
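/* An illustrative sketch (actual operand order depends on endianness and
   on register allocation):
       mtvsrd   vs0,r4          # first 64-bit half
       mtvsrd   vs1,r5          # second 64-bit half
       xxpermdi vs34,vs0,vs1,0  # glue into one 128-bit register  */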
19668 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19670 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19671 icode = reg_addr[mode].reload_vsx_gpr;
19674 /* Handle moving 128-bit values from VSX registers to GPRs on
19675 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19676 bottom 64-bit value. */
19677 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19679 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19680 icode = reg_addr[mode].reload_gpr_vsx;
19684 else if (TARGET_POWERPC64 && mode == SFmode)
19686 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19688 cost = 3; /* xscvdpspn, mfvsrd, and. */
19689 icode = reg_addr[mode].reload_gpr_vsx;
19692 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19694 cost = 2; /* mtvsrwz, xscvspdpn. */
19695 icode = reg_addr[mode].reload_vsx_gpr;
19699 else if (!TARGET_POWERPC64 && size == 8)
19701 /* Handle moving 64-bit values from GPRs to floating point registers on
19702 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19703 32-bit values back together. Altivec register classes must be handled
19704 specially since a different instruction is used, and the secondary
19705 reload support requires a single instruction class in the scratch
19706 register constraint. However, right now TFmode is not allowed in
19707 Altivec registers, so the pattern will never match. */
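/* Sketch of the 32-bit sequence (illustrative registers; vs0/vs1 alias
   f0/f1):
       mtvsrwz vs0,r5      # one 32-bit word
       mtvsrwz vs1,r6      # the other 32-bit word
       fmrgow  f2,f0,f1    # merge into a single 64-bit FPR  */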
19708 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19710 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19711 icode = reg_addr[mode].reload_fpr_gpr;
19715 if (icode != CODE_FOR_nothing)
19717 ret = true;
19718 if (sri)
19720 sri->icode = icode;
19721 sri->extra_cost = cost;
19725 return ret;
19728 /* Return whether a move between two register classes can be done either
19729 directly (simple move) or via a pattern that uses a single extra temporary
19730 (using ISA 2.07's direct move in this case). */
19732 static bool
19733 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19734 enum rs6000_reg_type from_type,
19735 machine_mode mode,
19736 secondary_reload_info *sri,
19737 bool altivec_p)
19739 /* Fall back to load/store reloads if either type is not a register. */
19740 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19741 return false;
19743 /* If we haven't allocated registers yet, assume the move can be done for the
19744 standard register types. */
19745 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19746 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19747 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19748 return true;
19750 /* A move within the same set of registers is a simple move for
19751 non-specialized registers. */
19752 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19753 return true;
19755 /* Check whether a simple move can be done directly. */
19756 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19758 if (sri)
19760 sri->icode = CODE_FOR_nothing;
19761 sri->extra_cost = 0;
19763 return true;
19766 /* Now check if we can do it in a few steps. */
19767 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19768 altivec_p);
19771 /* Inform reload about cases where moving X with a mode MODE to a register in
19772 RCLASS requires an extra scratch or immediate register. Return the class
19773 needed for the immediate register.
19775 For VSX and Altivec, we may need a register to convert sp+offset into
19776 reg+reg (indexed) addressing.
19778 For misaligned 64-bit gpr loads and stores we need a register to
19779 convert an offset address to indirect. */
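/* For instance (illustrative, not the exact code emitted), an Altivec
   vector load has no reg+offset form, so a load from sp+offset needs a
   scratch GPR to hold the offset:
       li  r0,offset
       lvx v2,r1,r0  */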
19781 static reg_class_t
19782 rs6000_secondary_reload (bool in_p,
19783 rtx x,
19784 reg_class_t rclass_i,
19785 machine_mode mode,
19786 secondary_reload_info *sri)
19788 enum reg_class rclass = (enum reg_class) rclass_i;
19789 reg_class_t ret = ALL_REGS;
19790 enum insn_code icode;
19791 bool default_p = false;
19792 bool done_p = false;
19794 /* Allow subreg of memory before/during reload. */
19795 bool memory_p = (MEM_P (x)
19796 || (!reload_completed && GET_CODE (x) == SUBREG
19797 && MEM_P (SUBREG_REG (x))));
19799 sri->icode = CODE_FOR_nothing;
19800 sri->t_icode = CODE_FOR_nothing;
19801 sri->extra_cost = 0;
19802 icode = ((in_p)
19803 ? reg_addr[mode].reload_load
19804 : reg_addr[mode].reload_store);
19806 if (REG_P (x) || register_operand (x, mode))
19808 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19809 bool altivec_p = (rclass == ALTIVEC_REGS);
19810 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19812 if (!in_p)
19813 std::swap (to_type, from_type);
19815 /* Can we do a direct move of some sort? */
19816 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19817 altivec_p))
19819 icode = (enum insn_code)sri->icode;
19820 default_p = false;
19821 done_p = true;
19822 ret = NO_REGS;
19826 /* Make sure 0.0 is not reloaded or forced into memory. */
19827 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19829 ret = NO_REGS;
19830 default_p = false;
19831 done_p = true;
19834 /* If this is a scalar floating point value and we want to load it into the
19835 traditional Altivec registers, move it through a traditional floating
19836 point register, unless we have D-form addressing. Also make sure that
19837 non-zero constants use an FPR. */
19838 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19839 && !mode_supports_vmx_dform (mode)
19840 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19841 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19843 ret = FLOAT_REGS;
19844 default_p = false;
19845 done_p = true;
19848 /* Handle reload of load/stores if we have reload helper functions. */
19849 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19851 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19852 mode);
19854 if (extra_cost >= 0)
19856 done_p = true;
19857 ret = NO_REGS;
19858 if (extra_cost > 0)
19860 sri->extra_cost = extra_cost;
19861 sri->icode = icode;
19866 /* Handle unaligned loads and stores of integer registers. */
19867 if (!done_p && TARGET_POWERPC64
19868 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19869 && memory_p
19870 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19872 rtx addr = XEXP (x, 0);
19873 rtx off = address_offset (addr);
19875 if (off != NULL_RTX)
19877 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19878 unsigned HOST_WIDE_INT offset = INTVAL (off);
19880 /* We need a secondary reload when our legitimate_address_p
19881 says the address is good (as otherwise the entire address
19882 will be reloaded), and the offset is not a multiple of
19883 four or we have an address wrap. Address wrap will only
19884 occur for LO_SUMs since legitimate_offset_address_p
19885 rejects addresses for 16-byte mems that will wrap. */
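/* Example (illustrative): 64-bit ld/std are DS-form insns whose
   displacement must be a multiple of 4. An address like 6(r3) is fine
   for lwz but not for ld, so the reload computes r3+6 into the scratch
   register and uses ld reg,0(scratch) instead.  */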
19886 if (GET_CODE (addr) == LO_SUM
19887 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19888 && ((offset & 3) != 0
19889 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19890 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19891 && (offset & 3) != 0))
19893 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19894 if (in_p)
19895 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19896 : CODE_FOR_reload_di_load);
19897 else
19898 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19899 : CODE_FOR_reload_di_store);
19900 sri->extra_cost = 2;
19901 ret = NO_REGS;
19902 done_p = true;
19904 else
19905 default_p = true;
19907 else
19908 default_p = true;
19911 if (!done_p && !TARGET_POWERPC64
19912 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19913 && memory_p
19914 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19916 rtx addr = XEXP (x, 0);
19917 rtx off = address_offset (addr);
19919 if (off != NULL_RTX)
19921 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19922 unsigned HOST_WIDE_INT offset = INTVAL (off);
19924 /* We need a secondary reload when our legitimate_address_p
19925 says the address is good (as otherwise the entire address
19926 will be reloaded), and we have a wrap.
19928 legitimate_lo_sum_address_p allows LO_SUM addresses to
19929 have any offset so test for wrap in the low 16 bits.
19931 legitimate_offset_address_p checks for the range
19932 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19933 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19934 [0x7ff4,0x7fff] respectively, so test for the
19935 intersection of these ranges, [0x7ffc,0x7fff] and
19936 [0x7ff4,0x7ff7] respectively.
19938 Note that the address we see here may have been
19939 manipulated by legitimize_reload_address. */
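/* Worked example: for an 8-byte access in 32-bit mode, extra = 4. With
   offset 0x7ffc the first lwz at 0x7ffc is still in range, but the second
   word would need offset 0x8000, which no longer fits in a signed 16-bit
   displacement, so the whole address must be reloaded into a base
   register.  */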
19940 if (GET_CODE (addr) == LO_SUM
19941 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19942 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19944 if (in_p)
19945 sri->icode = CODE_FOR_reload_si_load;
19946 else
19947 sri->icode = CODE_FOR_reload_si_store;
19948 sri->extra_cost = 2;
19949 ret = NO_REGS;
19950 done_p = true;
19952 else
19953 default_p = true;
19955 else
19956 default_p = true;
19959 if (!done_p)
19960 default_p = true;
19962 if (default_p)
19963 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19965 gcc_assert (ret != ALL_REGS);
19967 if (TARGET_DEBUG_ADDR)
19969 fprintf (stderr,
19970 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19971 "mode = %s",
19972 reg_class_names[ret],
19973 in_p ? "true" : "false",
19974 reg_class_names[rclass],
19975 GET_MODE_NAME (mode));
19977 if (reload_completed)
19978 fputs (", after reload", stderr);
19980 if (!done_p)
19981 fputs (", done_p not set", stderr);
19983 if (default_p)
19984 fputs (", default secondary reload", stderr);
19986 if (sri->icode != CODE_FOR_nothing)
19987 fprintf (stderr, ", reload func = %s, extra cost = %d",
19988 insn_data[sri->icode].name, sri->extra_cost);
19990 else if (sri->extra_cost > 0)
19991 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19993 fputs ("\n", stderr);
19994 debug_rtx (x);
19997 return ret;
20000 /* Better tracing for rs6000_secondary_reload_inner. */
20002 static void
20003 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20004 bool store_p)
20006 rtx set, clobber;
20008 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20010 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20011 store_p ? "store" : "load");
20013 if (store_p)
20014 set = gen_rtx_SET (mem, reg);
20015 else
20016 set = gen_rtx_SET (reg, mem);
20018 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20019 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20022 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20023 ATTRIBUTE_NORETURN;
20025 static void
20026 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20027 bool store_p)
20029 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20030 gcc_unreachable ();
20033 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20034 reload helper functions. These were identified in
20035 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20036 reload, it calls the insns:
20037 reload_<RELOAD:mode>_<P:mptrsize>_store
20038 reload_<RELOAD:mode>_<P:mptrsize>_load
20040 which in turn calls this function, to do whatever is necessary to create
20041 valid addresses. */
20043 void
20044 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20046 int regno = true_regnum (reg);
20047 machine_mode mode = GET_MODE (reg);
20048 addr_mask_type addr_mask;
20049 rtx addr;
20050 rtx new_addr;
20051 rtx op_reg, op0, op1;
20052 rtx and_op;
20053 rtx cc_clobber;
20054 rtvec rv;
20056 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20057 || !base_reg_operand (scratch, GET_MODE (scratch)))
20058 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20060 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20061 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20063 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20064 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20066 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20067 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20069 else
20070 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20072 /* Make sure the mode is valid in this register class. */
20073 if ((addr_mask & RELOAD_REG_VALID) == 0)
20074 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20076 if (TARGET_DEBUG_ADDR)
20077 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20079 new_addr = addr = XEXP (mem, 0);
20080 switch (GET_CODE (addr))
20082 /* Does the register class support auto-update forms for this mode? If
20083 not, do the update now. We don't need a scratch register, since the
20084 PowerPC only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
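/* E.g. (sketch), a PRE_INC through r9 for a register class without update
   forms becomes an explicit update followed by an indirect access:
       addi r9,r9,16          # GET_MODE_SIZE (mode)
       <load or store> 0(r9)  */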
20085 case PRE_INC:
20086 case PRE_DEC:
20087 op_reg = XEXP (addr, 0);
20088 if (!base_reg_operand (op_reg, Pmode))
20089 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20091 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20093 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20094 new_addr = op_reg;
20096 break;
20098 case PRE_MODIFY:
20099 op0 = XEXP (addr, 0);
20100 op1 = XEXP (addr, 1);
20101 if (!base_reg_operand (op0, Pmode)
20102 || GET_CODE (op1) != PLUS
20103 || !rtx_equal_p (op0, XEXP (op1, 0)))
20104 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20106 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20108 emit_insn (gen_rtx_SET (op0, op1));
20109 new_addr = op0; /* Use the updated base register as the address. */
20111 break;
20113 /* Do we need to simulate AND -16 to clear the bottom address bits used
20114 in VMX load/stores? */
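/* E.g. (sketch), for an address (and (plus r9 r10) -16) the sum is first
   computed into the scratch register and the low four bits are then
   cleared, something like:
       add    r0,r9,r10
       rlwinm r0,r0,0,0,27    # r0 &= -16  */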
20115 case AND:
20116 op0 = XEXP (addr, 0);
20117 op1 = XEXP (addr, 1);
20118 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20120 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20121 op_reg = op0;
20123 else if (GET_CODE (op1) == PLUS)
20125 emit_insn (gen_rtx_SET (scratch, op1));
20126 op_reg = scratch;
20129 else
20130 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20132 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20133 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20134 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20135 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20136 new_addr = scratch;
20138 break;
20140 /* If this is an indirect address, make sure it is a base register. */
20141 case REG:
20142 case SUBREG:
20143 if (!base_reg_operand (addr, GET_MODE (addr)))
20145 emit_insn (gen_rtx_SET (scratch, addr));
20146 new_addr = scratch;
20148 break;
20150 /* If this is an indexed address, make sure the register class can handle
20151 indexed addresses for this mode. */
20152 case PLUS:
20153 op0 = XEXP (addr, 0);
20154 op1 = XEXP (addr, 1);
20155 if (!base_reg_operand (op0, Pmode))
20156 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20158 else if (int_reg_operand (op1, Pmode))
20160 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20162 emit_insn (gen_rtx_SET (scratch, addr));
20163 new_addr = scratch;
20167 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20169 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20170 || !quad_address_p (addr, mode, false))
20172 emit_insn (gen_rtx_SET (scratch, addr));
20173 new_addr = scratch;
20177 /* Make sure the register class can handle offset addresses. */
20178 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20180 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20182 emit_insn (gen_rtx_SET (scratch, addr));
20183 new_addr = scratch;
20187 else
20188 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20190 break;
20192 case LO_SUM:
20193 op0 = XEXP (addr, 0);
20194 op1 = XEXP (addr, 1);
20195 if (!base_reg_operand (op0, Pmode))
20196 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20198 else if (int_reg_operand (op1, Pmode))
20200 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20202 emit_insn (gen_rtx_SET (scratch, addr));
20203 new_addr = scratch;
20207 /* Quad offsets are restricted and can't handle normal addresses. */
20208 else if (mode_supports_vsx_dform_quad (mode))
20210 emit_insn (gen_rtx_SET (scratch, addr));
20211 new_addr = scratch;
20214 /* Make sure the register class can handle offset addresses. */
20215 else if (legitimate_lo_sum_address_p (mode, addr, false))
20217 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20219 emit_insn (gen_rtx_SET (scratch, addr));
20220 new_addr = scratch;
20224 else
20225 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20227 break;
20229 case SYMBOL_REF:
20230 case CONST:
20231 case LABEL_REF:
20232 rs6000_emit_move (scratch, addr, Pmode);
20233 new_addr = scratch;
20234 break;
20236 default:
20237 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20240 /* Adjust the address if it changed. */
20241 if (addr != new_addr)
20243 mem = replace_equiv_address_nv (mem, new_addr);
20244 if (TARGET_DEBUG_ADDR)
20245 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20248 /* Now create the move. */
20249 if (store_p)
20250 emit_insn (gen_rtx_SET (mem, reg));
20251 else
20252 emit_insn (gen_rtx_SET (reg, mem));
20254 return;
20257 /* Convert reloads involving 64-bit gprs and misaligned offset
20258 addressing, or multiple 32-bit gprs and offsets that are too large,
20259 to use indirect addressing. */
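/* E.g. (illustrative), a -m32 store of a 64-bit gpr pair at 0x7ffc(r9)
   cannot use two in-range stw offsets, so the full address is moved into
   the scratch register and the accesses are made indirect through it.  */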
20261 void
20262 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20264 int regno = true_regnum (reg);
20265 enum reg_class rclass;
20266 rtx addr;
20267 rtx scratch_or_premodify = scratch;
20269 if (TARGET_DEBUG_ADDR)
20271 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20272 store_p ? "store" : "load");
20273 fprintf (stderr, "reg:\n");
20274 debug_rtx (reg);
20275 fprintf (stderr, "mem:\n");
20276 debug_rtx (mem);
20277 fprintf (stderr, "scratch:\n");
20278 debug_rtx (scratch);
20281 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20282 gcc_assert (GET_CODE (mem) == MEM);
20283 rclass = REGNO_REG_CLASS (regno);
20284 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20285 addr = XEXP (mem, 0);
20287 if (GET_CODE (addr) == PRE_MODIFY)
20289 gcc_assert (REG_P (XEXP (addr, 0))
20290 && GET_CODE (XEXP (addr, 1)) == PLUS
20291 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20292 scratch_or_premodify = XEXP (addr, 0);
20293 if (!HARD_REGISTER_P (scratch_or_premodify))
20294 /* If we have a pseudo here then reload will have arranged
20295 to have it replaced, but only in the original insn.
20296 Use the replacement here too. */
20297 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20299 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20300 expressions from the original insn, without unsharing them.
20301 Any RTL that points into the original insn will of course
20302 have register replacements applied. That is why we don't
20303 need to look for replacements under the PLUS. */
20304 addr = XEXP (addr, 1);
20306 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20308 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20310 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20312 /* Now create the move. */
20313 if (store_p)
20314 emit_insn (gen_rtx_SET (mem, reg));
20315 else
20316 emit_insn (gen_rtx_SET (reg, mem));
20318 return;
20321 /* Given an rtx X being reloaded into a reg required to be
20322 in class CLASS, return the class of reg to actually use.
20323 In general this is just CLASS; but on some machines
20324 in some cases it is preferable to use a more restrictive class.
20326 On the RS/6000, we have to return NO_REGS when we want to reload a
20327 floating-point CONST_DOUBLE to force it to be copied to memory.
20329 We also don't want to reload integer values into floating-point
20330 registers if we can at all help it. In fact, this can
20331 cause reload to die, if it tries to generate a reload of CTR
20332 into a FP register and discovers it doesn't have the memory location
20333 required.
20335 ??? Would it be a good idea to have reload do the converse, that is
20336 try to reload floating modes into FP registers if possible?
20339 static enum reg_class
20340 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20342 machine_mode mode = GET_MODE (x);
20343 bool is_constant = CONSTANT_P (x);
20345 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20346 reload class for it. */
20347 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20348 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20349 return NO_REGS;
20351 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20352 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20353 return NO_REGS;
20355 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20356 the reloading of address expressions using PLUS into floating point
20357 registers. */
20358 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20360 if (is_constant)
20362 /* Zero is always allowed in all VSX registers. */
20363 if (x == CONST0_RTX (mode))
20364 return rclass;
20366 /* If this is a vector constant that can be formed with a few Altivec
20367 instructions, we want altivec registers. */
20368 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20369 return ALTIVEC_REGS;
20371 /* If this is an integer constant that can easily be loaded into
20372 vector registers, allow it. */
20373 if (CONST_INT_P (x))
20375 HOST_WIDE_INT value = INTVAL (x);
20377 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20378 2.06 can generate it in the Altivec registers with
20379 VSPLTI<x>. */
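/* For example (illustrative encodings), "xxlorc vs32,vs32,vs32" forms -1
   in any VSX register on ISA 2.07, while "vspltisw v0,-1" does the same
   but can only target the Altivec registers.  */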
20380 if (value == -1)
20382 if (TARGET_P8_VECTOR)
20383 return rclass;
20384 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20385 return ALTIVEC_REGS;
20386 else
20387 return NO_REGS;
20390 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20391 a sign extend in the Altivec registers. */
20392 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20393 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20394 return ALTIVEC_REGS;
20397 /* Force constant to memory. */
20398 return NO_REGS;
20401 /* D-form addressing can easily reload the value. */
20402 if (mode_supports_vmx_dform (mode)
20403 || mode_supports_vsx_dform_quad (mode))
20404 return rclass;
20406 /* If this is a scalar floating point value and we don't have D-form
20407 addressing, prefer the traditional floating point registers so that we
20408 can use D-form (register+offset) addressing. */
20409 if (rclass == VSX_REGS
20410 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20411 return FLOAT_REGS;
20413 /* Prefer the Altivec registers if Altivec is handling the vector
20414 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20415 loads. */
20416 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20417 || mode == V1TImode)
20418 return ALTIVEC_REGS;
20420 return rclass;
20423 if (is_constant || GET_CODE (x) == PLUS)
20425 if (reg_class_subset_p (GENERAL_REGS, rclass))
20426 return GENERAL_REGS;
20427 if (reg_class_subset_p (BASE_REGS, rclass))
20428 return BASE_REGS;
20429 return NO_REGS;
20432 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20433 return GENERAL_REGS;
20435 return rclass;
20438 /* Debug version of rs6000_preferred_reload_class. */
20439 static enum reg_class
20440 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20442 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20444 fprintf (stderr,
20445 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20446 "mode = %s, x:\n",
20447 reg_class_names[ret], reg_class_names[rclass],
20448 GET_MODE_NAME (GET_MODE (x)));
20449 debug_rtx (x);
20451 return ret;
20454 /* If we are copying between FP or AltiVec registers and anything else, we need
20455 a memory location. The exception is when we are targeting ppc64 and the
20456 direct move instructions between FPRs and GPRs are available. Also, under VSX, you
20457 can copy vector registers from the FP register set to the Altivec register
20458 set and vice versa. */
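/* Illustrative sketch: without direct moves, copying a DFmode value from
   f1 to r3/r4 on a 32-bit target must bounce through the stack:
       stfd f1,8(r1)
       lwz  r3,8(r1)
       lwz  r4,12(r1)  */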
20460 static bool
20461 rs6000_secondary_memory_needed (enum reg_class from_class,
20462 enum reg_class to_class,
20463 machine_mode mode)
20465 enum rs6000_reg_type from_type, to_type;
20466 bool altivec_p = ((from_class == ALTIVEC_REGS)
20467 || (to_class == ALTIVEC_REGS));
20469 /* If a simple/direct move is available, we don't need secondary memory. */
20470 from_type = reg_class_to_reg_type[(int)from_class];
20471 to_type = reg_class_to_reg_type[(int)to_class];
20473 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20474 (secondary_reload_info *)0, altivec_p))
20475 return false;
20477 /* If we have a floating point or vector register class, we need to use
20478 memory to transfer the data. */
20479 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20480 return true;
20482 return false;
20485 /* Debug version of rs6000_secondary_memory_needed. */
20486 static bool
20487 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20488 enum reg_class to_class,
20489 machine_mode mode)
20491 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20493 fprintf (stderr,
20494 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20495 "to_class = %s, mode = %s\n",
20496 ret ? "true" : "false",
20497 reg_class_names[from_class],
20498 reg_class_names[to_class],
20499 GET_MODE_NAME (mode));
20501 return ret;
20504 /* Return the register class of a scratch register needed to copy IN into
20505 or out of a register in RCLASS in MODE. If it can be done directly,
20506 NO_REGS is returned. */
20508 static enum reg_class
20509 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20510 rtx in)
20512 int regno;
20514 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20515 #if TARGET_MACHO
20516 && MACHOPIC_INDIRECT
20517 #endif
20520 /* We cannot copy a symbolic operand directly into anything
20521 other than BASE_REGS for TARGET_ELF. So indicate that a
20522 register from BASE_REGS is needed as an intermediate
20523 register.
20525 On Darwin, pic addresses require a load from memory, which
20526 needs a base register. */
20527 if (rclass != BASE_REGS
20528 && (GET_CODE (in) == SYMBOL_REF
20529 || GET_CODE (in) == HIGH
20530 || GET_CODE (in) == LABEL_REF
20531 || GET_CODE (in) == CONST))
20532 return BASE_REGS;
20535 if (GET_CODE (in) == REG)
20537 regno = REGNO (in);
20538 if (regno >= FIRST_PSEUDO_REGISTER)
20540 regno = true_regnum (in);
20541 if (regno >= FIRST_PSEUDO_REGISTER)
20542 regno = -1;
20545 else if (GET_CODE (in) == SUBREG)
20547 regno = true_regnum (in);
20548 if (regno >= FIRST_PSEUDO_REGISTER)
20549 regno = -1;
20551 else
20552 regno = -1;
20554 /* If we have VSX register moves, prefer moving scalar values between
20555 Altivec registers and GPRs by going via an FPR (and then via memory)
20556 instead of reloading the secondary memory address for Altivec moves. */
20557 if (TARGET_VSX
20558 && GET_MODE_SIZE (mode) < 16
20559 && !mode_supports_vmx_dform (mode)
20560 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20561 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20562 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20563 && (regno >= 0 && INT_REGNO_P (regno)))))
20564 return FLOAT_REGS;
20566 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20567 into anything. */
20568 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20569 || (regno >= 0 && INT_REGNO_P (regno)))
20570 return NO_REGS;
20572 /* Constants, memory, and VSX registers can go into VSX registers (both the
20573 traditional floating point and the altivec registers). */
20574 if (rclass == VSX_REGS
20575 && (regno == -1 || VSX_REGNO_P (regno)))
20576 return NO_REGS;
20578 /* Constants, memory, and FP registers can go into FP registers. */
20579 if ((regno == -1 || FP_REGNO_P (regno))
20580 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20581 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20583 /* Memory and AltiVec registers can go into AltiVec registers. */
20584 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20585 && rclass == ALTIVEC_REGS)
20586 return NO_REGS;
20588 /* We can copy among the CR registers. */
20589 if ((rclass == CR_REGS || rclass == CR0_REGS)
20590 && regno >= 0 && CR_REGNO_P (regno))
20591 return NO_REGS;
20593 /* Otherwise, we need GENERAL_REGS. */
20594 return GENERAL_REGS;
20597 /* Debug version of rs6000_secondary_reload_class. */
20598 static enum reg_class
20599 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20600 machine_mode mode, rtx in)
20602 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20603 fprintf (stderr,
20604 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20605 "mode = %s, input rtx:\n",
20606 reg_class_names[ret], reg_class_names[rclass],
20607 GET_MODE_NAME (mode));
20608 debug_rtx (in);
20610 return ret;
20613 /* Return nonzero if, for RCLASS, a mode change from FROM to TO is invalid. */
20615 static bool
20616 rs6000_cannot_change_mode_class (machine_mode from,
20617 machine_mode to,
20618 enum reg_class rclass)
20620 unsigned from_size = GET_MODE_SIZE (from);
20621 unsigned to_size = GET_MODE_SIZE (to);
20623 if (from_size != to_size)
20625 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20627 if (reg_classes_intersect_p (xclass, rclass))
20629 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20630 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20631 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20632 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20634 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20635 single register under VSX because the scalar part of the register
20636 is in the upper 64 bits, not the lower 64 bits. Types like
20637 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20638 IEEE floating point can't overlap, and neither can small
20639 values. */
20641 if (to_float128_vector_p && from_float128_vector_p)
20642 return false;
20644 else if (to_float128_vector_p || from_float128_vector_p)
20645 return true;
20647 /* TDmode in floating-mode registers must always go into a register
20648 pair with the most significant word in the even-numbered register
20649 to match ISA requirements. In little-endian mode, this does not
20650 match subreg numbering, so we cannot allow subregs. */
20651 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20652 return true;
20654 if (from_size < 8 || to_size < 8)
20655 return true;
20657 if (from_size == 8 && (8 * to_nregs) != to_size)
20658 return true;
20660 if (to_size == 8 && (8 * from_nregs) != from_size)
20661 return true;
20663 return false;
20665 else
20666 return false;
20669 /* Since the VSX register set includes traditional floating point registers
20670 and altivec registers, just check for the size being different instead of
20671 trying to check whether the modes are vector modes. Otherwise it won't
20672 allow say DF and DI to change classes. For types like TFmode and TDmode
20673 that take 2 64-bit registers, rather than a single 128-bit register, don't
20674 allow subregs of those types to other 128 bit types. */
20675 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20677 unsigned num_regs = (from_size + 15) / 16;
20678 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20679 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20680 return true;
20682 return (from_size != 8 && from_size != 16);
20685 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20686 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20687 return true;
20689 return false;
20692 /* Debug version of rs6000_cannot_change_mode_class. */
20693 static bool
20694 rs6000_debug_cannot_change_mode_class (machine_mode from,
20695 machine_mode to,
20696 enum reg_class rclass)
20698 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20700 fprintf (stderr,
20701 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20702 "to = %s, rclass = %s\n",
20703 ret ? "true" : "false",
20704 GET_MODE_NAME (from), GET_MODE_NAME (to),
20705 reg_class_names[rclass]);
20707 return ret;
20710 /* Return a string to do a move operation of 128 bits of data. */
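/* For example, a VSX-to-VSX copy below returns "xxlor %x0,%x1,%x1", while
   a GPR-to-GPR copy returns "#" so that the post-reload splitter breaks it
   into word-sized moves (see rs6000_split_128bit_ok_p). */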
20712 const char *
20713 rs6000_output_move_128bit (rtx operands[])
20715 rtx dest = operands[0];
20716 rtx src = operands[1];
20717 machine_mode mode = GET_MODE (dest);
20718 int dest_regno;
20719 int src_regno;
20720 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20721 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20723 if (REG_P (dest))
20725 dest_regno = REGNO (dest);
20726 dest_gpr_p = INT_REGNO_P (dest_regno);
20727 dest_fp_p = FP_REGNO_P (dest_regno);
20728 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20729 dest_vsx_p = dest_fp_p | dest_vmx_p;
20731 else
20733 dest_regno = -1;
20734 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20737 if (REG_P (src))
20739 src_regno = REGNO (src);
20740 src_gpr_p = INT_REGNO_P (src_regno);
20741 src_fp_p = FP_REGNO_P (src_regno);
20742 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20743 src_vsx_p = src_fp_p | src_vmx_p;
20745 else
20747 src_regno = -1;
20748 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20751 /* Register moves. */
20752 if (dest_regno >= 0 && src_regno >= 0)
20754 if (dest_gpr_p)
20756 if (src_gpr_p)
20757 return "#";
20759 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20760 return (WORDS_BIG_ENDIAN
20761 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20762 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20764 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20765 return "#";
20768 else if (TARGET_VSX && dest_vsx_p)
20770 if (src_vsx_p)
20771 return "xxlor %x0,%x1,%x1";
20773 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20774 return (WORDS_BIG_ENDIAN
20775 ? "mtvsrdd %x0,%1,%L1"
20776 : "mtvsrdd %x0,%L1,%1");
20778 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20779 return "#";
20782 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20783 return "vor %0,%1,%1";
20785 else if (dest_fp_p && src_fp_p)
20786 return "#";
20789 /* Loads. */
20790 else if (dest_regno >= 0 && MEM_P (src))
20792 if (dest_gpr_p)
20794 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20795 return "lq %0,%1";
20796 else
20797 return "#";
20800 else if (TARGET_ALTIVEC && dest_vmx_p
20801 && altivec_indexed_or_indirect_operand (src, mode))
20802 return "lvx %0,%y1";
20804 else if (TARGET_VSX && dest_vsx_p)
20806 if (mode_supports_vsx_dform_quad (mode)
20807 && quad_address_p (XEXP (src, 0), mode, true))
20808 return "lxv %x0,%1";
20810 else if (TARGET_P9_VECTOR)
20811 return "lxvx %x0,%y1";
20813 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20814 return "lxvw4x %x0,%y1";
20816 else
20817 return "lxvd2x %x0,%y1";
20820 else if (TARGET_ALTIVEC && dest_vmx_p)
20821 return "lvx %0,%y1";
20823 else if (dest_fp_p)
20824 return "#";
20827 /* Stores. */
20828 else if (src_regno >= 0 && MEM_P (dest))
20830 if (src_gpr_p)
20832 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20833 return "stq %1,%0";
20834 else
20835 return "#";
20838 else if (TARGET_ALTIVEC && src_vmx_p
20839 && altivec_indexed_or_indirect_operand (src, mode))
20840 return "stvx %1,%y0";
20842 else if (TARGET_VSX && src_vsx_p)
20844 if (mode_supports_vsx_dform_quad (mode)
20845 && quad_address_p (XEXP (dest, 0), mode, true))
20846 return "stxv %x1,%0";
20848 else if (TARGET_P9_VECTOR)
20849 return "stxvx %x1,%y0";
20851 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20852 return "stxvw4x %x1,%y0";
20854 else
20855 return "stxvd2x %x1,%y0";
20858 else if (TARGET_ALTIVEC && src_vmx_p)
20859 return "stvx %1,%y0";
20861 else if (src_fp_p)
20862 return "#";
20865 /* Constants. */
20866 else if (dest_regno >= 0
20867 && (GET_CODE (src) == CONST_INT
20868 || GET_CODE (src) == CONST_WIDE_INT
20869 || GET_CODE (src) == CONST_DOUBLE
20870 || GET_CODE (src) == CONST_VECTOR))
20872 if (dest_gpr_p)
20873 return "#";
20875 else if ((dest_vmx_p && TARGET_ALTIVEC)
20876 || (dest_vsx_p && TARGET_VSX))
20877 return output_vec_const_move (operands);
20880 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20883 /* Validate a 128-bit move. */
20884 bool
20885 rs6000_move_128bit_ok_p (rtx operands[])
20887 machine_mode mode = GET_MODE (operands[0]);
20888 return (gpc_reg_operand (operands[0], mode)
20889 || gpc_reg_operand (operands[1], mode));
20892 /* Return true if a 128-bit move needs to be split. */
20893 bool
20894 rs6000_split_128bit_ok_p (rtx operands[])
20896 if (!reload_completed)
20897 return false;
20899 if (!gpr_or_gpr_p (operands[0], operands[1]))
20900 return false;
20902 if (quad_load_store_p (operands[0], operands[1]))
20903 return false;
20905 return true;
20909 /* Given a comparison operation, return the bit number in CCR to test. We
20910 know this is a valid comparison.
20912 SCC_P is 1 if this is for an scc. That means that %D will have been
20913 used instead of %C, so the bits will be in different places.
20915 Return -1 if OP isn't a valid comparison for some reason. */
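/* Worked example: for an EQ test of cr7 with SCC_P == 0, base_bit is
   4 * 7 = 28 and the EQ case below returns bit 30. */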
20917 int
20918 ccr_bit (rtx op, int scc_p)
20920 enum rtx_code code = GET_CODE (op);
20921 machine_mode cc_mode;
20922 int cc_regnum;
20923 int base_bit;
20924 rtx reg;
20926 if (!COMPARISON_P (op))
20927 return -1;
20929 reg = XEXP (op, 0);
20931 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20933 cc_mode = GET_MODE (reg);
20934 cc_regnum = REGNO (reg);
20935 base_bit = 4 * (cc_regnum - CR0_REGNO);
20937 validate_condition_mode (code, cc_mode);
20939 /* When generating a sCOND operation, only positive conditions are
20940 allowed. */
20941 gcc_assert (!scc_p
20942 || code == EQ || code == GT || code == LT || code == UNORDERED
20943 || code == GTU || code == LTU);
20945 switch (code)
20947 case NE:
20948 return scc_p ? base_bit + 3 : base_bit + 2;
20949 case EQ:
20950 return base_bit + 2;
20951 case GT: case GTU: case UNLE:
20952 return base_bit + 1;
20953 case LT: case LTU: case UNGE:
20954 return base_bit;
20955 case ORDERED: case UNORDERED:
20956 return base_bit + 3;
20958 case GE: case GEU:
20959 /* If scc, we will have done a cror to put the bit in the
20960 unordered position. So test that bit. For integer, this is ! LT
20961 unless this is an scc insn. */
20962 return scc_p ? base_bit + 3 : base_bit;
20964 case LE: case LEU:
20965 return scc_p ? base_bit + 3 : base_bit + 1;
20967 default:
20968 gcc_unreachable ();
20972 /* Return the GOT register. */
20974 rtx
20975 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20977 /* The second flow pass currently (June 1999) can't update
20978 regs_ever_live without disturbing other parts of the compiler, so
20979 update it here to make the prolog/epilogue code happy. */
20980 if (!can_create_pseudo_p ()
20981 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20982 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20984 crtl->uses_pic_offset_table = 1;
20986 return pic_offset_table_rtx;
20989 static rs6000_stack_t stack_info;
20991 /* Function to init struct machine_function.
20992 This will be called, via a pointer variable,
20993 from push_function_context. */
20995 static struct machine_function *
20996 rs6000_init_machine_status (void)
20998 stack_info.reload_completed = 0;
20999 return ggc_cleared_alloc<machine_function> ();
21002 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21004 /* Write out a function code label. */
21006 void
21007 rs6000_output_function_entry (FILE *file, const char *fname)
21009 if (fname[0] != '.')
21011 switch (DEFAULT_ABI)
21013 default:
21014 gcc_unreachable ();
21016 case ABI_AIX:
21017 if (DOT_SYMBOLS)
21018 putc ('.', file);
21019 else
21020 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21021 break;
21023 case ABI_ELFv2:
21024 case ABI_V4:
21025 case ABI_DARWIN:
21026 break;
21030 RS6000_OUTPUT_BASENAME (file, fname);
21033 /* Print an operand. Recognize special options, documented below. */
21035 #if TARGET_ELF
21036 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21037 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21038 #else
21039 #define SMALL_DATA_RELOC "sda21"
21040 #define SMALL_DATA_REG 0
21041 #endif
21043 void
21044 print_operand (FILE *file, rtx x, int code)
21046 int i;
21047 unsigned HOST_WIDE_INT uval;
21049 switch (code)
21051 /* %a is output_address. */
21053 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21054 output_operand. */
21056 case 'D':
21057 /* Like 'J' but get to the GT bit only. */
21058 gcc_assert (REG_P (x));
21060 /* Bit 1 is GT bit. */
21061 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21063 /* Add one for shift count in rlinm for scc. */
21064 fprintf (file, "%d", i + 1);
21065 return;
21067 case 'e':
21068 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21069 if (! INT_P (x))
21071 output_operand_lossage ("invalid %%e value");
21072 return;
21075 uval = INTVAL (x);
21076 if ((uval & 0xffff) == 0 && uval != 0)
21077 putc ('s', file);
21078 return;
21080 case 'E':
21081 /* X is a CR register. Print the number of the EQ bit of the CR. */
21082 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21083 output_operand_lossage ("invalid %%E value");
21084 else
21085 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21086 return;
21088 case 'f':
21089 /* X is a CR register. Print the shift count needed to move it
21090 to the high-order four bits. */
21091 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21092 output_operand_lossage ("invalid %%f value");
21093 else
21094 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21095 return;
21097 case 'F':
21098 /* Similar, but print the count for the rotate in the opposite
21099 direction. */
21100 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21101 output_operand_lossage ("invalid %%F value");
21102 else
21103 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21104 return;
21106 case 'G':
21107 /* X is a constant integer. If it is negative, print "m",
21108 otherwise print "z". This is to make an aze or ame insn. */
21109 if (GET_CODE (x) != CONST_INT)
21110 output_operand_lossage ("invalid %%G value");
21111 else if (INTVAL (x) >= 0)
21112 putc ('z', file);
21113 else
21114 putc ('m', file);
21115 return;
21117 case 'h':
21118 /* If constant, output low-order five bits. Otherwise, write
21119 normally. */
21120 if (INT_P (x))
21121 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21122 else
21123 print_operand (file, x, 0);
21124 return;
21126 case 'H':
21127 /* If constant, output low-order six bits. Otherwise, write
21128 normally. */
21129 if (INT_P (x))
21130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21131 else
21132 print_operand (file, x, 0);
21133 return;
21135 case 'I':
21136 /* Print `i' if this is a constant, else nothing. */
21137 if (INT_P (x))
21138 putc ('i', file);
21139 return;
21141 case 'j':
21142 /* Write the bit number in CCR for jump. */
21143 i = ccr_bit (x, 0);
21144 if (i == -1)
21145 output_operand_lossage ("invalid %%j code");
21146 else
21147 fprintf (file, "%d", i);
21148 return;
21150 case 'J':
21151 /* Similar, but add one for shift count in rlinm for scc and pass
21152 scc flag to `ccr_bit'. */
21153 i = ccr_bit (x, 1);
21154 if (i == -1)
21155 output_operand_lossage ("invalid %%J code");
21156 else
21157 /* If we want bit 31, write a shift count of zero, not 32. */
21158 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21159 return;
21161 case 'k':
21162 /* X must be a constant. Write the 1's complement of the
21163 constant. */
21164 if (! INT_P (x))
21165 output_operand_lossage ("invalid %%k value");
21166 else
21167 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21168 return;
21170 case 'K':
21171 /* X must be a symbolic constant on ELF. Write an
21172 expression suitable for an 'addi' that adds in the low 16
21173 bits of the MEM. */
21174 if (GET_CODE (x) == CONST)
21176 if (GET_CODE (XEXP (x, 0)) != PLUS
21177 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21178 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21179 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21180 output_operand_lossage ("invalid %%K value");
21182 print_operand_address (file, x);
21183 fputs ("@l", file);
21184 return;
21186 /* %l is output_asm_label. */
21188 case 'L':
21189 /* Write second word of DImode or DFmode reference. Works on register
21190 or non-indexed memory only. */
21191 if (REG_P (x))
21192 fputs (reg_names[REGNO (x) + 1], file);
21193 else if (MEM_P (x))
21195 machine_mode mode = GET_MODE (x);
21196 /* Handle possible auto-increment. Since it is pre-increment and
21197 we have already done it, we can just use an offset of one word. */
21198 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21199 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21200 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21201 UNITS_PER_WORD));
21202 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21203 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21204 UNITS_PER_WORD));
21205 else
21206 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21207 UNITS_PER_WORD),
21208 0));
21210 if (small_data_operand (x, GET_MODE (x)))
21211 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21212 reg_names[SMALL_DATA_REG]);
21214 return;
21216 case 'N':
21217 /* Write the number of elements in the vector times 4. */
21218 if (GET_CODE (x) != PARALLEL)
21219 output_operand_lossage ("invalid %%N value");
21220 else
21221 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21222 return;
21224 case 'O':
21225 /* Similar, but subtract 1 first. */
21226 if (GET_CODE (x) != PARALLEL)
21227 output_operand_lossage ("invalid %%O value");
21228 else
21229 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21230 return;
21232 case 'p':
21233 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21234 if (! INT_P (x)
21235 || INTVAL (x) < 0
21236 || (i = exact_log2 (INTVAL (x))) < 0)
21237 output_operand_lossage ("invalid %%p value");
21238 else
21239 fprintf (file, "%d", i);
21240 return;
21242 case 'P':
21243 /* The operand must be an indirect memory reference. The result
21244 is the register name. */
21245 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21246 || REGNO (XEXP (x, 0)) >= 32)
21247 output_operand_lossage ("invalid %%P value");
21248 else
21249 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21250 return;
21252 case 'q':
21253 /* This outputs the logical code corresponding to a boolean
21254 expression. The expression may have one or both operands
21255 negated (if one, only the first one). For condition register
21256 logical operations, it will also treat the negated
21257 CR codes as NOTs, but not handle NOTs of them. */
21259 const char *const *t = 0;
21260 const char *s;
21261 enum rtx_code code = GET_CODE (x);
21262 static const char * const tbl[3][3] = {
21263 { "and", "andc", "nor" },
21264 { "or", "orc", "nand" },
21265 { "xor", "eqv", "xor" } };
21267 if (code == AND)
21268 t = tbl[0];
21269 else if (code == IOR)
21270 t = tbl[1];
21271 else if (code == XOR)
21272 t = tbl[2];
21273 else
21274 output_operand_lossage ("invalid %%q value");
21276 if (GET_CODE (XEXP (x, 0)) != NOT)
21277 s = t[0];
21278 else
21280 if (GET_CODE (XEXP (x, 1)) == NOT)
21281 s = t[2];
21282 else
21283 s = t[1];
21286 fputs (s, file);
21288 return;
21290 case 'Q':
21291 if (! TARGET_MFCRF)
21292 return;
21293 fputc (',', file);
21294 /* FALLTHRU */
21296 case 'R':
21297 /* X is a CR register. Print the mask for `mtcrf'. */
21298 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21299 output_operand_lossage ("invalid %%R value");
21300 else
21301 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21302 return;
21304 case 's':
21305 /* Low 5 bits of 32 - value. */
21306 if (! INT_P (x))
21307 output_operand_lossage ("invalid %%s value");
21308 else
21309 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21310 return;
21312 case 't':
21313 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21314 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21316 /* Bit 3 is OV bit. */
21317 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21319 /* If we want bit 31, write a shift count of zero, not 32. */
21320 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21321 return;
21323 case 'T':
21324 /* Print the symbolic name of a branch target register. */
21325 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21326 && REGNO (x) != CTR_REGNO))
21327 output_operand_lossage ("invalid %%T value");
21328 else if (REGNO (x) == LR_REGNO)
21329 fputs ("lr", file);
21330 else
21331 fputs ("ctr", file);
21332 return;
21334 case 'u':
21335 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21336 for use in unsigned operand. */
21337 if (! INT_P (x))
21339 output_operand_lossage ("invalid %%u value");
21340 return;
21343 uval = INTVAL (x);
21344 if ((uval & 0xffff) == 0)
21345 uval >>= 16;
21347 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21348 return;
21350 case 'v':
21351 /* High-order 16 bits of constant for use in signed operand. */
21352 if (! INT_P (x))
21353 output_operand_lossage ("invalid %%v value");
21354 else
21355 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21356 (INTVAL (x) >> 16) & 0xffff);
21357 return;
21359 case 'U':
21360 /* Print `u' if this has an auto-increment or auto-decrement. */
21361 if (MEM_P (x)
21362 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21363 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21364 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21365 putc ('u', file);
21366 return;
21368 case 'V':
21369 /* Print the trap code for this operand. */
21370 switch (GET_CODE (x))
21372 case EQ:
21373 fputs ("eq", file); /* 4 */
21374 break;
21375 case NE:
21376 fputs ("ne", file); /* 24 */
21377 break;
21378 case LT:
21379 fputs ("lt", file); /* 16 */
21380 break;
21381 case LE:
21382 fputs ("le", file); /* 20 */
21383 break;
21384 case GT:
21385 fputs ("gt", file); /* 8 */
21386 break;
21387 case GE:
21388 fputs ("ge", file); /* 12 */
21389 break;
21390 case LTU:
21391 fputs ("llt", file); /* 2 */
21392 break;
21393 case LEU:
21394 fputs ("lle", file); /* 6 */
21395 break;
21396 case GTU:
21397 fputs ("lgt", file); /* 1 */
21398 break;
21399 case GEU:
21400 fputs ("lge", file); /* 5 */
21401 break;
21402 default:
21403 gcc_unreachable ();
21405 break;
21407 case 'w':
21408 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21409 normally. */
21410 if (INT_P (x))
21411 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21412 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21413 else
21414 print_operand (file, x, 0);
21415 return;
21417 case 'x':
21418 /* X is a FPR or Altivec register used in a VSX context. */
21419 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21420 output_operand_lossage ("invalid %%x value");
21421 else
21423 int reg = REGNO (x);
21424 int vsx_reg = (FP_REGNO_P (reg)
21425 ? reg - 32
21426 : reg - FIRST_ALTIVEC_REGNO + 32);
21428 #ifdef TARGET_REGNAMES
21429 if (TARGET_REGNAMES)
21430 fprintf (file, "%%vs%d", vsx_reg);
21431 else
21432 #endif
21433 fprintf (file, "%d", vsx_reg);
21435 return;
21437 case 'X':
21438 if (MEM_P (x)
21439 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21440 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21441 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21442 putc ('x', file);
21443 return;
21445 case 'Y':
21446 /* Like 'L', for third word of TImode/PTImode. */
21447 if (REG_P (x))
21448 fputs (reg_names[REGNO (x) + 2], file);
21449 else if (MEM_P (x))
21451 machine_mode mode = GET_MODE (x);
21452 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21453 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21454 output_address (mode, plus_constant (Pmode,
21455 XEXP (XEXP (x, 0), 0), 8));
21456 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21457 output_address (mode, plus_constant (Pmode,
21458 XEXP (XEXP (x, 0), 0), 8));
21459 else
21460 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21461 if (small_data_operand (x, GET_MODE (x)))
21462 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21463 reg_names[SMALL_DATA_REG]);
21465 return;
21467 case 'z':
21468 /* X is a SYMBOL_REF. Write out the name preceded by a
21469 period and without any trailing data in brackets. Used for function
21470 names. If we are configured for System V (or the embedded ABI) on
21471 the PowerPC, do not emit the period, since those systems do not use
21472 TOCs and the like. */
21473 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21475 /* For macho, check to see if we need a stub. */
21476 if (TARGET_MACHO)
21478 const char *name = XSTR (x, 0);
21479 #if TARGET_MACHO
21480 if (darwin_emit_branch_islands
21481 && MACHOPIC_INDIRECT
21482 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21483 name = machopic_indirection_name (x, /*stub_p=*/true);
21484 #endif
21485 assemble_name (file, name);
21487 else if (!DOT_SYMBOLS)
21488 assemble_name (file, XSTR (x, 0));
21489 else
21490 rs6000_output_function_entry (file, XSTR (x, 0));
21491 return;
21493 case 'Z':
21494 /* Like 'L', for last word of TImode/PTImode. */
21495 if (REG_P (x))
21496 fputs (reg_names[REGNO (x) + 3], file);
21497 else if (MEM_P (x))
21499 machine_mode mode = GET_MODE (x);
21500 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21501 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21502 output_address (mode, plus_constant (Pmode,
21503 XEXP (XEXP (x, 0), 0), 12));
21504 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21505 output_address (mode, plus_constant (Pmode,
21506 XEXP (XEXP (x, 0), 0), 12));
21507 else
21508 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21509 if (small_data_operand (x, GET_MODE (x)))
21510 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21511 reg_names[SMALL_DATA_REG]);
21513 return;
21515 /* Print AltiVec memory operand. */
21516 case 'y':
21518 rtx tmp;
21520 gcc_assert (MEM_P (x));
21522 tmp = XEXP (x, 0);
21524 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21525 && GET_CODE (tmp) == AND
21526 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21527 && INTVAL (XEXP (tmp, 1)) == -16)
21528 tmp = XEXP (tmp, 0);
21529 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21530 && GET_CODE (tmp) == PRE_MODIFY)
21531 tmp = XEXP (tmp, 1);
21532 if (REG_P (tmp))
21533 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21534 else
21536 if (GET_CODE (tmp) != PLUS
21537 || !REG_P (XEXP (tmp, 0))
21538 || !REG_P (XEXP (tmp, 1)))
21540 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21541 break;
21544 if (REGNO (XEXP (tmp, 0)) == 0)
21545 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21546 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21547 else
21548 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21549 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21551 break;
21554 case 0:
21555 if (REG_P (x))
21556 fprintf (file, "%s", reg_names[REGNO (x)]);
21557 else if (MEM_P (x))
21559 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21560 know the width from the mode. */
21561 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21562 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21563 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21564 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21565 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21566 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21567 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21568 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21569 else
21570 output_address (GET_MODE (x), XEXP (x, 0));
21572 else
21574 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21575 /* This hack along with a corresponding hack in
21576 rs6000_output_addr_const_extra arranges to output addends
21577 where the assembler expects to find them. eg.
21578 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21579 without this hack would be output as "x@toc+4". We
21580 want "x+4@toc". */
21581 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21582 else
21583 output_addr_const (file, x);
21585 return;
21587 case '&':
21588 if (const char *name = get_some_local_dynamic_name ())
21589 assemble_name (file, name);
21590 else
21591 output_operand_lossage ("'%%&' used without any "
21592 "local dynamic TLS references");
21593 return;
21595 default:
21596 output_operand_lossage ("invalid %%xn code");
21600 /* Print the address of an operand. */
21602 void
21603 print_operand_address (FILE *file, rtx x)
21605 if (REG_P (x))
21606 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21607 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21608 || GET_CODE (x) == LABEL_REF)
21610 output_addr_const (file, x);
21611 if (small_data_operand (x, GET_MODE (x)))
21612 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21613 reg_names[SMALL_DATA_REG]);
21614 else
21615 gcc_assert (!TARGET_TOC);
21617 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21618 && REG_P (XEXP (x, 1)))
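      /* As in the '%y' case above, print r0 second if it appears, since
	 the first (RA) operand of an indexed address reads as 0 when it
	 is r0.  */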
21620 if (REGNO (XEXP (x, 0)) == 0)
21621 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21622 reg_names[ REGNO (XEXP (x, 0)) ]);
21623 else
21624 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21625 reg_names[ REGNO (XEXP (x, 1)) ]);
21627 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21628 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21629 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21630 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21631 #if TARGET_MACHO
21632 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21633 && CONSTANT_P (XEXP (x, 1)))
21635 fprintf (file, "lo16(");
21636 output_addr_const (file, XEXP (x, 1));
21637 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21639 #endif
21640 #if TARGET_ELF
21641 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21642 && CONSTANT_P (XEXP (x, 1)))
21644 output_addr_const (file, XEXP (x, 1));
21645 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21647 #endif
21648 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21650 /* This hack along with a corresponding hack in
21651 rs6000_output_addr_const_extra arranges to output addends
21652          where the assembler expects to find them.  E.g.
21653 (lo_sum (reg 9)
21654 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21655 without this hack would be output as "x@toc+8@l(9)". We
21656 want "x+8@toc@l(9)". */
21657 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21658 if (GET_CODE (x) == LO_SUM)
21659 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21660 else
21661 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21663 else
21664 gcc_unreachable ();
21667 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21669 static bool
21670 rs6000_output_addr_const_extra (FILE *file, rtx x)
21672 if (GET_CODE (x) == UNSPEC)
21673 switch (XINT (x, 1))
21675 case UNSPEC_TOCREL:
21676 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21677 && REG_P (XVECEXP (x, 0, 1))
21678 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21679 output_addr_const (file, XVECEXP (x, 0, 0));
21680 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21682 if (INTVAL (tocrel_offset_oac) >= 0)
21683 fprintf (file, "+");
21684 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21686 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21688 putc ('-', file);
21689 assemble_name (file, toc_label_name);
21690 need_toc_init = 1;
21692 else if (TARGET_ELF)
21693 fputs ("@toc", file);
21694 return true;
21696 #if TARGET_MACHO
21697 case UNSPEC_MACHOPIC_OFFSET:
21698 output_addr_const (file, XVECEXP (x, 0, 0));
21699 putc ('-', file);
21700 machopic_output_function_base_name (file);
21701 return true;
21702 #endif
21704 return false;
21707 /* Target hook for assembling integer objects. The PowerPC version has
21708 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21709 is defined. It also needs to handle DI-mode objects on 64-bit
21710 targets. */
21712 static bool
21713 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21715 #ifdef RELOCATABLE_NEEDS_FIXUP
21716 /* Special handling for SI values. */
21717 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21719 static int recurse = 0;
21721 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21722 the .fixup section. Since the TOC section is already relocated, we
21723 don't need to mark it here. We used to skip the text section, but it
21724 should never be valid for relocated addresses to be placed in the text
21725 section. */
21726 if (DEFAULT_ABI == ABI_V4
21727 && (TARGET_RELOCATABLE || flag_pic > 1)
21728 && in_section != toc_section
21729 && !recurse
21730 && !CONST_SCALAR_INT_P (x)
21731 && CONSTANT_P (x))
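	  /* Roughly, this emits (with .LCPn the generated internal label):
		.LCPn:	.long (X)@fixup
			.section ".fixup","aw"
			.align 2
			.long .LCPn
			.previous  */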
21733 char buf[256];
21735 recurse = 1;
21736 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21737 fixuplabelno++;
21738 ASM_OUTPUT_LABEL (asm_out_file, buf);
21739 fprintf (asm_out_file, "\t.long\t(");
21740 output_addr_const (asm_out_file, x);
21741 fprintf (asm_out_file, ")@fixup\n");
21742 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21743 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21744 fprintf (asm_out_file, "\t.long\t");
21745 assemble_name (asm_out_file, buf);
21746 fprintf (asm_out_file, "\n\t.previous\n");
21747 recurse = 0;
21748 return true;
21750 /* Remove initial .'s to turn a -mcall-aixdesc function
21751 address into the address of the descriptor, not the function
21752 itself. */
21753 else if (GET_CODE (x) == SYMBOL_REF
21754 && XSTR (x, 0)[0] == '.'
21755 && DEFAULT_ABI == ABI_AIX)
21757 const char *name = XSTR (x, 0);
21758 while (*name == '.')
21759 name++;
21761 fprintf (asm_out_file, "\t.long\t%s\n", name);
21762 return true;
21765 #endif /* RELOCATABLE_NEEDS_FIXUP */
21766 return default_assemble_integer (x, size, aligned_p);
21769 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21770 /* Emit an assembler directive to set symbol visibility for DECL to
21771 VISIBILITY_TYPE. */
21773 static void
21774 rs6000_assemble_visibility (tree decl, int vis)
21776 if (TARGET_XCOFF)
21777 return;
21779 /* Functions need to have their entry point symbol visibility set as
21780 well as their descriptor symbol visibility. */
21781 if (DEFAULT_ABI == ABI_AIX
21782 && DOT_SYMBOLS
21783 && TREE_CODE (decl) == FUNCTION_DECL)
21785 static const char * const visibility_types[] = {
21786 NULL, "protected", "hidden", "internal"
21789 const char *name, *type;
21791 name = ((* targetm.strip_name_encoding)
21792 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21793 type = visibility_types[vis];
21795 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21796 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21798 else
21799 default_assemble_visibility (decl, vis);
21801 #endif
21803 enum rtx_code
21804 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21806   /* Reversal of FP compares needs care -- an ordered compare
21807 becomes an unordered compare and vice versa. */
21808 if (mode == CCFPmode
21809 && (!flag_finite_math_only
21810 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21811 || code == UNEQ || code == LTGT))
21812 return reverse_condition_maybe_unordered (code);
21813 else
21814 return reverse_condition (code);
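  /* E.g. under CCFPmode GE reverses to UNLT rather than LT, so operands
     that compare unordered still take the reversed branch.  */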
21817 /* Generate a compare for CODE. Return a brand-new rtx that
21818 represents the result of the compare. */
21820 static rtx
21821 rs6000_generate_compare (rtx cmp, machine_mode mode)
21823 machine_mode comp_mode;
21824 rtx compare_result;
21825 enum rtx_code code = GET_CODE (cmp);
21826 rtx op0 = XEXP (cmp, 0);
21827 rtx op1 = XEXP (cmp, 1);
21829 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21830 comp_mode = CCmode;
21831 else if (FLOAT_MODE_P (mode))
21832 comp_mode = CCFPmode;
21833 else if (code == GTU || code == LTU
21834 || code == GEU || code == LEU)
21835 comp_mode = CCUNSmode;
21836 else if ((code == EQ || code == NE)
21837 && unsigned_reg_p (op0)
21838 && (unsigned_reg_p (op1)
21839 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21840     /* These are unsigned values; perhaps there will be a later
21841 ordering compare that can be shared with this one. */
21842 comp_mode = CCUNSmode;
21843 else
21844 comp_mode = CCmode;
21846 /* If we have an unsigned compare, make sure we don't have a signed value as
21847 an immediate. */
21848 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21849 && INTVAL (op1) < 0)
21851 op0 = copy_rtx_if_shared (op0);
21852 op1 = force_reg (GET_MODE (op0), op1);
21853 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21856 /* First, the compare. */
21857 compare_result = gen_reg_rtx (comp_mode);
21859 /* IEEE 128-bit support in VSX registers when we do not have hardware
21860 support. */
21861 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21863 rtx libfunc = NULL_RTX;
21864 bool check_nan = false;
21865 rtx dest;
21867 switch (code)
21869 case EQ:
21870 case NE:
21871 libfunc = optab_libfunc (eq_optab, mode);
21872 break;
21874 case GT:
21875 case GE:
21876 libfunc = optab_libfunc (ge_optab, mode);
21877 break;
21879 case LT:
21880 case LE:
21881 libfunc = optab_libfunc (le_optab, mode);
21882 break;
21884 case UNORDERED:
21885 case ORDERED:
21886 libfunc = optab_libfunc (unord_optab, mode);
21887 code = (code == UNORDERED) ? NE : EQ;
21888 break;
21890 case UNGE:
21891 case UNGT:
21892 check_nan = true;
21893 libfunc = optab_libfunc (ge_optab, mode);
21894 code = (code == UNGE) ? GE : GT;
21895 break;
21897 case UNLE:
21898 case UNLT:
21899 check_nan = true;
21900 libfunc = optab_libfunc (le_optab, mode);
21901 code = (code == UNLE) ? LE : LT;
21902 break;
21904 case UNEQ:
21905 case LTGT:
21906 check_nan = true;
21907 libfunc = optab_libfunc (eq_optab, mode);
21908           code = (code == UNEQ) ? EQ : NE;
21909 break;
21911 default:
21912 gcc_unreachable ();
21915 gcc_assert (libfunc);
21917 if (!check_nan)
21918 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21919 SImode, op0, mode, op1, mode);
21921 /* The library signals an exception for signalling NaNs, so we need to
21922 handle isgreater, etc. by first checking isordered. */
21923 else
21925 rtx ne_rtx, normal_dest, unord_dest;
21926 rtx unord_func = optab_libfunc (unord_optab, mode);
21927 rtx join_label = gen_label_rtx ();
21928 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21929 rtx unord_cmp = gen_reg_rtx (comp_mode);
21932 /* Test for either value being a NaN. */
21933 gcc_assert (unord_func);
21934 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21935 SImode, op0, mode, op1, mode);
21937       /* Set the result to 1 if either value is a NaN, and jump to the join
21938 label. */
21939 dest = gen_reg_rtx (SImode);
21940 emit_move_insn (dest, const1_rtx);
21941 emit_insn (gen_rtx_SET (unord_cmp,
21942 gen_rtx_COMPARE (comp_mode, unord_dest,
21943 const0_rtx)));
21945 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21946 emit_jump_insn (gen_rtx_SET (pc_rtx,
21947 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21948 join_ref,
21949 pc_rtx)));
21951 /* Do the normal comparison, knowing that the values are not
21952 NaNs. */
21953 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21954 SImode, op0, mode, op1, mode);
21956 emit_insn (gen_cstoresi4 (dest,
21957 gen_rtx_fmt_ee (code, SImode, normal_dest,
21958 const0_rtx),
21959 normal_dest, const0_rtx));
21961       /* Join NaN and non-NaN paths.  Compare dest against 0.  */
21962 emit_label (join_label);
21963 code = NE;
21966 emit_insn (gen_rtx_SET (compare_result,
21967 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21970 else
21972 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21973 CLOBBERs to match cmptf_internal2 pattern. */
21974 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21975 && FLOAT128_IBM_P (GET_MODE (op0))
21976 && TARGET_HARD_FLOAT)
21977 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21978 gen_rtvec (10,
21979 gen_rtx_SET (compare_result,
21980 gen_rtx_COMPARE (comp_mode, op0, op1)),
21981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21982 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21983 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21984 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21985 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21986 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21987 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21988 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21989 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21990 else if (GET_CODE (op1) == UNSPEC
21991 && XINT (op1, 1) == UNSPEC_SP_TEST)
21993 rtx op1b = XVECEXP (op1, 0, 0);
21994 comp_mode = CCEQmode;
21995 compare_result = gen_reg_rtx (CCEQmode);
21996 if (TARGET_64BIT)
21997 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21998 else
21999 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22001 else
22002 emit_insn (gen_rtx_SET (compare_result,
22003 gen_rtx_COMPARE (comp_mode, op0, op1)));
22006 /* Some kinds of FP comparisons need an OR operation;
22007 under flag_finite_math_only we don't bother. */
22008 if (FLOAT_MODE_P (mode)
22009 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22010 && !flag_finite_math_only
22011 && (code == LE || code == GE
22012 || code == UNEQ || code == LTGT
22013 || code == UNGT || code == UNLT))
22015 enum rtx_code or1, or2;
22016 rtx or1_rtx, or2_rtx, compare2_rtx;
22017 rtx or_result = gen_reg_rtx (CCEQmode);
22019 switch (code)
22021 case LE: or1 = LT; or2 = EQ; break;
22022 case GE: or1 = GT; or2 = EQ; break;
22023 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22024 case LTGT: or1 = LT; or2 = GT; break;
22025 case UNGT: or1 = UNORDERED; or2 = GT; break;
22026 case UNLT: or1 = UNORDERED; or2 = LT; break;
22027 default: gcc_unreachable ();
22029 validate_condition_mode (or1, comp_mode);
22030 validate_condition_mode (or2, comp_mode);
22031 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22032 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22033 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22034 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22035 const_true_rtx);
22036 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22038 compare_result = or_result;
22039 code = EQ;
22042 validate_condition_mode (code, GET_MODE (compare_result));
22044 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
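/* The returned rtx has the shape (CODE (reg:CC n) (const_int 0)), e.g.
   (gt (reg:CC 100) (const_int 0)), ready for use as the condition of a
   conditional branch or store-flag sequence.  */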
22048 /* Return the diagnostic message string if the binary operation OP is
22049 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22051 static const char*
22052 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22053 const_tree type1,
22054 const_tree type2)
22056 machine_mode mode1 = TYPE_MODE (type1);
22057 machine_mode mode2 = TYPE_MODE (type2);
22059 /* For complex modes, use the inner type. */
22060 if (COMPLEX_MODE_P (mode1))
22061 mode1 = GET_MODE_INNER (mode1);
22063 if (COMPLEX_MODE_P (mode2))
22064 mode2 = GET_MODE_INNER (mode2);
22066 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22067 double to intermix unless -mfloat128-convert. */
22068 if (mode1 == mode2)
22069 return NULL;
22071 if (!TARGET_FLOAT128_CVT)
22073 if ((mode1 == KFmode && mode2 == IFmode)
22074 || (mode1 == IFmode && mode2 == KFmode))
22075 return N_("__float128 and __ibm128 cannot be used in the same "
22076 "expression");
22078 if (TARGET_IEEEQUAD
22079 && ((mode1 == IFmode && mode2 == TFmode)
22080 || (mode1 == TFmode && mode2 == IFmode)))
22081 return N_("__ibm128 and long double cannot be used in the same "
22082 "expression");
22084 if (!TARGET_IEEEQUAD
22085 && ((mode1 == KFmode && mode2 == TFmode)
22086 || (mode1 == TFmode && mode2 == KFmode)))
22087 return N_("__float128 and long double cannot be used in the same "
22088 "expression");
22091 return NULL;
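/* For example, with the default options "__float128 a; __ibm128 b; a + b"
   is rejected with the first message above; -mfloat128-convert allows the
   mix by converting one operand.  */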
22095 /* Expand floating point conversion to/from __float128 and __ibm128. */
22097 void
22098 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22100 machine_mode dest_mode = GET_MODE (dest);
22101 machine_mode src_mode = GET_MODE (src);
22102 convert_optab cvt = unknown_optab;
22103 bool do_move = false;
22104 rtx libfunc = NULL_RTX;
22105 rtx dest2;
22106 typedef rtx (*rtx_2func_t) (rtx, rtx);
22107 rtx_2func_t hw_convert = (rtx_2func_t)0;
22108 size_t kf_or_tf;
22110 struct hw_conv_t {
22111 rtx_2func_t from_df;
22112 rtx_2func_t from_sf;
22113 rtx_2func_t from_si_sign;
22114 rtx_2func_t from_si_uns;
22115 rtx_2func_t from_di_sign;
22116 rtx_2func_t from_di_uns;
22117 rtx_2func_t to_df;
22118 rtx_2func_t to_sf;
22119 rtx_2func_t to_si_sign;
22120 rtx_2func_t to_si_uns;
22121 rtx_2func_t to_di_sign;
22122 rtx_2func_t to_di_uns;
22123 } hw_conversions[2] = {
22124     /* conversions to/from KFmode */
22126 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22127 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22128 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22129 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22130 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22131 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22132 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22133 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22134 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22135 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22136 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22137 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22140     /* conversions to/from TFmode */
22142 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22143 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22144 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22145 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22146 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22147 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22148 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22149 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22150 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22151 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22152 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22153 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22157 if (dest_mode == src_mode)
22158 gcc_unreachable ();
22160 /* Eliminate memory operations. */
22161 if (MEM_P (src))
22162 src = force_reg (src_mode, src);
22164 if (MEM_P (dest))
22166 rtx tmp = gen_reg_rtx (dest_mode);
22167 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22168 rs6000_emit_move (dest, tmp, dest_mode);
22169 return;
22172 /* Convert to IEEE 128-bit floating point. */
22173 if (FLOAT128_IEEE_P (dest_mode))
22175 if (dest_mode == KFmode)
22176 kf_or_tf = 0;
22177 else if (dest_mode == TFmode)
22178 kf_or_tf = 1;
22179 else
22180 gcc_unreachable ();
22182 switch (src_mode)
22184 case E_DFmode:
22185 cvt = sext_optab;
22186 hw_convert = hw_conversions[kf_or_tf].from_df;
22187 break;
22189 case E_SFmode:
22190 cvt = sext_optab;
22191 hw_convert = hw_conversions[kf_or_tf].from_sf;
22192 break;
22194 case E_KFmode:
22195 case E_IFmode:
22196 case E_TFmode:
22197 if (FLOAT128_IBM_P (src_mode))
22198 cvt = sext_optab;
22199 else
22200 do_move = true;
22201 break;
22203 case E_SImode:
22204 if (unsigned_p)
22206 cvt = ufloat_optab;
22207 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22209 else
22211 cvt = sfloat_optab;
22212 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22214 break;
22216 case E_DImode:
22217 if (unsigned_p)
22219 cvt = ufloat_optab;
22220 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22222 else
22224 cvt = sfloat_optab;
22225 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22227 break;
22229 default:
22230 gcc_unreachable ();
22234 /* Convert from IEEE 128-bit floating point. */
22235 else if (FLOAT128_IEEE_P (src_mode))
22237 if (src_mode == KFmode)
22238 kf_or_tf = 0;
22239 else if (src_mode == TFmode)
22240 kf_or_tf = 1;
22241 else
22242 gcc_unreachable ();
22244 switch (dest_mode)
22246 case E_DFmode:
22247 cvt = trunc_optab;
22248 hw_convert = hw_conversions[kf_or_tf].to_df;
22249 break;
22251 case E_SFmode:
22252 cvt = trunc_optab;
22253 hw_convert = hw_conversions[kf_or_tf].to_sf;
22254 break;
22256 case E_KFmode:
22257 case E_IFmode:
22258 case E_TFmode:
22259 if (FLOAT128_IBM_P (dest_mode))
22260 cvt = trunc_optab;
22261 else
22262 do_move = true;
22263 break;
22265 case E_SImode:
22266 if (unsigned_p)
22268 cvt = ufix_optab;
22269 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22271 else
22273 cvt = sfix_optab;
22274 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22276 break;
22278 case E_DImode:
22279 if (unsigned_p)
22281 cvt = ufix_optab;
22282 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22284 else
22286 cvt = sfix_optab;
22287 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22289 break;
22291 default:
22292 gcc_unreachable ();
22296   /* Both operands are IBM format.  */
22297 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22298 do_move = true;
22300 else
22301 gcc_unreachable ();
22303   /* Handle conversions that are just a register move (TFmode/KFmode and other same-format pairs).  */
22304 if (do_move)
22305 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22307 /* Handle conversion if we have hardware support. */
22308 else if (TARGET_FLOAT128_HW && hw_convert)
22309 emit_insn ((hw_convert) (dest, src));
22311 /* Call an external function to do the conversion. */
22312 else if (cvt != unknown_optab)
22314 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22315 gcc_assert (libfunc != NULL_RTX);
22317 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22318 src, src_mode);
22320 gcc_assert (dest2 != NULL_RTX);
22321 if (!rtx_equal_p (dest, dest2))
22322 emit_move_insn (dest, dest2);
22325 else
22326 gcc_unreachable ();
22328 return;
22332 /* Emit the RTL for an sISEL pattern. */
22334 void
22335 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22337 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22340 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22341 can be used as that dest register. Return the dest register. */
22344 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22346 if (op2 == const0_rtx)
22347 return op1;
22349 if (GET_CODE (scratch) == SCRATCH)
22350 scratch = gen_reg_rtx (mode);
22352 if (logical_operand (op2, mode))
22353 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22354 else
22355 emit_insn (gen_rtx_SET (scratch,
22356 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22358 return scratch;
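/* E.g. for OP1 in r3 and OP2 == 5 this emits scratch = r3 ^ 5 (or
   scratch = r3 + (-5) when OP2 is not a logical_operand), so the result
   is zero exactly when OP1 == OP2.  */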
22361 void
22362 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22364 rtx condition_rtx;
22365 machine_mode op_mode;
22366 enum rtx_code cond_code;
22367 rtx result = operands[0];
22369 condition_rtx = rs6000_generate_compare (operands[1], mode);
22370 cond_code = GET_CODE (condition_rtx);
22372 if (cond_code == NE
22373 || cond_code == GE || cond_code == LE
22374 || cond_code == GEU || cond_code == LEU
22375 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22377 rtx not_result = gen_reg_rtx (CCEQmode);
22378 rtx not_op, rev_cond_rtx;
22379 machine_mode cc_mode;
22381 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22383 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22384 SImode, XEXP (condition_rtx, 0), const0_rtx);
22385 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22386 emit_insn (gen_rtx_SET (not_result, not_op));
22387 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22390 op_mode = GET_MODE (XEXP (operands[1], 0));
22391 if (op_mode == VOIDmode)
22392 op_mode = GET_MODE (XEXP (operands[1], 1));
22394 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22396 PUT_MODE (condition_rtx, DImode);
22397 convert_move (result, condition_rtx, 0);
22399 else
22401 PUT_MODE (condition_rtx, SImode);
22402 emit_insn (gen_rtx_SET (result, condition_rtx));
22406 /* Emit a conditional branch: the condition is OPERANDS[0], compared in MODE, and the target label is OPERANDS[3].  */
22408 void
22409 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22411 rtx condition_rtx, loc_ref;
22413 condition_rtx = rs6000_generate_compare (operands[0], mode);
22414 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22415 emit_jump_insn (gen_rtx_SET (pc_rtx,
22416 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22417 loc_ref, pc_rtx)));
22420 /* Return the string to output a conditional branch to LABEL, which is
22421 the operand template of the label, or NULL if the branch is really a
22422 conditional return.
22424 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22425 condition code register and its mode specifies what kind of
22426 comparison we made.
22428 REVERSED is nonzero if we should reverse the sense of the comparison.
22430 INSN is the insn. */
22432 char *
22433 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22435 static char string[64];
22436 enum rtx_code code = GET_CODE (op);
22437 rtx cc_reg = XEXP (op, 0);
22438 machine_mode mode = GET_MODE (cc_reg);
22439 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22440 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22441 int really_reversed = reversed ^ need_longbranch;
22442 char *s = string;
22443 const char *ccode;
22444 const char *pred;
22445 rtx note;
22447 validate_condition_mode (code, mode);
22449 /* Work out which way this really branches. We could use
22450      reverse_condition_maybe_unordered here unconditionally, but keeping
22451      the cases separate makes the resulting assembler clearer.  */
22452 if (really_reversed)
22454       /* Reversal of FP compares needs care -- an ordered compare
22455 becomes an unordered compare and vice versa. */
22456 if (mode == CCFPmode)
22457 code = reverse_condition_maybe_unordered (code);
22458 else
22459 code = reverse_condition (code);
22462 switch (code)
22464 /* Not all of these are actually distinct opcodes, but
22465 we distinguish them for clarity of the resulting assembler. */
22466 case NE: case LTGT:
22467 ccode = "ne"; break;
22468 case EQ: case UNEQ:
22469 ccode = "eq"; break;
22470 case GE: case GEU:
22471 ccode = "ge"; break;
22472 case GT: case GTU: case UNGT:
22473 ccode = "gt"; break;
22474 case LE: case LEU:
22475 ccode = "le"; break;
22476 case LT: case LTU: case UNLT:
22477 ccode = "lt"; break;
22478 case UNORDERED: ccode = "un"; break;
22479 case ORDERED: ccode = "nu"; break;
22480 case UNGE: ccode = "nl"; break;
22481 case UNLE: ccode = "ng"; break;
22482 default:
22483 gcc_unreachable ();
22486 /* Maybe we have a guess as to how likely the branch is. */
22487 pred = "";
22488 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22489 if (note != NULL_RTX)
22491 /* PROB is the difference from 50%. */
22492 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22493 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22495 /* Only hint for highly probable/improbable branches on newer cpus when
22496 we have real profile data, as static prediction overrides processor
22497 dynamic prediction. For older cpus we may as well always hint, but
22498 assume not taken for branches that are very close to 50% as a
22499 mispredicted taken branch is more expensive than a
22500 mispredicted not-taken branch. */
22501 if (rs6000_always_hint
22502 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22503 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22504 && br_prob_note_reliable_p (note)))
22506 if (abs (prob) > REG_BR_PROB_BASE / 20
22507 && ((prob > 0) ^ need_longbranch))
22508 pred = "+";
22509 else
22510 pred = "-";
22514 if (label == NULL)
22515 s += sprintf (s, "b%slr%s ", ccode, pred);
22516 else
22517 s += sprintf (s, "b%s%s ", ccode, pred);
22519 /* We need to escape any '%' characters in the reg_names string.
22520 Assume they'd only be the first character.... */
22521 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22522 *s++ = '%';
22523 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22525 if (label != NULL)
22527 /* If the branch distance was too far, we may have to use an
22528 unconditional branch to go the distance. */
22529 if (need_longbranch)
22530 s += sprintf (s, ",$+8\n\tb %s", label);
22531 else
22532 s += sprintf (s, ",%s", label);
22535 return string;
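/* After operand substitution the result looks like, e.g., "beq 0,.L5",
   or "bne+ 0,.L5" with a reliable likeliness hint; an out-of-range
   target yields the reversed pair "bne 0,$+8" followed by "b .L5".  */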
22538 /* Return insn for VSX or Altivec comparisons. */
22540 static rtx
22541 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22543 rtx mask;
22544 machine_mode mode = GET_MODE (op0);
22546 switch (code)
22548 default:
22549 break;
22551 case GE:
22552 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22553 return NULL_RTX;
22554 /* FALLTHRU */
22556 case EQ:
22557 case GT:
22558 case GTU:
22559 case ORDERED:
22560 case UNORDERED:
22561 case UNEQ:
22562 case LTGT:
22563 mask = gen_reg_rtx (mode);
22564 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22565 return mask;
22568 return NULL_RTX;
22571 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22572    DMODE is the expected destination mode.  This is a recursive function.  */
22574 static rtx
22575 rs6000_emit_vector_compare (enum rtx_code rcode,
22576 rtx op0, rtx op1,
22577 machine_mode dmode)
22579 rtx mask;
22580 bool swap_operands = false;
22581 bool try_again = false;
22583 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22584 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22586 /* See if the comparison works as is. */
22587 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22588 if (mask)
22589 return mask;
22591 switch (rcode)
22593 case LT:
22594 rcode = GT;
22595 swap_operands = true;
22596 try_again = true;
22597 break;
22598 case LTU:
22599 rcode = GTU;
22600 swap_operands = true;
22601 try_again = true;
22602 break;
22603 case NE:
22604 case UNLE:
22605 case UNLT:
22606 case UNGE:
22607 case UNGT:
22608 /* Invert condition and try again.
22609 e.g., A != B becomes ~(A==B). */
22611 enum rtx_code rev_code;
22612 enum insn_code nor_code;
22613 rtx mask2;
22615 rev_code = reverse_condition_maybe_unordered (rcode);
22616 if (rev_code == UNKNOWN)
22617 return NULL_RTX;
22619 nor_code = optab_handler (one_cmpl_optab, dmode);
22620 if (nor_code == CODE_FOR_nothing)
22621 return NULL_RTX;
22623 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22624 if (!mask2)
22625 return NULL_RTX;
22627 mask = gen_reg_rtx (dmode);
22628 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22629 return mask;
22631 break;
22632 case GE:
22633 case GEU:
22634 case LE:
22635 case LEU:
22636 /* Try GT/GTU/LT/LTU OR EQ */
22638 rtx c_rtx, eq_rtx;
22639 enum insn_code ior_code;
22640 enum rtx_code new_code;
22642 switch (rcode)
22644 case GE:
22645 new_code = GT;
22646 break;
22648 case GEU:
22649 new_code = GTU;
22650 break;
22652 case LE:
22653 new_code = LT;
22654 break;
22656 case LEU:
22657 new_code = LTU;
22658 break;
22660 default:
22661 gcc_unreachable ();
22664 ior_code = optab_handler (ior_optab, dmode);
22665 if (ior_code == CODE_FOR_nothing)
22666 return NULL_RTX;
22668 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22669 if (!c_rtx)
22670 return NULL_RTX;
22672 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22673 if (!eq_rtx)
22674 return NULL_RTX;
22676 mask = gen_reg_rtx (dmode);
22677 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22678 return mask;
22680 break;
22681 default:
22682 return NULL_RTX;
22685 if (try_again)
22687 if (swap_operands)
22688 std::swap (op0, op1);
22690 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22691 if (mask)
22692 return mask;
22695 /* You only get two chances. */
22696 return NULL_RTX;
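/* E.g. LT is retried as GT with the operands swapped, NE becomes the
   complement of EQ, and GE on integer vectors becomes (GT OR EQ).  */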
22699 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22700 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22701 operands for the relation operation COND. */
22704 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22705 rtx cond, rtx cc_op0, rtx cc_op1)
22707 machine_mode dest_mode = GET_MODE (dest);
22708 machine_mode mask_mode = GET_MODE (cc_op0);
22709 enum rtx_code rcode = GET_CODE (cond);
22710 machine_mode cc_mode = CCmode;
22711 rtx mask;
22712 rtx cond2;
22713 bool invert_move = false;
22715 if (VECTOR_UNIT_NONE_P (dest_mode))
22716 return 0;
22718 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22719 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22721 switch (rcode)
22723       /* Swap operands if we can; otherwise fall back to doing the operation as
22724          specified and using a NOR to invert the test.  */
22725 case NE:
22726 case UNLE:
22727 case UNLT:
22728 case UNGE:
22729 case UNGT:
22730 /* Invert condition and try again.
22731 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22732 invert_move = true;
22733 rcode = reverse_condition_maybe_unordered (rcode);
22734 if (rcode == UNKNOWN)
22735 return 0;
22736 break;
22738 case GE:
22739 case LE:
22740 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22742 /* Invert condition to avoid compound test. */
22743 invert_move = true;
22744 rcode = reverse_condition (rcode);
22746 break;
22748 case GTU:
22749 case GEU:
22750 case LTU:
22751 case LEU:
22752 /* Mark unsigned tests with CCUNSmode. */
22753 cc_mode = CCUNSmode;
22755 /* Invert condition to avoid compound test if necessary. */
22756 if (rcode == GEU || rcode == LEU)
22758 invert_move = true;
22759 rcode = reverse_condition (rcode);
22761 break;
22763 default:
22764 break;
22767 /* Get the vector mask for the given relational operations. */
22768 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22770 if (!mask)
22771 return 0;
22773 if (invert_move)
22774 std::swap (op_true, op_false);
22776   /* Optimize the cases where an arm is a constant vector, using the fact that the mask elements are -1/0.  */
22777 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22778 && (GET_CODE (op_true) == CONST_VECTOR
22779 || GET_CODE (op_false) == CONST_VECTOR))
22781 rtx constant_0 = CONST0_RTX (dest_mode);
22782 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22784 if (op_true == constant_m1 && op_false == constant_0)
22786 emit_move_insn (dest, mask);
22787 return 1;
22790 else if (op_true == constant_0 && op_false == constant_m1)
22792 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22793 return 1;
22796 /* If we can't use the vector comparison directly, perhaps we can use
22797 the mask for the true or false fields, instead of loading up a
22798 constant. */
22799 if (op_true == constant_m1)
22800 op_true = mask;
22802 if (op_false == constant_0)
22803 op_false = mask;
22806 if (!REG_P (op_true) && !SUBREG_P (op_true))
22807 op_true = force_reg (dest_mode, op_true);
22809 if (!REG_P (op_false) && !SUBREG_P (op_false))
22810 op_false = force_reg (dest_mode, op_false);
22812 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22813 CONST0_RTX (dest_mode));
22814 emit_insn (gen_rtx_SET (dest,
22815 gen_rtx_IF_THEN_ELSE (dest_mode,
22816 cond2,
22817 op_true,
22818 op_false)));
22819 return 1;
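/* E.g. a V4SI "dest = (a > b) ? c : d" becomes a vcmpgtsw-style mask
   followed by the IF_THEN_ELSE above, which matches the vsel/xxsel
   select patterns.  */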
22822 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22823    for SF/DF scalars.  Move TRUE_COND to DEST if OP applied to the operands of
22824    the last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0 if the
22825 hardware has no such operation. */
22827 static int
22828 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22830 enum rtx_code code = GET_CODE (op);
22831 rtx op0 = XEXP (op, 0);
22832 rtx op1 = XEXP (op, 1);
22833 machine_mode compare_mode = GET_MODE (op0);
22834 machine_mode result_mode = GET_MODE (dest);
22835 bool max_p = false;
22837 if (result_mode != compare_mode)
22838 return 0;
22840 if (code == GE || code == GT)
22841 max_p = true;
22842 else if (code == LE || code == LT)
22843 max_p = false;
22844 else
22845 return 0;
22847 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22850 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22851 max_p = !max_p;
22853 else
22854 return 0;
22856 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22857 return 1;
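/* E.g. "d = (a >= b) ? a : b" maps to SMAX (xsmaxcdp); if the arms are
   swapped, max_p is inverted and SMIN (xsmincdp) is used instead.  */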
22860 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22861    XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP applied
22862    to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22863 zero/false. Return 0 if the hardware has no such operation. */
22865 static int
22866 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22868 enum rtx_code code = GET_CODE (op);
22869 rtx op0 = XEXP (op, 0);
22870 rtx op1 = XEXP (op, 1);
22871 machine_mode result_mode = GET_MODE (dest);
22872 rtx compare_rtx;
22873 rtx cmove_rtx;
22874 rtx clobber_rtx;
22876 if (!can_create_pseudo_p ())
22877 return 0;
22879 switch (code)
22881 case EQ:
22882 case GE:
22883 case GT:
22884 break;
22886 case NE:
22887 case LT:
22888 case LE:
22889 code = swap_condition (code);
22890 std::swap (op0, op1);
22891 break;
22893 default:
22894 return 0;
22897 /* Generate: [(parallel [(set (dest)
22898 (if_then_else (op (cmp1) (cmp2))
22899 (true)
22900 (false)))
22901 (clobber (scratch))])]. */
22903 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22904 cmove_rtx = gen_rtx_SET (dest,
22905 gen_rtx_IF_THEN_ELSE (result_mode,
22906 compare_rtx,
22907 true_cond,
22908 false_cond));
22910 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22911 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22912 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22914 return 1;
22917 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22918    operands of the last comparison is nonzero/true, FALSE_COND if it
22919 is zero/false. Return 0 if the hardware has no such operation. */
22922 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22924 enum rtx_code code = GET_CODE (op);
22925 rtx op0 = XEXP (op, 0);
22926 rtx op1 = XEXP (op, 1);
22927 machine_mode compare_mode = GET_MODE (op0);
22928 machine_mode result_mode = GET_MODE (dest);
22929 rtx temp;
22930 bool is_against_zero;
22932 /* These modes should always match. */
22933 if (GET_MODE (op1) != compare_mode
22934 /* In the isel case however, we can use a compare immediate, so
22935 op1 may be a small constant. */
22936 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22937 return 0;
22938 if (GET_MODE (true_cond) != result_mode)
22939 return 0;
22940 if (GET_MODE (false_cond) != result_mode)
22941 return 0;
22943 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22944 if (TARGET_P9_MINMAX
22945 && (compare_mode == SFmode || compare_mode == DFmode)
22946 && (result_mode == SFmode || result_mode == DFmode))
22948 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22949 return 1;
22951 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22952 return 1;
22955 /* Don't allow using floating point comparisons for integer results for
22956 now. */
22957 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22958 return 0;
22960 /* First, work out if the hardware can do this at all, or
22961 if it's too slow.... */
22962 if (!FLOAT_MODE_P (compare_mode))
22964 if (TARGET_ISEL)
22965 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22966 return 0;
22969 is_against_zero = op1 == CONST0_RTX (compare_mode);
22971 /* A floating-point subtract might overflow, underflow, or produce
22972 an inexact result, thus changing the floating-point flags, so it
22973 can't be generated if we care about that. It's safe if one side
22974 of the construct is zero, since then no subtract will be
22975 generated. */
22976 if (SCALAR_FLOAT_MODE_P (compare_mode)
22977 && flag_trapping_math && ! is_against_zero)
22978 return 0;
22980   /* Eliminate half of the comparisons by switching operands; this
22981 makes the remaining code simpler. */
22982 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22983 || code == LTGT || code == LT || code == UNLE)
22985 code = reverse_condition_maybe_unordered (code);
22986 temp = true_cond;
22987 true_cond = false_cond;
22988 false_cond = temp;
22991 /* UNEQ and LTGT take four instructions for a comparison with zero,
22992      so it'll probably be faster to use a branch here too.  */
22993 if (code == UNEQ && HONOR_NANS (compare_mode))
22994 return 0;
22996 /* We're going to try to implement comparisons by performing
22997 a subtract, then comparing against zero. Unfortunately,
22998 Inf - Inf is NaN which is not zero, and so if we don't
22999 know that the operand is finite and the comparison
23000      would treat EQ differently from UNORDERED, we can't do it.  */
23001 if (HONOR_INFINITIES (compare_mode)
23002 && code != GT && code != UNGE
23003 && (GET_CODE (op1) != CONST_DOUBLE
23004 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23005 /* Constructs of the form (a OP b ? a : b) are safe. */
23006 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23007 || (! rtx_equal_p (op0, true_cond)
23008 && ! rtx_equal_p (op1, true_cond))))
23009 return 0;
23011 /* At this point we know we can use fsel. */
23013 /* Reduce the comparison to a comparison against zero. */
23014 if (! is_against_zero)
23016 temp = gen_reg_rtx (compare_mode);
23017 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23018 op0 = temp;
23019 op1 = CONST0_RTX (compare_mode);
23022 /* If we don't care about NaNs we can reduce some of the comparisons
23023 down to faster ones. */
23024 if (! HONOR_NANS (compare_mode))
23025 switch (code)
23027 case GT:
23028 code = LE;
23029 temp = true_cond;
23030 true_cond = false_cond;
23031 false_cond = temp;
23032 break;
23033 case UNGE:
23034 code = GE;
23035 break;
23036 case UNEQ:
23037 code = EQ;
23038 break;
23039 default:
23040 break;
23043 /* Now, reduce everything down to a GE. */
23044 switch (code)
23046 case GE:
23047 break;
23049 case LE:
23050 temp = gen_reg_rtx (compare_mode);
23051 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23052 op0 = temp;
23053 break;
23055 case ORDERED:
23056 temp = gen_reg_rtx (compare_mode);
23057 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23058 op0 = temp;
23059 break;
23061 case EQ:
23062 temp = gen_reg_rtx (compare_mode);
23063 emit_insn (gen_rtx_SET (temp,
23064 gen_rtx_NEG (compare_mode,
23065 gen_rtx_ABS (compare_mode, op0))));
23066 op0 = temp;
23067 break;
23069 case UNGE:
23070 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23071 temp = gen_reg_rtx (result_mode);
23072 emit_insn (gen_rtx_SET (temp,
23073 gen_rtx_IF_THEN_ELSE (result_mode,
23074 gen_rtx_GE (VOIDmode,
23075 op0, op1),
23076 true_cond, false_cond)));
23077 false_cond = true_cond;
23078 true_cond = temp;
23080 temp = gen_reg_rtx (compare_mode);
23081 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23082 op0 = temp;
23083 break;
23085 case GT:
23086 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23087 temp = gen_reg_rtx (result_mode);
23088 emit_insn (gen_rtx_SET (temp,
23089 gen_rtx_IF_THEN_ELSE (result_mode,
23090 gen_rtx_GE (VOIDmode,
23091 op0, op1),
23092 true_cond, false_cond)));
23093 true_cond = false_cond;
23094 false_cond = temp;
23096 temp = gen_reg_rtx (compare_mode);
23097 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23098 op0 = temp;
23099 break;
23101 default:
23102 gcc_unreachable ();
23105 emit_insn (gen_rtx_SET (dest,
23106 gen_rtx_IF_THEN_ELSE (result_mode,
23107 gen_rtx_GE (VOIDmode,
23108 op0, op1),
23109 true_cond, false_cond)));
23110 return 1;
23113 /* Same as above, but for ints (isel). */
23115 static int
23116 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23118 rtx condition_rtx, cr;
23119 machine_mode mode = GET_MODE (dest);
23120 enum rtx_code cond_code;
23121 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23122 bool signedp;
23124 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23125 return 0;
23127 /* We still have to do the compare, because isel doesn't do a
23128      compare; it just looks at the CRx bits set by a previous compare
23129 instruction. */
23130 condition_rtx = rs6000_generate_compare (op, mode);
23131 cond_code = GET_CODE (condition_rtx);
23132 cr = XEXP (condition_rtx, 0);
23133 signedp = GET_MODE (cr) == CCmode;
23135 isel_func = (mode == SImode
23136 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23137 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23139 switch (cond_code)
23141 case LT: case GT: case LTU: case GTU: case EQ:
23142 /* isel handles these directly. */
23143 break;
23145 default:
23146 /* We need to swap the sense of the comparison. */
23148 std::swap (false_cond, true_cond);
23149 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23151 break;
23154 false_cond = force_reg (mode, false_cond);
23155 if (true_cond != const0_rtx)
23156 true_cond = force_reg (mode, true_cond);
23158 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23160 return 1;
23163 const char *
23164 output_isel (rtx *operands)
23166 enum rtx_code code;
23168 code = GET_CODE (operands[1]);
23170 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23172 gcc_assert (GET_CODE (operands[2]) == REG
23173 && GET_CODE (operands[3]) == REG);
23174 PUT_CODE (operands[1], reverse_condition (code));
23175 return "isel %0,%3,%2,%j1";
23178 return "isel %0,%2,%3,%j1";
23181 void
23182 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23184 machine_mode mode = GET_MODE (op0);
23185 enum rtx_code c;
23186 rtx target;
23188 /* VSX/altivec have direct min/max insns. */
23189 if ((code == SMAX || code == SMIN)
23190 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23191 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23193 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23194 return;
23197 if (code == SMAX || code == SMIN)
23198 c = GE;
23199 else
23200 c = GEU;
23202 if (code == SMAX || code == UMAX)
23203 target = emit_conditional_move (dest, c, op0, op1, mode,
23204 op0, op1, mode, 0);
23205 else
23206 target = emit_conditional_move (dest, c, op0, op1, mode,
23207 op1, op0, mode, 0);
23208 gcc_assert (target);
23209 if (target != dest)
23210 emit_move_insn (dest, target);
23213 /* Split a signbit operation on 64-bit machines with direct move.  Also handle
23214    the case where the value comes from memory or is already loaded into a GPR.  */
23216 void
23217 rs6000_split_signbit (rtx dest, rtx src)
23219 machine_mode d_mode = GET_MODE (dest);
23220 machine_mode s_mode = GET_MODE (src);
23221 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23222 rtx shift_reg = dest_di;
23224 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23226 if (MEM_P (src))
23228 rtx mem = (WORDS_BIG_ENDIAN
23229 ? adjust_address (src, DImode, 0)
23230 : adjust_address (src, DImode, 8));
23231 emit_insn (gen_rtx_SET (dest_di, mem));
23234 else
23236 unsigned int r = reg_or_subregno (src);
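      /* For a 128-bit value in a GPR pair, the doubleword holding the
	 sign bit is the first register on big-endian and the second on
	 little-endian, hence the adjustment below.  */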
23238 if (INT_REGNO_P (r))
23239 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23241 else
23243 /* Generate the special mfvsrd instruction to get it in a GPR. */
23244 gcc_assert (VSX_REGNO_P (r));
23245 if (s_mode == KFmode)
23246 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23247 else
23248 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23252 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23253 return;
23256 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23257 COND is true. Mark the jump as unlikely to be taken. */
23259 static void
23260 emit_unlikely_jump (rtx cond, rtx label)
23262 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23263 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23264 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23267 /* A subroutine of the atomic operation splitters. Emit a load-locked
23268    instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23269 the zero_extend operation. */
23271 static void
23272 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23274 rtx (*fn) (rtx, rtx) = NULL;
23276 switch (mode)
23278 case E_QImode:
23279 fn = gen_load_lockedqi;
23280 break;
23281 case E_HImode:
23282 fn = gen_load_lockedhi;
23283 break;
23284 case E_SImode:
23285 if (GET_MODE (mem) == QImode)
23286 fn = gen_load_lockedqi_si;
23287 else if (GET_MODE (mem) == HImode)
23288 fn = gen_load_lockedhi_si;
23289 else
23290 fn = gen_load_lockedsi;
23291 break;
23292 case E_DImode:
23293 fn = gen_load_lockeddi;
23294 break;
23295 case E_TImode:
23296 fn = gen_load_lockedti;
23297 break;
23298 default:
23299 gcc_unreachable ();
23301 emit_insn (fn (reg, mem));
23304 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23305 instruction in MODE. */
23307 static void
23308 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23310 rtx (*fn) (rtx, rtx, rtx) = NULL;
23312 switch (mode)
23314 case E_QImode:
23315 fn = gen_store_conditionalqi;
23316 break;
23317 case E_HImode:
23318 fn = gen_store_conditionalhi;
23319 break;
23320 case E_SImode:
23321 fn = gen_store_conditionalsi;
23322 break;
23323 case E_DImode:
23324 fn = gen_store_conditionaldi;
23325 break;
23326 case E_TImode:
23327 fn = gen_store_conditionalti;
23328 break;
23329 default:
23330 gcc_unreachable ();
23333   /* Emit sync before stwcx. to address PPC405 erratum 77.  */
23334 if (PPC405_ERRATUM77)
23335 emit_insn (gen_hwsync ());
23337 emit_insn (fn (res, mem, val));
23340 /* Expand barriers before and after a load_locked/store_cond sequence. */
23342 static rtx
23343 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23345 rtx addr = XEXP (mem, 0);
23347 if (!legitimate_indirect_address_p (addr, reload_completed)
23348 && !legitimate_indexed_address_p (addr, reload_completed))
23350 addr = force_reg (Pmode, addr);
23351 mem = replace_equiv_address_nv (mem, addr);
23354 switch (model)
23356 case MEMMODEL_RELAXED:
23357 case MEMMODEL_CONSUME:
23358 case MEMMODEL_ACQUIRE:
23359 break;
23360 case MEMMODEL_RELEASE:
23361 case MEMMODEL_ACQ_REL:
23362 emit_insn (gen_lwsync ());
23363 break;
23364 case MEMMODEL_SEQ_CST:
23365 emit_insn (gen_hwsync ());
23366 break;
23367 default:
23368 gcc_unreachable ();
23370 return mem;
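/* Note the split with rs6000_post_atomic_barrier below: release ordering
   is provided by the lwsync/hwsync emitted here before the sequence,
   while acquire ordering comes from the isync emitted after it.  */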
23373 static void
23374 rs6000_post_atomic_barrier (enum memmodel model)
23376 switch (model)
23378 case MEMMODEL_RELAXED:
23379 case MEMMODEL_CONSUME:
23380 case MEMMODEL_RELEASE:
23381 break;
23382 case MEMMODEL_ACQUIRE:
23383 case MEMMODEL_ACQ_REL:
23384 case MEMMODEL_SEQ_CST:
23385 emit_insn (gen_isync ());
23386 break;
23387 default:
23388 gcc_unreachable ();
23392 /* A subroutine of the various atomic expanders. For sub-word operations,
23393 we must adjust things to operate on SImode. Given the original MEM,
23394 return a new aligned memory. Also build and return the quantities by
23395 which to shift and mask. */
23397 static rtx
23398 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23400 rtx addr, align, shift, mask, mem;
23401 HOST_WIDE_INT shift_mask;
23402 machine_mode mode = GET_MODE (orig_mem);
23404 /* For smaller modes, we have to implement this via SImode. */
23405 shift_mask = (mode == QImode ? 0x18 : 0x10);
23407 addr = XEXP (orig_mem, 0);
23408 addr = force_reg (GET_MODE (addr), addr);
23410 /* Aligned memory containing subword. Generate a new memory. We
23411 do not want any of the existing MEM_ATTR data, as we're now
23412 accessing memory outside the original object. */
23413 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23414 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23415 mem = gen_rtx_MEM (SImode, align);
23416 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23417 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23418 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23420 /* Shift amount for subword relative to aligned word. */
23421 shift = gen_reg_rtx (SImode);
23422 addr = gen_lowpart (SImode, addr);
23423 rtx tmp = gen_reg_rtx (SImode);
23424 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23425 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23426 if (BYTES_BIG_ENDIAN)
23427 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23428 shift, 1, OPTAB_LIB_WIDEN);
23429 *pshift = shift;
23431 /* Mask for insertion. */
23432 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23433 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23434 *pmask = mask;
23436 return mem;
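/* E.g. for a QImode operand at address A this returns the SImode word at
   (A & -4), with *PSHIFT = (A & 3) * 8 (complemented on big-endian so
   the byte sits at the other end of the word) and *PMASK = 0xff shifted
   left by that amount.  */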
23439 /* A subroutine of the various atomic expanders. For sub-word operands,
23440    combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
23442 static rtx
23443 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23445 rtx x;
23447 x = gen_reg_rtx (SImode);
23448 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23449 gen_rtx_NOT (SImode, mask),
23450 oldval)));
23452 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23454 return x;
23457 /* A subroutine of the various atomic expanders. For sub-word operands,
23458 extract WIDE to NARROW via SHIFT. */
23460 static void
23461 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23463 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23464 wide, 1, OPTAB_LIB_WIDEN);
23465 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23468 /* Expand an atomic compare and swap operation. */
23470 void
23471 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23473 rtx boolval, retval, mem, oldval, newval, cond;
23474 rtx label1, label2, x, mask, shift;
23475 machine_mode mode, orig_mode;
23476 enum memmodel mod_s, mod_f;
23477 bool is_weak;
23479 boolval = operands[0];
23480 retval = operands[1];
23481 mem = operands[2];
23482 oldval = operands[3];
23483 newval = operands[4];
23484 is_weak = (INTVAL (operands[5]) != 0);
23485 mod_s = memmodel_base (INTVAL (operands[6]));
23486 mod_f = memmodel_base (INTVAL (operands[7]));
23487 orig_mode = mode = GET_MODE (mem);
23489 mask = shift = NULL_RTX;
23490 if (mode == QImode || mode == HImode)
23492       /* Before power8, we didn't have access to lbarx/lharx, so generate a
23493          lwarx plus shift/mask operations.  With power8, we need to do the
23494 comparison in SImode, but the store is still done in QI/HImode. */
23495 oldval = convert_modes (SImode, mode, oldval, 1);
23497 if (!TARGET_SYNC_HI_QI)
23499 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23501           /* Shift and mask OLDVAL into position within the word.  */
23502 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23503 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23505 /* Shift and mask NEWVAL into position within the word. */
23506 newval = convert_modes (SImode, mode, newval, 1);
23507 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23508 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23511 /* Prepare to adjust the return value. */
23512 retval = gen_reg_rtx (SImode);
23513 mode = SImode;
23515 else if (reg_overlap_mentioned_p (retval, oldval))
23516 oldval = copy_to_reg (oldval);
23518 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23519 oldval = copy_to_mode_reg (mode, oldval);
23521 if (reg_overlap_mentioned_p (retval, newval))
23522 newval = copy_to_reg (newval);
23524 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23526 label1 = NULL_RTX;
23527 if (!is_weak)
23529 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23530 emit_label (XEXP (label1, 0));
23532 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23534 emit_load_locked (mode, retval, mem);
23536 x = retval;
23537 if (mask)
23538 x = expand_simple_binop (SImode, AND, retval, mask,
23539 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23541 cond = gen_reg_rtx (CCmode);
23542 /* If we have TImode, synthesize a comparison. */
23543 if (mode != TImode)
23544 x = gen_rtx_COMPARE (CCmode, x, oldval);
23545 else
23547 rtx xor1_result = gen_reg_rtx (DImode);
23548 rtx xor2_result = gen_reg_rtx (DImode);
23549 rtx or_result = gen_reg_rtx (DImode);
23550 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23551 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23552 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23553 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23555 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23556 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23557 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23558 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23561 emit_insn (gen_rtx_SET (cond, x));
23563 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23564 emit_unlikely_jump (x, label2);
23566 x = newval;
23567 if (mask)
23568 x = rs6000_mask_atomic_subword (retval, newval, mask);
23570 emit_store_conditional (orig_mode, cond, mem, x);
23572 if (!is_weak)
23574 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23575 emit_unlikely_jump (x, label1);
23578 if (!is_mm_relaxed (mod_f))
23579 emit_label (XEXP (label2, 0));
23581 rs6000_post_atomic_barrier (mod_s);
23583 if (is_mm_relaxed (mod_f))
23584 emit_label (XEXP (label2, 0));
23586 if (shift)
23587 rs6000_finish_atomic_subword (operands[1], retval, shift);
23588 else if (mode != GET_MODE (operands[1]))
23589 convert_move (operands[1], retval, 1);
23591 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23592 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23593 emit_insn (gen_rtx_SET (boolval, x));
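/* Illustrative sketch only, not emitted verbatim: for a strong SImode
   compare-and-swap with SEQ_CST ordering, the RTL built above
   corresponds roughly to this loop (register names are placeholders):

	sync			# rs6000_pre_atomic_barrier
     1:	lwarx   rT,0,rMEM	# emit_load_locked
	cmpw    cr0,rT,rOLD
	bne-    cr0,2f		# mismatch: jump to label2
	stwcx.  rNEW,0,rMEM	# emit_store_conditional
	bne-    cr0,1b		# reservation lost: retry (strong CAS)
     2:	isync			# rs6000_post_atomic_barrier

   A weak CAS omits the backwards branch, and the actual barriers
   depend on the memory models in operands[6]/operands[7].  */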
23596 /* Expand an atomic exchange operation. */
23598 void
23599 rs6000_expand_atomic_exchange (rtx operands[])
23601 rtx retval, mem, val, cond;
23602 machine_mode mode;
23603 enum memmodel model;
23604 rtx label, x, mask, shift;
23606 retval = operands[0];
23607 mem = operands[1];
23608 val = operands[2];
23609 model = memmodel_base (INTVAL (operands[3]));
23610 mode = GET_MODE (mem);
23612 mask = shift = NULL_RTX;
23613 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23615 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23617 /* Shift and mask VAL into position within the word. */
23618 val = convert_modes (SImode, mode, val, 1);
23619 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23620 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23622 /* Prepare to adjust the return value. */
23623 retval = gen_reg_rtx (SImode);
23624 mode = SImode;
23627 mem = rs6000_pre_atomic_barrier (mem, model);
23629 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23630 emit_label (XEXP (label, 0));
23632 emit_load_locked (mode, retval, mem);
23634 x = val;
23635 if (mask)
23636 x = rs6000_mask_atomic_subword (retval, val, mask);
23638 cond = gen_reg_rtx (CCmode);
23639 emit_store_conditional (mode, cond, mem, x);
23641 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23642 emit_unlikely_jump (x, label);
23644 rs6000_post_atomic_barrier (model);
23646 if (shift)
23647 rs6000_finish_atomic_subword (operands[0], retval, shift);
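/* Sketch for illustration: an SImode exchange with ACQUIRE ordering
   reduces to roughly

     1:	lwarx   rRET,0,rMEM	# load-locked old value
	stwcx.  rVAL,0,rMEM	# try to store the new value
	bne-    1b		# reservation lost, retry
	isync			# acquire fence

   with the QImode/HImode-without-lbarx case additionally shifting VAL
   into its byte lane and merging via rs6000_mask_atomic_subword.  */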
23650 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23651 to perform. MEM is the memory on which to operate. VAL is the second
23652 operand of the binary operator. BEFORE and AFTER are optional locations to
23653 return the value of MEM either before or after the operation. MODEL_RTX
23654 is a CONST_INT containing the memory model to use. */
23656 void
23657 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23658 rtx orig_before, rtx orig_after, rtx model_rtx)
23660 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23661 machine_mode mode = GET_MODE (mem);
23662 machine_mode store_mode = mode;
23663 rtx label, x, cond, mask, shift;
23664 rtx before = orig_before, after = orig_after;
23666 mask = shift = NULL_RTX;
23667 /* On power8, we want to use SImode for the operation. On previous systems,
23668 use the operation in a subword and shift/mask to get the proper byte or
23669 halfword. */
23670 if (mode == QImode || mode == HImode)
23672 if (TARGET_SYNC_HI_QI)
23674 val = convert_modes (SImode, mode, val, 1);
23676 /* Prepare to adjust the return value. */
23677 before = gen_reg_rtx (SImode);
23678 if (after)
23679 after = gen_reg_rtx (SImode);
23680 mode = SImode;
23682 else
23684 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23686 /* Shift and mask VAL into position within the word. */
23687 val = convert_modes (SImode, mode, val, 1);
23688 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23689 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23691 switch (code)
23693 case IOR:
23694 case XOR:
23695 /* We've already zero-extended VAL. That is sufficient to
23696 make certain that it does not affect other bits. */
23697 mask = NULL;
23698 break;
23700 case AND:
23701 /* If we make certain that all of the other bits in VAL are
23702 set, that will be sufficient to not affect other bits. */
23703 x = gen_rtx_NOT (SImode, mask);
23704 x = gen_rtx_IOR (SImode, x, val);
23705 emit_insn (gen_rtx_SET (val, x));
23706 mask = NULL;
23707 break;
23709 case NOT:
23710 case PLUS:
23711 case MINUS:
23712 /* These will all affect bits outside the field and need
23713 adjustment via MASK within the loop. */
23714 break;
23716 default:
23717 gcc_unreachable ();
23720 /* Prepare to adjust the return value. */
23721 before = gen_reg_rtx (SImode);
23722 if (after)
23723 after = gen_reg_rtx (SImode);
23724 store_mode = mode = SImode;
23728 mem = rs6000_pre_atomic_barrier (mem, model);
23730 label = gen_label_rtx ();
23731 emit_label (label);
23732 label = gen_rtx_LABEL_REF (VOIDmode, label);
23734 if (before == NULL_RTX)
23735 before = gen_reg_rtx (mode);
23737 emit_load_locked (mode, before, mem);
23739 if (code == NOT)
23741 x = expand_simple_binop (mode, AND, before, val,
23742 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23743 after = expand_simple_unop (mode, NOT, x, after, 1);
23745 else
23747 after = expand_simple_binop (mode, code, before, val,
23748 after, 1, OPTAB_LIB_WIDEN);
23751 x = after;
23752 if (mask)
23754 x = expand_simple_binop (SImode, AND, after, mask,
23755 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23756 x = rs6000_mask_atomic_subword (before, x, mask);
23758 else if (store_mode != mode)
23759 x = convert_modes (store_mode, mode, x, 1);
23761 cond = gen_reg_rtx (CCmode);
23762 emit_store_conditional (store_mode, cond, mem, x);
23764 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23765 emit_unlikely_jump (x, label);
23767 rs6000_post_atomic_barrier (model);
23769 if (shift)
23771 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23772 then do the calculations in a SImode register. */
23773 if (orig_before)
23774 rs6000_finish_atomic_subword (orig_before, before, shift);
23775 if (orig_after)
23776 rs6000_finish_atomic_subword (orig_after, after, shift);
23778 else if (store_mode != mode)
23780 /* QImode/HImode on machines with lbarx/lharx where we do the native
23781 operation and then do the calculations in a SImode register. */
23782 if (orig_before)
23783 convert_move (orig_before, before, 1);
23784 if (orig_after)
23785 convert_move (orig_after, after, 1);
23787 else if (orig_after && after != orig_after)
23788 emit_move_insn (orig_after, after);
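/* Worked example (assumed layout; the precise details live in
   rs6000_adjust_atomic_subword): an atomic QImode PLUS on a
   little-endian target without lbarx, at byte offset 2 within its
   aligned word, uses shift = 2*8 = 16 and mask = 0xff << 16 =
   0x00ff0000.  VAL is shifted left by 16, the SImode add runs inside
   the loop, and the bits outside the mask are repaired from BEFORE by
   rs6000_mask_atomic_subword before the stwcx.  On big-endian the
   byte lanes are numbered from the other end, so the same byte gets
   shift = (3 - 2) * 8 = 8.  */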
23791 /* Emit instructions to move SRC to DST. Called by splitters for
23792 multi-register moves. It will emit at most one instruction for
23793 each register that is accessed; that is, it won't emit li/lis pairs
23794 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23795 register. */
23797 void
23798 rs6000_split_multireg_move (rtx dst, rtx src)
23800 /* The register number of the first register being moved. */
23801 int reg;
23802 /* The mode that is to be moved. */
23803 machine_mode mode;
23804 /* The mode that the move is being done in, and its size. */
23805 machine_mode reg_mode;
23806 int reg_mode_size;
23807 /* The number of registers that will be moved. */
23808 int nregs;
23810 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23811 mode = GET_MODE (dst);
23812 nregs = hard_regno_nregs[reg][mode];
23813 if (FP_REGNO_P (reg))
23814 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23815 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23816 else if (ALTIVEC_REGNO_P (reg))
23817 reg_mode = V16QImode;
23818 else
23819 reg_mode = word_mode;
23820 reg_mode_size = GET_MODE_SIZE (reg_mode);
23822 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23824 /* TDmode residing in FP registers is special, since the ISA requires that
23825 the lower-numbered word of a register pair is always the most significant
23826 word, even in little-endian mode. This does not match the usual subreg
23827 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23828 the appropriate constituent registers "by hand" in little-endian mode.
23830 Note we do not need to check for destructive overlap here since TDmode
23831 can only reside in even/odd register pairs. */
23832 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23834 rtx p_src, p_dst;
23835 int i;
23837 for (i = 0; i < nregs; i++)
23839 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23840 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23841 else
23842 p_src = simplify_gen_subreg (reg_mode, src, mode,
23843 i * reg_mode_size);
23845 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23846 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23847 else
23848 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23849 i * reg_mode_size);
23851 emit_insn (gen_rtx_SET (p_dst, p_src));
23854 return;
23857 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23859 /* Move register range backwards, if we might have destructive
23860 overlap. */
23861 int i;
23862 for (i = nregs - 1; i >= 0; i--)
23863 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23864 i * reg_mode_size),
23865 simplify_gen_subreg (reg_mode, src, mode,
23866 i * reg_mode_size)));
23868 else
23870 int i;
23871 int j = -1;
23872 bool used_update = false;
23873 rtx restore_basereg = NULL_RTX;
23875 if (MEM_P (src) && INT_REGNO_P (reg))
23877 rtx breg;
23879 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23880 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23882 rtx delta_rtx;
23883 breg = XEXP (XEXP (src, 0), 0);
23884 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23885 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23886 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23887 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23888 src = replace_equiv_address (src, breg);
23890 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23892 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23894 rtx basereg = XEXP (XEXP (src, 0), 0);
23895 if (TARGET_UPDATE)
23897 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23898 emit_insn (gen_rtx_SET (ndst,
23899 gen_rtx_MEM (reg_mode,
23900 XEXP (src, 0))));
23901 used_update = true;
23903 else
23904 emit_insn (gen_rtx_SET (basereg,
23905 XEXP (XEXP (src, 0), 1)));
23906 src = replace_equiv_address (src, basereg);
23908 else
23910 rtx basereg = gen_rtx_REG (Pmode, reg);
23911 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23912 src = replace_equiv_address (src, basereg);
23916 breg = XEXP (src, 0);
23917 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23918 breg = XEXP (breg, 0);
23920 /* If the base register we are using to address memory is
23921 also a destination reg, then change that register last. */
23922 if (REG_P (breg)
23923 && REGNO (breg) >= REGNO (dst)
23924 && REGNO (breg) < REGNO (dst) + nregs)
23925 j = REGNO (breg) - REGNO (dst);
23927 else if (MEM_P (dst) && INT_REGNO_P (reg))
23929 rtx breg;
23931 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23932 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23934 rtx delta_rtx;
23935 breg = XEXP (XEXP (dst, 0), 0);
23936 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23937 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23938 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23940 /* We have to update the breg before doing the store.
23941 Use store with update, if available. */
23943 if (TARGET_UPDATE)
23945 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23946 emit_insn (TARGET_32BIT
23947 ? (TARGET_POWERPC64
23948 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23949 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23950 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23951 used_update = true;
23953 else
23954 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23955 dst = replace_equiv_address (dst, breg);
23957 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23958 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23960 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23962 rtx basereg = XEXP (XEXP (dst, 0), 0);
23963 if (TARGET_UPDATE)
23965 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23966 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23967 XEXP (dst, 0)),
23968 nsrc));
23969 used_update = true;
23971 else
23972 emit_insn (gen_rtx_SET (basereg,
23973 XEXP (XEXP (dst, 0), 1)));
23974 dst = replace_equiv_address (dst, basereg);
23976 else
23978 rtx basereg = XEXP (XEXP (dst, 0), 0);
23979 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23980 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23981 && REG_P (basereg)
23982 && REG_P (offsetreg)
23983 && REGNO (basereg) != REGNO (offsetreg));
23984 if (REGNO (basereg) == 0)
23986 rtx tmp = offsetreg;
23987 offsetreg = basereg;
23988 basereg = tmp;
23990 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23991 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23992 dst = replace_equiv_address (dst, basereg);
23995 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23996 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
23999 for (i = 0; i < nregs; i++)
24001 /* Calculate index to next subword. */
24002 ++j;
24003 if (j == nregs)
24004 j = 0;
24006 /* If compiler already emitted move of first word by
24007 store with update, no need to do anything. */
24008 if (j == 0 && used_update)
24009 continue;
24011 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24012 j * reg_mode_size),
24013 simplify_gen_subreg (reg_mode, src, mode,
24014 j * reg_mode_size)));
24016 if (restore_basereg != NULL_RTX)
24017 emit_insn (restore_basereg);
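/* Example of the overlap handling above (hypothetical registers): a
   TImode move from r9:r10 into r10:r11 on a 64-bit target splits into
   two DImode moves.  Since REGNO (src) < REGNO (dst), copying forwards
   would clobber r10 before it is read, so the backwards loop runs
   r11 <- r10 first, then r10 <- r9.  */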
24022 /* This page contains routines that are used to determine what the
24023 function prologue and epilogue code will do and write them out. */
24025 /* Determine whether register REG really needs to be saved. */
24027 static bool
24028 save_reg_p (int reg)
24030 /* We need to mark the PIC offset register live for the same conditions
24031 as it is set up, otherwise it won't be saved before we clobber it. */
24033 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24035 /* When calling eh_return, we must return true for all the cases
24036 where conditional_register_usage marks the PIC offset reg
24037 call used. */
24038 if (TARGET_TOC && TARGET_MINIMAL_TOC
24039 && (crtl->calls_eh_return
24040 || df_regs_ever_live_p (reg)
24041 || !constant_pool_empty_p ()))
24042 return true;
24044 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24045 && flag_pic)
24046 return true;
24049 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24052 /* Return the first fixed-point register that is required to be
24053 saved. 32 if none. */
24055 static int
24056 first_reg_to_save (void)
24058 int first_reg;
24060 /* Find lowest numbered live register. */
24061 for (first_reg = 13; first_reg <= 31; first_reg++)
24062 if (save_reg_p (first_reg))
24063 break;
24065 #if TARGET_MACHO
24066 if (flag_pic
24067 && crtl->uses_pic_offset_table
24068 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24069 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24070 #endif
24072 return first_reg;
24075 /* Similar, for FP regs. */
24077 static int
24078 first_fp_reg_to_save (void)
24080 int first_reg;
24082 /* Find lowest numbered live register. */
24083 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24084 if (save_reg_p (first_reg))
24085 break;
24087 return first_reg;
24090 /* Similar, for AltiVec regs. */
24092 static int
24093 first_altivec_reg_to_save (void)
24095 int i;
24097 /* Stack frame remains as is unless we are in AltiVec ABI. */
24098 if (! TARGET_ALTIVEC_ABI)
24099 return LAST_ALTIVEC_REGNO + 1;
24101 /* On Darwin, the unwind routines are compiled without
24102 TARGET_ALTIVEC, and use save_world to save/restore the
24103 altivec registers when necessary. */
24104 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24105 && ! TARGET_ALTIVEC)
24106 return FIRST_ALTIVEC_REGNO + 20;
24108 /* Find lowest numbered live register. */
24109 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24110 if (save_reg_p (i))
24111 break;
24113 return i;
24116 /* Return a 32-bit mask of the AltiVec registers we need to set in
24117 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24118 the 32-bit word is 0. */
24120 static unsigned int
24121 compute_vrsave_mask (void)
24123 unsigned int i, mask = 0;
24125 /* On Darwin, the unwind routines are compiled without
24126 TARGET_ALTIVEC, and use save_world to save/restore the
24127 call-saved altivec registers when necessary. */
24128 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24129 && ! TARGET_ALTIVEC)
24130 mask |= 0xFFF;
24132 /* First, find out if we use _any_ altivec registers. */
24133 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24134 if (df_regs_ever_live_p (i))
24135 mask |= ALTIVEC_REG_BIT (i);
24137 if (mask == 0)
24138 return mask;
24140 /* Next, remove the argument registers from the set. These must
24141 be in the VRSAVE mask set by the caller, so we don't need to add
24142 them in again. More importantly, the mask we compute here is
24143 used to generate CLOBBERs in the set_vrsave insn, and we do not
24144 wish the argument registers to die. */
24145 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24146 mask &= ~ALTIVEC_REG_BIT (i);
24148 /* Similarly, remove the return value from the set. */
24150 bool yes = false;
24151 diddle_return_value (is_altivec_return_reg, &yes);
24152 if (yes)
24153 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24156 return mask;
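/* Example, assuming ALTIVEC_REG_BIT (vN) is 1 << (31 - N) as the
   comment above describes: a function whose only live vector registers
   are v20 and v21, and which neither takes nor returns vector values,
   ends up with mask = (1 << 11) | (1 << 10) = 0xc00.  */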
24159 /* For a very restricted set of circumstances, we can cut down the
24160 size of prologues/epilogues by calling our own save/restore-the-world
24161 routines. */
24163 static void
24164 compute_save_world_info (rs6000_stack_t *info)
24166 info->world_save_p = 1;
24167 info->world_save_p
24168 = (WORLD_SAVE_P (info)
24169 && DEFAULT_ABI == ABI_DARWIN
24170 && !cfun->has_nonlocal_label
24171 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24172 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24173 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24174 && info->cr_save_p);
24176 /* This will not work in conjunction with sibcalls. Make sure there
24177 are none. (This check is expensive, but seldom executed.) */
24178 if (WORLD_SAVE_P (info))
24180 rtx_insn *insn;
24181 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24182 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24184 info->world_save_p = 0;
24185 break;
24189 if (WORLD_SAVE_P (info))
24191 /* Even if we're not touching VRsave, make sure there's room on the
24192 stack for it, if it looks like we're calling SAVE_WORLD, which
24193 will attempt to save it. */
24194 info->vrsave_size = 4;
24196 /* If we are going to save the world, we need to save the link register too. */
24197 info->lr_save_p = 1;
24199 /* "Save" the VRsave register too if we're saving the world. */
24200 if (info->vrsave_mask == 0)
24201 info->vrsave_mask = compute_vrsave_mask ();
24203 /* Because the Darwin register save/restore routines only handle
24204 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24205 check. */
24206 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24207 && (info->first_altivec_reg_save
24208 >= FIRST_SAVED_ALTIVEC_REGNO));
24211 return;
24215 static void
24216 is_altivec_return_reg (rtx reg, void *xyes)
24218 bool *yes = (bool *) xyes;
24219 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24220 *yes = true;
24224 /* Return whether REG is a global user reg or has been specified by
24225 -ffixed-REG. We should not restore these, and so cannot use
24226 lmw or out-of-line restore functions if there are any. We also
24227 can't save them (well, emit frame notes for them), because frame
24228 unwinding during exception handling will restore saved registers. */
24230 static bool
24231 fixed_reg_p (int reg)
24233 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24234 backend sets it, overriding anything the user might have given. */
24235 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24236 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24237 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24238 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24239 return false;
24241 return fixed_regs[reg];
24244 /* Determine the strategy for savings/restoring registers. */
24246 enum {
24247 SAVE_MULTIPLE = 0x1,
24248 SAVE_INLINE_GPRS = 0x2,
24249 SAVE_INLINE_FPRS = 0x4,
24250 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24251 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24252 SAVE_INLINE_VRS = 0x20,
24253 REST_MULTIPLE = 0x100,
24254 REST_INLINE_GPRS = 0x200,
24255 REST_INLINE_FPRS = 0x400,
24256 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24257 REST_INLINE_VRS = 0x1000
24258 };
24260 static int
24261 rs6000_savres_strategy (rs6000_stack_t *info,
24262 bool using_static_chain_p)
24264 int strategy = 0;
24266 /* Select between in-line and out-of-line save and restore of regs.
24267 First, all the obvious cases where we don't use out-of-line. */
24268 if (crtl->calls_eh_return
24269 || cfun->machine->ra_need_lr)
24270 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24271 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24272 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24274 if (info->first_gp_reg_save == 32)
24275 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24277 if (info->first_fp_reg_save == 64
24278 /* The out-of-line FP routines use double-precision stores;
24279 we can't use those routines if we don't have such stores. */
24280 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24281 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24283 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24284 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24286 /* Define cutoff for using out-of-line functions to save registers. */
24287 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24289 if (!optimize_size)
24291 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24292 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24293 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24295 else
24297 /* Prefer out-of-line restore if it will exit. */
24298 if (info->first_fp_reg_save > 61)
24299 strategy |= SAVE_INLINE_FPRS;
24300 if (info->first_gp_reg_save > 29)
24302 if (info->first_fp_reg_save == 64)
24303 strategy |= SAVE_INLINE_GPRS;
24304 else
24305 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24307 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24308 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24311 else if (DEFAULT_ABI == ABI_DARWIN)
24313 if (info->first_fp_reg_save > 60)
24314 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24315 if (info->first_gp_reg_save > 29)
24316 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24317 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24319 else
24321 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24322 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24323 || info->first_fp_reg_save > 61)
24324 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24325 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24326 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24329 /* Don't bother to try to save things out-of-line if r11 is occupied
24330 by the static chain. It would require too much fiddling and the
24331 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24332 pointer on Darwin, and AIX uses r1 or r12. */
24333 if (using_static_chain_p
24334 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24335 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24336 | SAVE_INLINE_GPRS
24337 | SAVE_INLINE_VRS);
24339 /* Don't ever restore fixed regs. That means we can't use the
24340 out-of-line register restore functions if a fixed reg is in the
24341 range of regs restored. */
24342 if (!(strategy & REST_INLINE_FPRS))
24343 for (int i = info->first_fp_reg_save; i < 64; i++)
24344 if (fixed_regs[i])
24346 strategy |= REST_INLINE_FPRS;
24347 break;
24350 /* We can only use the out-of-line routines to restore fprs if we've
24351 saved all the registers from first_fp_reg_save in the prologue.
24352 Otherwise, we risk loading garbage. Of course, if we have saved
24353 out-of-line then we know we haven't skipped any fprs. */
24354 if ((strategy & SAVE_INLINE_FPRS)
24355 && !(strategy & REST_INLINE_FPRS))
24356 for (int i = info->first_fp_reg_save; i < 64; i++)
24357 if (!save_reg_p (i))
24359 strategy |= REST_INLINE_FPRS;
24360 break;
24363 /* Similarly, for altivec regs. */
24364 if (!(strategy & REST_INLINE_VRS))
24365 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24366 if (fixed_regs[i])
24368 strategy |= REST_INLINE_VRS;
24369 break;
24372 if ((strategy & SAVE_INLINE_VRS)
24373 && !(strategy & REST_INLINE_VRS))
24374 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24375 if (!save_reg_p (i))
24377 strategy |= REST_INLINE_VRS;
24378 break;
24381 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24382 saved is an out-of-line save or restore. Set up the value for
24383 the next test (excluding out-of-line gprs). */
24384 bool lr_save_p = (info->lr_save_p
24385 || !(strategy & SAVE_INLINE_FPRS)
24386 || !(strategy & SAVE_INLINE_VRS)
24387 || !(strategy & REST_INLINE_FPRS)
24388 || !(strategy & REST_INLINE_VRS));
24390 if (TARGET_MULTIPLE
24391 && !TARGET_POWERPC64
24392 && info->first_gp_reg_save < 31
24393 && !(flag_shrink_wrap
24394 && flag_shrink_wrap_separate
24395 && optimize_function_for_speed_p (cfun)))
24397 int count = 0;
24398 for (int i = info->first_gp_reg_save; i < 32; i++)
24399 if (save_reg_p (i))
24400 count++;
24402 if (count <= 1)
24403 /* Don't use store multiple if only one reg needs to be
24404 saved. This can occur for example when the ABI_V4 pic reg
24405 (r30) needs to be saved to make calls, but r31 is not
24406 used. */
24407 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24408 else
24410 /* Prefer store multiple for saves over out-of-line
24411 routines, since the store-multiple instruction will
24412 always be smaller. */
24413 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24415 /* The situation is more complicated with load multiple.
24416 We'd prefer to use the out-of-line routines for restores,
24417 since the "exit" out-of-line routines can handle the
24418 restore of LR and the frame teardown. However it doesn't
24419 make sense to use the out-of-line routine if that is the
24420 only reason we'd need to save LR, and we can't use the
24421 "exit" out-of-line gpr restore if we have saved some
24422 fprs. In those cases it is advantageous to use load
24423 multiple when available. */
24424 if (info->first_fp_reg_save != 64 || !lr_save_p)
24425 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24429 /* Using the "exit" out-of-line routine does not improve code size
24430 if using it would require lr to be saved and if only saving one
24431 or two gprs. */
24432 else if (!lr_save_p && info->first_gp_reg_save > 29)
24433 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24435 /* Don't ever restore fixed regs. */
24436 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24437 for (int i = info->first_gp_reg_save; i < 32; i++)
24438 if (fixed_reg_p (i))
24440 strategy |= REST_INLINE_GPRS;
24441 strategy &= ~REST_MULTIPLE;
24442 break;
24445 /* We can only use load multiple or the out-of-line routines to
24446 restore gprs if we've saved all the registers from
24447 first_gp_reg_save. Otherwise, we risk loading garbage.
24448 Of course, if we have saved out-of-line or used stmw then we know
24449 we haven't skipped any gprs. */
24450 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24451 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24452 for (int i = info->first_gp_reg_save; i < 32; i++)
24453 if (!save_reg_p (i))
24455 strategy |= REST_INLINE_GPRS;
24456 strategy &= ~REST_MULTIPLE;
24457 break;
24460 if (TARGET_ELF && TARGET_64BIT)
24462 if (!(strategy & SAVE_INLINE_FPRS))
24463 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24464 else if (!(strategy & SAVE_INLINE_GPRS)
24465 && info->first_fp_reg_save == 64)
24466 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24468 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24469 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24471 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24472 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24474 return strategy;
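/* Illustrative example (assuming TARGET_MULTIPLE): a 32-bit SVR4
   function compiled at -Os that saves r29..r31 and no FPRs or VRs
   reaches the TARGET_MULTIPLE block above with count = 3, so it picks
   SAVE_INLINE_GPRS | SAVE_MULTIPLE (one stmw) and, since no FPRs are
   saved and LR is not otherwise needed, REST_INLINE_GPRS |
   REST_MULTIPLE (one lmw) as well.  */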
24477 /* Calculate the stack information for the current function. This is
24478 complicated by having two separate calling sequences, the AIX calling
24479 sequence and the V.4 calling sequence.
24481 AIX (and Darwin/Mac OS X) stack frames look like:
24482 32-bit 64-bit
24483 SP----> +---------------------------------------+
24484 | back chain to caller | 0 0
24485 +---------------------------------------+
24486 | saved CR | 4 8 (8-11)
24487 +---------------------------------------+
24488 | saved LR | 8 16
24489 +---------------------------------------+
24490 | reserved for compilers | 12 24
24491 +---------------------------------------+
24492 | reserved for binders | 16 32
24493 +---------------------------------------+
24494 | saved TOC pointer | 20 40
24495 +---------------------------------------+
24496 | Parameter save area (+padding*) (P) | 24 48
24497 +---------------------------------------+
24498 | Alloca space (A) | 24+P etc.
24499 +---------------------------------------+
24500 | Local variable space (L) | 24+P+A
24501 +---------------------------------------+
24502 | Float/int conversion temporary (X) | 24+P+A+L
24503 +---------------------------------------+
24504 | Save area for AltiVec registers (W) | 24+P+A+L+X
24505 +---------------------------------------+
24506 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24507 +---------------------------------------+
24508 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24509 +---------------------------------------+
24510 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24511 +---------------------------------------+
24512 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24513 +---------------------------------------+
24514 old SP->| back chain to caller's caller |
24515 +---------------------------------------+
24517 * If the alloca area is present, the parameter save area is
24518 padded so that the alloca area starts 16-byte aligned.
24520 The required alignment for AIX configurations is two words (i.e., 8
24521 or 16 bytes).
24523 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24525 SP----> +---------------------------------------+
24526 | Back chain to caller | 0
24527 +---------------------------------------+
24528 | Save area for CR | 8
24529 +---------------------------------------+
24530 | Saved LR | 16
24531 +---------------------------------------+
24532 | Saved TOC pointer | 24
24533 +---------------------------------------+
24534 | Parameter save area (+padding*) (P) | 32
24535 +---------------------------------------+
24536 | Alloca space (A) | 32+P
24537 +---------------------------------------+
24538 | Local variable space (L) | 32+P+A
24539 +---------------------------------------+
24540 | Save area for AltiVec registers (W) | 32+P+A+L
24541 +---------------------------------------+
24542 | AltiVec alignment padding (Y) | 32+P+A+L+W
24543 +---------------------------------------+
24544 | Save area for GP registers (G) | 32+P+A+L+W+Y
24545 +---------------------------------------+
24546 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24547 +---------------------------------------+
24548 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24549 +---------------------------------------+
24551 * If the alloca area is present, the parameter save area is
24552 padded so that the alloca area starts 16-byte aligned.
24554 V.4 stack frames look like:
24556 SP----> +---------------------------------------+
24557 | back chain to caller | 0
24558 +---------------------------------------+
24559 | caller's saved LR | 4
24560 +---------------------------------------+
24561 | Parameter save area (+padding*) (P) | 8
24562 +---------------------------------------+
24563 | Alloca space (A) | 8+P
24564 +---------------------------------------+
24565 | Varargs save area (V) | 8+P+A
24566 +---------------------------------------+
24567 | Local variable space (L) | 8+P+A+V
24568 +---------------------------------------+
24569 | Float/int conversion temporary (X) | 8+P+A+V+L
24570 +---------------------------------------+
24571 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24572 +---------------------------------------+
24573 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24574 +---------------------------------------+
24575 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24576 +---------------------------------------+
24577 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24578 +---------------------------------------+
24579 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24580 +---------------------------------------+
24581 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24582 +---------------------------------------+
24583 old SP->| back chain to caller's caller |
24584 +---------------------------------------+
24586 * If the alloca area is present and the required alignment is
24587 16 bytes, the parameter save area is padded so that the
24588 alloca area starts 16-byte aligned.
24590 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24591 given. (But note below and in sysv4.h that we require only 8 and
24592 may round up the size of our stack frame anyway. The historical
24593 reason is early versions of powerpc-linux which didn't properly
24594 align the stack at program startup. A happy side-effect is that
24595 -mno-eabi libraries can be used with -meabi programs.)
24597 The EABI configuration defaults to the V.4 layout. However,
24598 the stack alignment requirements may differ. If -mno-eabi is not
24599 given, the required stack alignment is 8 bytes; if -mno-eabi is
24600 given, the required alignment is 16 bytes. (But see V.4 comment
24601 above.) */
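/* Worked example (hypothetical ELFv2 function): saving r30-r31 and
   f31, with no vectors and no CR fields, gives fp_size = 8 and
   gp_size = 16, so fp_save_offset = -8 and gp_save_offset = -24; the
   save offsets are negative displacements from the top of the frame
   (the caller's SP).  rs6000_stack_info below then rounds the whole
   frame to ABI_STACK_BOUNDARY.  */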
24603 #ifndef ABI_STACK_BOUNDARY
24604 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24605 #endif
24607 static rs6000_stack_t *
24608 rs6000_stack_info (void)
24610 /* We should never be called for thunks, we are not set up for that. */
24611 gcc_assert (!cfun->is_thunk);
24613 rs6000_stack_t *info = &stack_info;
24614 int reg_size = TARGET_32BIT ? 4 : 8;
24615 int ehrd_size;
24616 int ehcr_size;
24617 int save_align;
24618 int first_gp;
24619 HOST_WIDE_INT non_fixed_size;
24620 bool using_static_chain_p;
24622 if (reload_completed && info->reload_completed)
24623 return info;
24625 memset (info, 0, sizeof (*info));
24626 info->reload_completed = reload_completed;
24628 /* Select which calling sequence. */
24629 info->abi = DEFAULT_ABI;
24631 /* Calculate which registers need to be saved & save area size. */
24632 info->first_gp_reg_save = first_reg_to_save ();
24633 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24634 even if it currently looks like we won't. Reload may need it to
24635 get at a constant; if so, it will have already created a constant
24636 pool entry for it. */
24637 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24638 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24639 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24640 && crtl->uses_const_pool
24641 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24642 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24643 else
24644 first_gp = info->first_gp_reg_save;
24646 info->gp_size = reg_size * (32 - first_gp);
24648 info->first_fp_reg_save = first_fp_reg_to_save ();
24649 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24651 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24652 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24653 - info->first_altivec_reg_save);
24655 /* Does this function call anything? */
24656 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24658 /* Determine if we need to save the condition code registers. */
24659 if (save_reg_p (CR2_REGNO)
24660 || save_reg_p (CR3_REGNO)
24661 || save_reg_p (CR4_REGNO))
24663 info->cr_save_p = 1;
24664 if (DEFAULT_ABI == ABI_V4)
24665 info->cr_size = reg_size;
24668 /* If the current function calls __builtin_eh_return, then we need
24669 to allocate stack space for registers that will hold data for
24670 the exception handler. */
24671 if (crtl->calls_eh_return)
24673 unsigned int i;
24674 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24675 continue;
24677 ehrd_size = i * UNITS_PER_WORD;
24679 else
24680 ehrd_size = 0;
24682 /* In the ELFv2 ABI, we also need to allocate space for separate
24683 CR field save areas if the function calls __builtin_eh_return. */
24684 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24686 /* This hard-codes that we have three call-saved CR fields. */
24687 ehcr_size = 3 * reg_size;
24688 /* We do *not* use the regular CR save mechanism. */
24689 info->cr_save_p = 0;
24691 else
24692 ehcr_size = 0;
24694 /* Determine various sizes. */
24695 info->reg_size = reg_size;
24696 info->fixed_size = RS6000_SAVE_AREA;
24697 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24698 if (cfun->calls_alloca)
24699 info->parm_size =
24700 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24701 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24702 else
24703 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24704 TARGET_ALTIVEC ? 16 : 8);
24705 if (FRAME_GROWS_DOWNWARD)
24706 info->vars_size
24707 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24708 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24709 - (info->fixed_size + info->vars_size + info->parm_size);
24711 if (TARGET_ALTIVEC_ABI)
24712 info->vrsave_mask = compute_vrsave_mask ();
24714 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24715 info->vrsave_size = 4;
24717 compute_save_world_info (info);
24719 /* Calculate the offsets. */
24720 switch (DEFAULT_ABI)
24722 case ABI_NONE:
24723 default:
24724 gcc_unreachable ();
24726 case ABI_AIX:
24727 case ABI_ELFv2:
24728 case ABI_DARWIN:
24729 info->fp_save_offset = -info->fp_size;
24730 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24732 if (TARGET_ALTIVEC_ABI)
24734 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24736 /* Align stack so vector save area is on a quadword boundary.
24737 The padding goes above the vectors. */
24738 if (info->altivec_size != 0)
24739 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24741 info->altivec_save_offset = info->vrsave_save_offset
24742 - info->altivec_padding_size
24743 - info->altivec_size;
24744 gcc_assert (info->altivec_size == 0
24745 || info->altivec_save_offset % 16 == 0);
24747 /* Adjust for AltiVec case. */
24748 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24750 else
24751 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24753 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24754 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24755 info->lr_save_offset = 2*reg_size;
24756 break;
24758 case ABI_V4:
24759 info->fp_save_offset = -info->fp_size;
24760 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24761 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24763 if (TARGET_ALTIVEC_ABI)
24765 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24767 /* Align stack so vector save area is on a quadword boundary. */
24768 if (info->altivec_size != 0)
24769 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24771 info->altivec_save_offset = info->vrsave_save_offset
24772 - info->altivec_padding_size
24773 - info->altivec_size;
24775 /* Adjust for AltiVec case. */
24776 info->ehrd_offset = info->altivec_save_offset;
24778 else
24779 info->ehrd_offset = info->cr_save_offset;
24781 info->ehrd_offset -= ehrd_size;
24782 info->lr_save_offset = reg_size;
24785 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24786 info->save_size = RS6000_ALIGN (info->fp_size
24787 + info->gp_size
24788 + info->altivec_size
24789 + info->altivec_padding_size
24790 + ehrd_size
24791 + ehcr_size
24792 + info->cr_size
24793 + info->vrsave_size,
24794 save_align);
24796 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24798 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24799 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24801 /* Determine if we need to save the link register. */
24802 if (info->calls_p
24803 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24804 && crtl->profile
24805 && !TARGET_PROFILE_KERNEL)
24806 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24807 #ifdef TARGET_RELOCATABLE
24808 || (DEFAULT_ABI == ABI_V4
24809 && (TARGET_RELOCATABLE || flag_pic > 1)
24810 && !constant_pool_empty_p ())
24811 #endif
24812 || rs6000_ra_ever_killed ())
24813 info->lr_save_p = 1;
24815 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24816 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24817 && call_used_regs[STATIC_CHAIN_REGNUM]);
24818 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24820 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24821 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24822 || !(info->savres_strategy & SAVE_INLINE_VRS)
24823 || !(info->savres_strategy & REST_INLINE_GPRS)
24824 || !(info->savres_strategy & REST_INLINE_FPRS)
24825 || !(info->savres_strategy & REST_INLINE_VRS))
24826 info->lr_save_p = 1;
24828 if (info->lr_save_p)
24829 df_set_regs_ever_live (LR_REGNO, true);
24831 /* Determine if we need to allocate any stack frame:
24833 For AIX we need to push the stack if a frame pointer is needed
24834 (because the stack might be dynamically adjusted), if we are
24835 debugging, if we make calls, or if the sum of fp_save, gp_save,
24836 and local variables is more than the space needed to save all
24837 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24838 + 18*8 = 288 (GPR13 reserved).
24840 For V.4 we don't have the stack cushion that AIX uses, but assume
24841 that the debugger can handle stackless frames. */
24843 if (info->calls_p)
24844 info->push_p = 1;
24846 else if (DEFAULT_ABI == ABI_V4)
24847 info->push_p = non_fixed_size != 0;
24849 else if (frame_pointer_needed)
24850 info->push_p = 1;
24852 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24853 info->push_p = 1;
24855 else
24856 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24858 return info;
24861 static void
24862 debug_stack_info (rs6000_stack_t *info)
24864 const char *abi_string;
24866 if (! info)
24867 info = rs6000_stack_info ();
24869 fprintf (stderr, "\nStack information for function %s:\n",
24870 ((current_function_decl && DECL_NAME (current_function_decl))
24871 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24872 : "<unknown>"));
24874 switch (info->abi)
24876 default: abi_string = "Unknown"; break;
24877 case ABI_NONE: abi_string = "NONE"; break;
24878 case ABI_AIX: abi_string = "AIX"; break;
24879 case ABI_ELFv2: abi_string = "ELFv2"; break;
24880 case ABI_DARWIN: abi_string = "Darwin"; break;
24881 case ABI_V4: abi_string = "V.4"; break;
24884 fprintf (stderr, "\tABI = %5s\n", abi_string);
24886 if (TARGET_ALTIVEC_ABI)
24887 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24889 if (info->first_gp_reg_save != 32)
24890 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24892 if (info->first_fp_reg_save != 64)
24893 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24895 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24896 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24897 info->first_altivec_reg_save);
24899 if (info->lr_save_p)
24900 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24902 if (info->cr_save_p)
24903 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24905 if (info->vrsave_mask)
24906 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24908 if (info->push_p)
24909 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24911 if (info->calls_p)
24912 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24914 if (info->gp_size)
24915 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24917 if (info->fp_size)
24918 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24920 if (info->altivec_size)
24921 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24922 info->altivec_save_offset);
24924 if (info->vrsave_size)
24925 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24926 info->vrsave_save_offset);
24928 if (info->lr_save_p)
24929 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24931 if (info->cr_save_p)
24932 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24934 if (info->varargs_save_offset)
24935 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24937 if (info->total_size)
24938 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24939 info->total_size);
24941 if (info->vars_size)
24942 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24943 info->vars_size);
24945 if (info->parm_size)
24946 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24948 if (info->fixed_size)
24949 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24951 if (info->gp_size)
24952 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24954 if (info->fp_size)
24955 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24957 if (info->altivec_size)
24958 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24960 if (info->vrsave_size)
24961 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24963 if (info->altivec_padding_size)
24964 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24965 info->altivec_padding_size);
24967 if (info->cr_size)
24968 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24970 if (info->save_size)
24971 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24973 if (info->reg_size != 4)
24974 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24976 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24978 fprintf (stderr, "\n");
24981 rtx
24982 rs6000_return_addr (int count, rtx frame)
24984 /* Currently we don't optimize very well between prolog and body
24985 code, and for PIC code the generated code can actually be quite bad, so
24986 don't try to be too clever here. */
24987 if (count != 0
24988 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24990 cfun->machine->ra_needs_full_frame = 1;
24992 return
24993 gen_rtx_MEM
24994 (Pmode,
24995 memory_address
24996 (Pmode,
24997 plus_constant (Pmode,
24998 copy_to_reg
24999 (gen_rtx_MEM (Pmode,
25000 memory_address (Pmode, frame))),
25001 RETURN_ADDRESS_OFFSET)));
25004 cfun->machine->ra_need_lr = 1;
25005 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25008 /* Say whether a function is a candidate for sibcall handling or not. */
25010 static bool
25011 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25013 tree fntype;
25015 if (decl)
25016 fntype = TREE_TYPE (decl);
25017 else
25018 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25020 /* We can't do it if the called function has more vector parameters
25021 than the current function; there's nowhere to put the VRsave code. */
25022 if (TARGET_ALTIVEC_ABI
25023 && TARGET_ALTIVEC_VRSAVE
25024 && !(decl && decl == current_function_decl))
25026 function_args_iterator args_iter;
25027 tree type;
25028 int nvreg = 0;
25030 /* Functions with vector parameters are required to have a
25031 prototype, so the argument type info must be available
25032 here. */
25033 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25034 if (TREE_CODE (type) == VECTOR_TYPE
25035 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25036 nvreg++;
25038 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25039 if (TREE_CODE (type) == VECTOR_TYPE
25040 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25041 nvreg--;
25043 if (nvreg > 0)
25044 return false;
25047 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25048 functions, because the callee may have a different TOC pointer to
25049 the caller and there's no way to ensure we restore the TOC when
25050 we return. With the secure-plt SYSV ABI we can't make non-local
25051 calls when -fpic/PIC because the plt call stubs use r30. */
25052 if (DEFAULT_ABI == ABI_DARWIN
25053 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25054 && decl
25055 && !DECL_EXTERNAL (decl)
25056 && !DECL_WEAK (decl)
25057 && (*targetm.binds_local_p) (decl))
25058 || (DEFAULT_ABI == ABI_V4
25059 && (!TARGET_SECURE_PLT
25060 || !flag_pic
25061 || (decl
25062 && (*targetm.binds_local_p) (decl)))))
25064 tree attr_list = TYPE_ATTRIBUTES (fntype);
25066 if (!lookup_attribute ("longcall", attr_list)
25067 || lookup_attribute ("shortcall", attr_list))
25068 return true;
25071 return false;
25074 static int
25075 rs6000_ra_ever_killed (void)
25077 rtx_insn *top;
25078 rtx reg;
25079 rtx_insn *insn;
25081 if (cfun->is_thunk)
25082 return 0;
25084 if (cfun->machine->lr_save_state)
25085 return cfun->machine->lr_save_state - 1;
25087 /* regs_ever_live has LR marked as used if any sibcalls are present,
25088 but this should not force saving and restoring in the
25089 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25090 clobbers LR, so that is inappropriate. */
25092 /* Also, the prologue can generate a store into LR that
25093 doesn't really count, like this:
25095 move LR->R0
25096 bcl to set PIC register
25097 move LR->R31
25098 move R0->LR
25100 When we're called from the epilogue, we need to avoid counting
25101 this as a store. */
25103 push_topmost_sequence ();
25104 top = get_insns ();
25105 pop_topmost_sequence ();
25106 reg = gen_rtx_REG (Pmode, LR_REGNO);
25108 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25110 if (INSN_P (insn))
25112 if (CALL_P (insn))
25114 if (!SIBLING_CALL_P (insn))
25115 return 1;
25117 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25118 return 1;
25119 else if (set_of (reg, insn) != NULL_RTX
25120 && !prologue_epilogue_contains (insn))
25121 return 1;
25124 return 0;
25127 /* Emit instructions needed to load the TOC register.
25128 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25129 a constant pool; or for SVR4 -fpic. */
25131 void
25132 rs6000_emit_load_toc_table (int fromprolog)
25134 rtx dest;
25135 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25137 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25139 char buf[30];
25140 rtx lab, tmp1, tmp2, got;
25142 lab = gen_label_rtx ();
25143 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25144 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25145 if (flag_pic == 2)
25147 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25148 need_toc_init = 1;
25150 else
25151 got = rs6000_got_sym ();
25152 tmp1 = tmp2 = dest;
25153 if (!fromprolog)
25155 tmp1 = gen_reg_rtx (Pmode);
25156 tmp2 = gen_reg_rtx (Pmode);
25158 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25159 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25160 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25161 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25163 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25165 emit_insn (gen_load_toc_v4_pic_si ());
25166 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25168 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25170 char buf[30];
25171 rtx temp0 = (fromprolog
25172 ? gen_rtx_REG (Pmode, 0)
25173 : gen_reg_rtx (Pmode));
25175 if (fromprolog)
25177 rtx symF, symL;
25179 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25180 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25182 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25183 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25185 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25186 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25187 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25189 else
25191 rtx tocsym, lab;
25193 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25194 need_toc_init = 1;
25195 lab = gen_label_rtx ();
25196 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25197 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25198 if (TARGET_LINK_STACK)
25199 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25200 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25202 emit_insn (gen_addsi3 (dest, temp0, dest));
25204 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25206 /* This is for AIX code running in non-PIC ELF32. */
25207 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25209 need_toc_init = 1;
25210 emit_insn (gen_elf_high (dest, realsym));
25211 emit_insn (gen_elf_low (dest, dest, realsym));
25213 else
25215 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25217 if (TARGET_32BIT)
25218 emit_insn (gen_load_toc_aix_si (dest));
25219 else
25220 emit_insn (gen_load_toc_aix_di (dest));
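/* The SVR4 PIC cases above all follow the same pattern (sketch, with
   placeholder labels):

	bcl     20,31,.LCF0	# branch-and-link sets LR = &.LCF0
     .LCF0:
	mflr    30		# materialize the PC
	...			# add the link-time distance to the TOC label

   leaving r30 (RS6000_PIC_OFFSET_TABLE_REGNUM) pointing at the TOC/GOT
   base; the individual load_toc_v4_* patterns differ only in how that
   distance is encoded.  */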
25224 /* Emit instructions to restore the link register after determining where
25225 its value has been stored. */
25227 void
25228 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25230 rs6000_stack_t *info = rs6000_stack_info ();
25231 rtx operands[2];
25233 operands[0] = source;
25234 operands[1] = scratch;
25236 if (info->lr_save_p)
25238 rtx frame_rtx = stack_pointer_rtx;
25239 HOST_WIDE_INT sp_offset = 0;
25240 rtx tmp;
25242 if (frame_pointer_needed
25243 || cfun->calls_alloca
25244 || info->total_size > 32767)
25246 tmp = gen_frame_mem (Pmode, frame_rtx);
25247 emit_move_insn (operands[1], tmp);
25248 frame_rtx = operands[1];
25250 else if (info->push_p)
25251 sp_offset = info->total_size;
25253 tmp = plus_constant (Pmode, frame_rtx,
25254 info->lr_save_offset + sp_offset);
25255 tmp = gen_frame_mem (Pmode, tmp);
25256 emit_move_insn (tmp, operands[0]);
25258 else
25259 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25261 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25262 state of lr_save_p so any change from here on would be a bug. In
25263 particular, stop rs6000_ra_ever_killed from considering the SET
25264 of lr we may have added just above. */
25265 cfun->machine->lr_save_state = info->lr_save_p + 1;
25268 static GTY(()) alias_set_type set = -1;
25270 alias_set_type
25271 get_TOC_alias_set (void)
25273 if (set == -1)
25274 set = new_alias_set ();
25275 return set;
25278 /* This returns nonzero if the current function uses the TOC. This is
25279 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25280 is generated by the ABI_V4 load_toc_* patterns.
25281 Return 2 instead of 1 if the load_toc_* pattern is in the function
25282 partition that doesn't start the function. */
25283 #if TARGET_ELF
25284 static int
25285 uses_TOC (void)
25287 rtx_insn *insn;
25288 int ret = 1;
25290 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25292 if (INSN_P (insn))
25294 rtx pat = PATTERN (insn);
25295 int i;
25297 if (GET_CODE (pat) == PARALLEL)
25298 for (i = 0; i < XVECLEN (pat, 0); i++)
25300 rtx sub = XVECEXP (pat, 0, i);
25301 if (GET_CODE (sub) == USE)
25303 sub = XEXP (sub, 0);
25304 if (GET_CODE (sub) == UNSPEC
25305 && XINT (sub, 1) == UNSPEC_TOC)
25306 return ret;
25310 else if (crtl->has_bb_partition
25311 && NOTE_P (insn)
25312 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25313 ret = 2;
25315 return 0;
25317 #endif
25319 rtx
25320 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25322 rtx tocrel, tocreg, hi;
25324 if (TARGET_DEBUG_ADDR)
25326 if (GET_CODE (symbol) == SYMBOL_REF)
25327 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25328 XSTR (symbol, 0));
25329 else
25331 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25332 GET_RTX_NAME (GET_CODE (symbol)));
25333 debug_rtx (symbol);
25337 if (!can_create_pseudo_p ())
25338 df_set_regs_ever_live (TOC_REGISTER, true);
25340 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25341 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25342 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25343 return tocrel;
25345 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25346 if (largetoc_reg != NULL)
25348 emit_move_insn (largetoc_reg, hi);
25349 hi = largetoc_reg;
25351 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
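/* For the medium/large code models the HIGH/LO_SUM pair built above
   becomes, roughly (hypothetical symbol):

	addis   9,2,var@toc@ha	# HIGH part, r2 is the TOC register
	ld      3,var@toc@l(9)	# LO_SUM folded into the memory access

   with LARGETOC_REG, when given, holding the addis result; the small
   model instead keeps the bare UNSPEC_TOCREL and addresses the slot
   as var@toc(2) directly.  */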
25354 /* Issue assembly directives that create a reference to the given DWARF
25355 FRAME_TABLE_LABEL from the current function section. */
25356 void
25357 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25359 fprintf (asm_out_file, "\t.ref %s\n",
25360 (* targetm.strip_name_encoding) (frame_table_label));
25363 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25364 and the change to the stack pointer. */
25366 static void
25367 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25369 rtvec p;
25370 int i;
25371 rtx regs[3];
25373 i = 0;
25374 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25375 if (hard_frame_needed)
25376 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25377 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25378 || (hard_frame_needed
25379 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25380 regs[i++] = fp;
25382 p = rtvec_alloc (i);
25383 while (--i >= 0)
25385 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25386 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25389 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25392 /* Emit the correct code for allocating stack space, as insns.
25393 If COPY_REG, make sure a copy of the old frame is left there.
25394 The generated code may use hard register 0 as a temporary. */
25396 static rtx_insn *
25397 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25399 rtx_insn *insn;
25400 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25401 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25402 rtx todec = gen_int_mode (-size, Pmode);
25403 rtx par, set, mem;
25405 if (INTVAL (todec) != -size)
25407 warning (0, "stack frame too large");
25408 emit_insn (gen_trap ());
25409 return 0;
25412 if (crtl->limit_stack)
25414 if (REG_P (stack_limit_rtx)
25415 && REGNO (stack_limit_rtx) > 1
25416 && REGNO (stack_limit_rtx) <= 31)
25418 rtx_insn *insn
25419 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25420 gcc_assert (insn);
25421 emit_insn (insn);
25422 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25424 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25425 && TARGET_32BIT
25426 && DEFAULT_ABI == ABI_V4
25427 && !flag_pic)
25429 rtx toload = gen_rtx_CONST (VOIDmode,
25430 gen_rtx_PLUS (Pmode,
25431 stack_limit_rtx,
25432 GEN_INT (size)));
25434 emit_insn (gen_elf_high (tmp_reg, toload));
25435 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25436 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25437 const0_rtx));
25439 else
25440 warning (0, "stack limit expression is not supported");
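/* A sketch of what the gen_cond_trap (LTU, ...) calls above assemble to:
   a conditional trap such as "twllt r1,rN", which traps when the stack
   pointer falls below the computed limit register. */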
25443 if (copy_reg)
25445 if (copy_off != 0)
25446 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25447 else
25448 emit_move_insn (copy_reg, stack_reg);
25451 if (size > 32767)
25453 /* Need a note here so that try_split doesn't get confused. */
25454 if (get_last_insn () == NULL_RTX)
25455 emit_note (NOTE_INSN_DELETED);
25456 insn = emit_move_insn (tmp_reg, todec);
25457 try_split (PATTERN (insn), insn, 0);
25458 todec = tmp_reg;
25461 insn = emit_insn (TARGET_32BIT
25462 ? gen_movsi_update_stack (stack_reg, stack_reg,
25463 todec, stack_reg)
25464 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25465 todec, stack_reg));
25466 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25467 it now and set the alias set/attributes. The above gen_*_update
25468 calls will generate a PARALLEL with the MEM set being the first
25469 operation. */
25470 par = PATTERN (insn);
25471 gcc_assert (GET_CODE (par) == PARALLEL);
25472 set = XVECEXP (par, 0, 0);
25473 gcc_assert (GET_CODE (set) == SET);
25474 mem = SET_DEST (set);
25475 gcc_assert (MEM_P (mem));
25476 MEM_NOTRAP_P (mem) = 1;
25477 set_mem_alias_set (mem, get_frame_alias_set ());
25479 RTX_FRAME_RELATED_P (insn) = 1;
25480 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25481 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25482 GEN_INT (-size))));
25483 return insn;
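/* When the adjustment fits in a 16-bit signed immediate, the
   *_update_stack insn emitted above is typically a single
   store-with-update, e.g. (sketch):
     stwu r1,-SIZE(r1)    # stdu on 64-bit
   which allocates the frame and stores the back chain in one step; for
   larger frames the amount is first loaded into r0 (tmp_reg) above. */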
25486 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25488 #if PROBE_INTERVAL > 32768
25489 #error Cannot use indexed addressing mode for stack probing
25490 #endif
25492 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25493 inclusive. These are offsets from the current stack pointer. */
25495 static void
25496 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25498 /* See if we have a constant small number of probes to generate. If so,
25499 that's the easy case. */
25500 if (first + size <= 32768)
25502 HOST_WIDE_INT i;
25504 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25505 it exceeds SIZE. If only one probe is needed, this will not
25506 generate any code. Then probe at FIRST + SIZE. */
25507 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25508 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25509 -(first + i)));
25511 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25512 -(first + size)));
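/* Worked example, assuming the default 4096-byte PROBE_INTERVAL: for
   FIRST == 0 and SIZE == 10000 the code above probes sp-4096, sp-8192
   and finally sp-10000. */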
25515 /* Otherwise, do the same as above, but in a loop. Note that we must be
25516 extra careful with variables wrapping around because we might be at
25517 the very top (or the very bottom) of the address space and we have
25518 to be able to handle this case properly; in particular, we use an
25519 equality test for the loop condition. */
25520 else
25522 HOST_WIDE_INT rounded_size;
25523 rtx r12 = gen_rtx_REG (Pmode, 12);
25524 rtx r0 = gen_rtx_REG (Pmode, 0);
25526 /* Sanity check for the addressing mode we're going to use. */
25527 gcc_assert (first <= 32768);
25529 /* Step 1: round SIZE to the previous multiple of the interval. */
25531 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25534 /* Step 2: compute initial and final value of the loop counter. */
25536 /* TEST_ADDR = SP + FIRST. */
25537 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25538 -first)));
25540 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25541 if (rounded_size > 32768)
25543 emit_move_insn (r0, GEN_INT (-rounded_size));
25544 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25546 else
25547 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25548 -rounded_size)));
25551 /* Step 3: the loop
25553 do
25555 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25556 probe at TEST_ADDR
25558 while (TEST_ADDR != LAST_ADDR)
25560 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25561 until it is equal to ROUNDED_SIZE. */
25563 if (TARGET_64BIT)
25564 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25565 else
25566 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25569 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25570 that SIZE is equal to ROUNDED_SIZE. */
25572 if (size != rounded_size)
25573 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25577 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25578 absolute addresses. */
25580 const char *
25581 output_probe_stack_range (rtx reg1, rtx reg2)
25583 static int labelno = 0;
25584 char loop_lab[32];
25585 rtx xops[2];
25587 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25589 /* Loop. */
25590 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25592 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25593 xops[0] = reg1;
25594 xops[1] = GEN_INT (-PROBE_INTERVAL);
25595 output_asm_insn ("addi %0,%0,%1", xops);
25597 /* Probe at TEST_ADDR. */
25598 xops[1] = gen_rtx_REG (Pmode, 0);
25599 output_asm_insn ("stw %1,0(%0)", xops);
25601 /* Test if TEST_ADDR == LAST_ADDR. */
25602 xops[1] = reg2;
25603 if (TARGET_64BIT)
25604 output_asm_insn ("cmpd 0,%0,%1", xops);
25605 else
25606 output_asm_insn ("cmpw 0,%0,%1", xops);
25608 /* Branch. */
25609 fputs ("\tbne 0,", asm_out_file);
25610 assemble_name_raw (asm_out_file, loop_lab);
25611 fputc ('\n', asm_out_file);
25613 return "";
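/* With the default 4096-byte interval the loop emitted above looks
   roughly like (32-bit sketch; 64-bit uses cmpd):
   .LPSRL0:
     addi rTEST,rTEST,-4096
     stw  r0,0(rTEST)
     cmpw 0,rTEST,rLAST
     bne  0,.LPSRL0
   where rTEST and rLAST stand for the operands of the enclosing
   probe_stack_range pattern. */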
25616 /* This function is called when rs6000_frame_related is processing
25617 SETs within a PARALLEL, and returns whether the REGNO save ought to
25618 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25619 for out-of-line register save functions, store multiple, and the
25620 Darwin world_save. They may contain registers that don't really
25621 need saving. */
25623 static bool
25624 interesting_frame_related_regno (unsigned int regno)
25626 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25627 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25628 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25629 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25630 as frame related. */
25631 if (regno == 0)
25632 return true;
25633 /* If we see CR2 then we are here on a Darwin world save. Saves of
25634 CR2 signify the whole CR is being saved. This is a long-standing
25635 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25636 that CR needs to be saved. */
25637 if (regno == CR2_REGNO)
25638 return true;
25639 /* Omit frame info for any user-defined global regs. If frame info
25640 is supplied for them, frame unwinding will restore a user reg.
25641 Also omit frame info for any reg we don't need to save, as that
25642 bloats frame info and can cause problems with shrink wrapping.
25643 Since global regs won't be seen as needing to be saved, both of
25644 these conditions are covered by save_reg_p. */
25645 return save_reg_p (regno);
25648 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25649 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25650 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25651 deduce these equivalences by itself so it wasn't necessary to hold
25652 its hand so much. Don't be tempted to always supply d2_f_d_e with
25653 the actual cfa register, i.e. r31 when we are using a hard frame
25654 pointer. That fails when saving regs off r1, and sched moves the
25655 r31 setup past the reg saves. */
25657 static rtx_insn *
25658 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25659 rtx reg2, rtx repl2)
25661 rtx repl;
25663 if (REGNO (reg) == STACK_POINTER_REGNUM)
25665 gcc_checking_assert (val == 0);
25666 repl = NULL_RTX;
25668 else
25669 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25670 GEN_INT (val));
25672 rtx pat = PATTERN (insn);
25673 if (!repl && !reg2)
25675 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25676 if (GET_CODE (pat) == PARALLEL)
25677 for (int i = 0; i < XVECLEN (pat, 0); i++)
25678 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25680 rtx set = XVECEXP (pat, 0, i);
25682 if (!REG_P (SET_SRC (set))
25683 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25684 RTX_FRAME_RELATED_P (set) = 1;
25686 RTX_FRAME_RELATED_P (insn) = 1;
25687 return insn;
25690 /* We expect that 'pat' is either a SET or a PARALLEL containing
25691 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25692 are important so they all have to be marked RTX_FRAME_RELATED_P.
25693 Call simplify_replace_rtx on the SETs rather than the whole insn
25694 so as to leave the other stuff alone (for example USE of r12). */
25696 set_used_flags (pat);
25697 if (GET_CODE (pat) == SET)
25699 if (repl)
25700 pat = simplify_replace_rtx (pat, reg, repl);
25701 if (reg2)
25702 pat = simplify_replace_rtx (pat, reg2, repl2);
25704 else if (GET_CODE (pat) == PARALLEL)
25706 pat = shallow_copy_rtx (pat);
25707 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25709 for (int i = 0; i < XVECLEN (pat, 0); i++)
25710 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25712 rtx set = XVECEXP (pat, 0, i);
25714 if (repl)
25715 set = simplify_replace_rtx (set, reg, repl);
25716 if (reg2)
25717 set = simplify_replace_rtx (set, reg2, repl2);
25718 XVECEXP (pat, 0, i) = set;
25720 if (!REG_P (SET_SRC (set))
25721 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25722 RTX_FRAME_RELATED_P (set) = 1;
25725 else
25726 gcc_unreachable ();
25728 RTX_FRAME_RELATED_P (insn) = 1;
25729 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25731 return insn;
25734 /* Returns an insn that has a vrsave set operation with the
25735 appropriate CLOBBERs. */
25737 static rtx
25738 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25740 int nclobs, i;
25741 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25742 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25744 clobs[0]
25745 = gen_rtx_SET (vrsave,
25746 gen_rtx_UNSPEC_VOLATILE (SImode,
25747 gen_rtvec (2, reg, vrsave),
25748 UNSPECV_SET_VRSAVE));
25750 nclobs = 1;
25752 /* We need to clobber the registers in the mask so the scheduler
25753 does not move sets to VRSAVE before sets of AltiVec registers.
25755 However, if the function receives nonlocal gotos, reload will set
25756 all call saved registers live. We will end up with:
25758 (set (reg 999) (mem))
25759 (parallel [ (set (reg vrsave) (unspec blah))
25760 (clobber (reg 999))])
25762 The clobber will cause the store into reg 999 to be dead, and
25763 flow will attempt to delete an epilogue insn. In this case, we
25764 need an unspec use/set of the register. */
25766 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25767 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25769 if (!epiloguep || call_used_regs [i])
25770 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25771 gen_rtx_REG (V4SImode, i));
25772 else
25774 rtx reg = gen_rtx_REG (V4SImode, i);
25776 clobs[nclobs++]
25777 = gen_rtx_SET (reg,
25778 gen_rtx_UNSPEC (V4SImode,
25779 gen_rtvec (1, reg), 27));
25783 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25785 for (i = 0; i < nclobs; ++i)
25786 XVECEXP (insn, 0, i) = clobs[i];
25788 return insn;
25791 static rtx
25792 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25794 rtx addr, mem;
25796 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25797 mem = gen_frame_mem (GET_MODE (reg), addr);
25798 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25801 static rtx
25802 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25804 return gen_frame_set (reg, frame_reg, offset, false);
25807 static rtx
25808 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25810 return gen_frame_set (reg, frame_reg, offset, true);
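/* The SET built by gen_frame_set is, for a store, roughly
     (set (mem (plus (reg FRAME) (const_int OFFSET))) (reg REGNO))
   with the MEM placed in the frame alias set; a load is the same SET
   with source and destination swapped. */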
25813 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25814 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25816 static rtx_insn *
25817 emit_frame_save (rtx frame_reg, machine_mode mode,
25818 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25820 rtx reg;
25822 /* Rule out cases that would require register indexed addressing. */
25823 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25824 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25826 reg = gen_rtx_REG (mode, regno);
25827 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25828 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25829 NULL_RTX, NULL_RTX);
25832 /* Emit an offset memory reference suitable for a frame store, while
25833 converting to a valid addressing mode. */
25835 static rtx
25836 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25838 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25841 #ifndef TARGET_FIX_AND_CONTINUE
25842 #define TARGET_FIX_AND_CONTINUE 0
25843 #endif
25845 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these. */
25846 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25847 #define LAST_SAVRES_REGISTER 31
25848 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25850 enum {
25851 SAVRES_LR = 0x1,
25852 SAVRES_SAVE = 0x2,
25853 SAVRES_REG = 0x0c,
25854 SAVRES_GPR = 0,
25855 SAVRES_FPR = 4,
25856 SAVRES_VR = 8
25859 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25861 /* Temporary holding space for an out-of-line register save/restore
25862 routine name. */
25863 static char savres_routine_name[30];
25865 /* Return the name for an out-of-line register save/restore routine.
25866 We are saving/restoring GPRs if GPR is true. */
25868 static char *
25869 rs6000_savres_routine_name (int regno, int sel)
25871 const char *prefix = "";
25872 const char *suffix = "";
25874 /* Different targets are supposed to define
25875 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25876 routine name could be defined with:
25878 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25880 This is a nice idea in theory, but in reality, things are
25881 complicated in several ways:
25883 - ELF targets have save/restore routines for GPRs.
25885 - PPC64 ELF targets have routines for save/restore of GPRs that
25886 differ in what they do with the link register, so having a set
25887 prefix doesn't work. (We only use one of the save routines at
25888 the moment, though.)
25890 - PPC32 ELF targets have "exit" versions of the restore routines
25891 that restore the link register and can save some extra space.
25892 These require an extra suffix. (There are also "tail" versions
25893 of the restore routines and "GOT" versions of the save routines,
25894 but we don't generate those at present. Same problems apply,
25895 though.)
25897 We deal with all this by synthesizing our own prefix/suffix and
25898 using that for the simple sprintf call shown above. */
25899 if (DEFAULT_ABI == ABI_V4)
25901 if (TARGET_64BIT)
25902 goto aix_names;
25904 if ((sel & SAVRES_REG) == SAVRES_GPR)
25905 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25906 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25907 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25908 else if ((sel & SAVRES_REG) == SAVRES_VR)
25909 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25910 else
25911 abort ();
25913 if ((sel & SAVRES_LR))
25914 suffix = "_x";
25916 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25918 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25919 /* No out-of-line save/restore routines for GPRs on AIX. */
25920 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25921 #endif
25923 aix_names:
25924 if ((sel & SAVRES_REG) == SAVRES_GPR)
25925 prefix = ((sel & SAVRES_SAVE)
25926 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25927 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25928 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25930 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25931 if ((sel & SAVRES_LR))
25932 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25933 else
25934 #endif
25936 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25937 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25940 else if ((sel & SAVRES_REG) == SAVRES_VR)
25941 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25942 else
25943 abort ();
25946 if (DEFAULT_ABI == ABI_DARWIN)
25948 /* The Darwin approach is (slightly) different, in order to be
25949 compatible with code generated by the system toolchain. There is a
25950 single symbol for the start of the save sequence, and the code here
25951 embeds an offset from that symbol, based on the first register
25952 to be saved. */
25953 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25954 if ((sel & SAVRES_REG) == SAVRES_GPR)
25955 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25956 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25957 (regno - 13) * 4, prefix, regno);
25958 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25959 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25960 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25961 else if ((sel & SAVRES_REG) == SAVRES_VR)
25962 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25963 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25964 else
25965 abort ();
25967 else
25968 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25970 return savres_routine_name;
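/* Example names this can produce (sketch): "_savegpr_29_x" for a 32-bit
   SVR4 GPR save that also saves LR, "_restgpr0_29" for an AIX/ELFv2 GPR
   restore that restores LR, and "*saveFP+56 ; save f28-f31" for the
   offset-into-blob Darwin form. */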
25973 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25974 We are saving/restoring GPRs if GPR is true. */
25976 static rtx
25977 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25979 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25980 ? info->first_gp_reg_save
25981 : (sel & SAVRES_REG) == SAVRES_FPR
25982 ? info->first_fp_reg_save - 32
25983 : (sel & SAVRES_REG) == SAVRES_VR
25984 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25985 : -1);
25986 rtx sym;
25987 int select = sel;
25989 /* Don't generate bogus routine names. */
25990 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25991 && regno <= LAST_SAVRES_REGISTER
25992 && select >= 0 && select <= 12);
25994 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25996 if (sym == NULL)
25998 char *name;
26000 name = rs6000_savres_routine_name (regno, sel);
26002 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26003 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26004 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26007 return sym;
26010 /* Emit a sequence of insns, including a stack tie if needed, for
26011 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26012 reset the stack pointer, but move the base of the frame into
26013 reg UPDT_REGNO for use by out-of-line register restore routines. */
26015 static rtx
26016 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26017 unsigned updt_regno)
26019 /* If there is nothing to do, don't do anything. */
26020 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26021 return NULL_RTX;
26023 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26025 /* This blockage is needed so that sched doesn't decide to move
26026 the sp change before the register restores. */
26027 if (DEFAULT_ABI == ABI_V4)
26028 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26029 GEN_INT (frame_off)));
26031 /* If we are restoring registers out-of-line, we will be using the
26032 "exit" variants of the restore routines, which will reset the
26033 stack for us. But we do need to point updt_reg into the
26034 right place for those routines. */
26035 if (frame_off != 0)
26036 return emit_insn (gen_add3_insn (updt_reg_rtx,
26037 frame_reg_rtx, GEN_INT (frame_off)));
26038 else
26039 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26041 return NULL_RTX;
26044 /* Return the register number used as a pointer by out-of-line
26045 save/restore functions. */
26047 static inline unsigned
26048 ptr_regno_for_savres (int sel)
26050 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26051 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26052 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26055 /* Construct a parallel rtx describing the effect of a call to an
26056 out-of-line register save/restore routine, and emit the insn
26057 or jump_insn as appropriate. */
26059 static rtx_insn *
26060 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26061 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26062 machine_mode reg_mode, int sel)
26064 int i;
26065 int offset, start_reg, end_reg, n_regs, use_reg;
26066 int reg_size = GET_MODE_SIZE (reg_mode);
26067 rtx sym;
26068 rtvec p;
26069 rtx par;
26070 rtx_insn *insn;
26072 offset = 0;
26073 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26074 ? info->first_gp_reg_save
26075 : (sel & SAVRES_REG) == SAVRES_FPR
26076 ? info->first_fp_reg_save
26077 : (sel & SAVRES_REG) == SAVRES_VR
26078 ? info->first_altivec_reg_save
26079 : -1);
26080 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26081 ? 32
26082 : (sel & SAVRES_REG) == SAVRES_FPR
26083 ? 64
26084 : (sel & SAVRES_REG) == SAVRES_VR
26085 ? LAST_ALTIVEC_REGNO + 1
26086 : -1);
26087 n_regs = end_reg - start_reg;
26088 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26089 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26090 + n_regs);
26092 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26093 RTVEC_ELT (p, offset++) = ret_rtx;
26095 RTVEC_ELT (p, offset++)
26096 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26098 sym = rs6000_savres_routine_sym (info, sel);
26099 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26101 use_reg = ptr_regno_for_savres (sel);
26102 if ((sel & SAVRES_REG) == SAVRES_VR)
26104 /* Vector regs are saved/restored using [reg+reg] addressing. */
26105 RTVEC_ELT (p, offset++)
26106 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26107 RTVEC_ELT (p, offset++)
26108 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26110 else
26111 RTVEC_ELT (p, offset++)
26112 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26114 for (i = 0; i < end_reg - start_reg; i++)
26115 RTVEC_ELT (p, i + offset)
26116 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26117 frame_reg_rtx, save_area_offset + reg_size * i,
26118 (sel & SAVRES_SAVE) != 0);
26120 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26121 RTVEC_ELT (p, i + offset)
26122 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26124 par = gen_rtx_PARALLEL (VOIDmode, p);
26126 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26128 insn = emit_jump_insn (par);
26129 JUMP_LABEL (insn) = ret_rtx;
26131 else
26132 insn = emit_insn (par);
26133 return insn;
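/* The PARALLEL built above for, say, an AIX/ELFv2 GPR save via r12
   looks roughly like (sketch):
     (parallel [(clobber (reg LR_REGNO))
                (use (symbol_ref "_savegpr0_29"))
                (use (reg 12))
                (set (mem) (reg 29)) ... (set (mem) (reg 31))])
   and is matched by a machine-description pattern that emits the
   actual bl to the routine. */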
26136 /* Emit prologue code to store CR fields that need to be saved into REG. This
26137 function should only be called when moving the non-volatile CRs to REG, it
26138 is not a general purpose routine to move the entire set of CRs to REG.
26139 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26140 volatile CRs. */
26142 static void
26143 rs6000_emit_prologue_move_from_cr (rtx reg)
26145 /* Only the ELFv2 ABI allows storing only selected fields. */
26146 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26148 int i, cr_reg[8], count = 0;
26150 /* Collect CR fields that must be saved. */
26151 for (i = 0; i < 8; i++)
26152 if (save_reg_p (CR0_REGNO + i))
26153 cr_reg[count++] = i;
26155 /* If it's just a single one, use mfcrf. */
26156 if (count == 1)
26158 rtvec p = rtvec_alloc (1);
26159 rtvec r = rtvec_alloc (2);
26160 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26161 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26162 RTVEC_ELT (p, 0)
26163 = gen_rtx_SET (reg,
26164 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26166 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26167 return;
26170 /* ??? It might be better to handle count == 2 / 3 cases here
26171 as well, using logical operations to combine the values. */
26174 emit_insn (gen_prologue_movesi_from_cr (reg));
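/* On ELFv2 the single-field case above typically assembles to one
   mfocrf, e.g. (sketch) "mfocrf rT,0x20" when only CR2 must be saved
   (mask 1 << (7 - 2)); all other cases fall back to a full mfcr. */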
26177 /* Return whether the split-stack arg pointer (r12) is used. */
26179 static bool
26180 split_stack_arg_pointer_used_p (void)
26182 /* If the pseudo holding the arg pointer is no longer a pseudo,
26183 then the arg pointer is used. */
26184 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26185 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26186 || (REGNO (cfun->machine->split_stack_arg_pointer)
26187 < FIRST_PSEUDO_REGISTER)))
26188 return true;
26190 /* Unfortunately we also need to do some code scanning, since
26191 r12 may have been substituted for the pseudo. */
26192 rtx_insn *insn;
26193 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26194 FOR_BB_INSNS (bb, insn)
26195 if (NONDEBUG_INSN_P (insn))
26197 /* A call destroys r12. */
26198 if (CALL_P (insn))
26199 return false;
26201 df_ref use;
26202 FOR_EACH_INSN_USE (use, insn)
26204 rtx x = DF_REF_REG (use);
26205 if (REG_P (x) && REGNO (x) == 12)
26206 return true;
26208 df_ref def;
26209 FOR_EACH_INSN_DEF (def, insn)
26211 rtx x = DF_REF_REG (def);
26212 if (REG_P (x) && REGNO (x) == 12)
26213 return false;
26216 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26219 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26221 static bool
26222 rs6000_global_entry_point_needed_p (void)
26224 /* Only needed for the ELFv2 ABI. */
26225 if (DEFAULT_ABI != ABI_ELFv2)
26226 return false;
26228 /* With -msingle-pic-base, we assume the whole program shares the same
26229 TOC, so no global entry point prologues are needed anywhere. */
26230 if (TARGET_SINGLE_PIC_BASE)
26231 return false;
26233 /* Ensure we have a global entry point for thunks. ??? We could
26234 avoid that if the target routine doesn't need a global entry point,
26235 but we do not know whether this is the case at this point. */
26236 if (cfun->is_thunk)
26237 return true;
26239 /* For regular functions, rs6000_emit_prologue sets this flag if the
26240 routine ever uses the TOC pointer. */
26241 return cfun->machine->r2_setup_needed;
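/* When this returns true, the prologue output code emits the standard
   ELFv2 global entry sequence, roughly (sketch):
     addis r2,r12,(.TOC.-.Lentry)@ha
     addi  r2,r2,(.TOC.-.Lentry)@l
   relying on r12 holding the function's own entry address, as the ABI
   requires for calls via the global entry point. */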
26244 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26245 static sbitmap
26246 rs6000_get_separate_components (void)
26248 rs6000_stack_t *info = rs6000_stack_info ();
26250 if (WORLD_SAVE_P (info))
26251 return NULL;
26253 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26254 && !(info->savres_strategy & REST_MULTIPLE));
26256 /* Component 0 is the save/restore of LR (done via GPR0).
26257 Components 13..31 are the save/restore of GPR13..GPR31.
26258 Components 46..63 are the save/restore of FPR14..FPR31. */
26260 cfun->machine->n_components = 64;
26262 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26263 bitmap_clear (components);
26265 int reg_size = TARGET_32BIT ? 4 : 8;
26266 int fp_reg_size = 8;
26268 /* The GPRs we need saved to the frame. */
26269 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26270 && (info->savres_strategy & REST_INLINE_GPRS))
26272 int offset = info->gp_save_offset;
26273 if (info->push_p)
26274 offset += info->total_size;
26276 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26278 if (IN_RANGE (offset, -0x8000, 0x7fff)
26279 && save_reg_p (regno))
26280 bitmap_set_bit (components, regno);
26282 offset += reg_size;
26286 /* Don't mess with the hard frame pointer. */
26287 if (frame_pointer_needed)
26288 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26290 /* Don't mess with the fixed TOC register. */
26291 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26292 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26293 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26294 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26296 /* The FPRs we need saved to the frame. */
26297 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26298 && (info->savres_strategy & REST_INLINE_FPRS))
26300 int offset = info->fp_save_offset;
26301 if (info->push_p)
26302 offset += info->total_size;
26304 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26306 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26307 bitmap_set_bit (components, regno);
26309 offset += fp_reg_size;
26313 /* Optimize LR save and restore if we can. This is component 0. Any
26314 out-of-line register save/restore routines need LR. */
26315 if (info->lr_save_p
26316 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26317 && (info->savres_strategy & SAVE_INLINE_GPRS)
26318 && (info->savres_strategy & REST_INLINE_GPRS)
26319 && (info->savres_strategy & SAVE_INLINE_FPRS)
26320 && (info->savres_strategy & REST_INLINE_FPRS)
26321 && (info->savres_strategy & SAVE_INLINE_VRS)
26322 && (info->savres_strategy & REST_INLINE_VRS))
26324 int offset = info->lr_save_offset;
26325 if (info->push_p)
26326 offset += info->total_size;
26327 if (IN_RANGE (offset, -0x8000, 0x7fff))
26328 bitmap_set_bit (components, 0);
26331 return components;
26334 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26335 static sbitmap
26336 rs6000_components_for_bb (basic_block bb)
26338 rs6000_stack_t *info = rs6000_stack_info ();
26340 bitmap in = DF_LIVE_IN (bb);
26341 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26342 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26344 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26345 bitmap_clear (components);
26347 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26349 /* GPRs. */
26350 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26351 if (bitmap_bit_p (in, regno)
26352 || bitmap_bit_p (gen, regno)
26353 || bitmap_bit_p (kill, regno))
26354 bitmap_set_bit (components, regno);
26356 /* FPRs. */
26357 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26358 if (bitmap_bit_p (in, regno)
26359 || bitmap_bit_p (gen, regno)
26360 || bitmap_bit_p (kill, regno))
26361 bitmap_set_bit (components, regno);
26363 /* The link register. */
26364 if (bitmap_bit_p (in, LR_REGNO)
26365 || bitmap_bit_p (gen, LR_REGNO)
26366 || bitmap_bit_p (kill, LR_REGNO))
26367 bitmap_set_bit (components, 0);
26369 return components;
26372 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26373 static void
26374 rs6000_disqualify_components (sbitmap components, edge e,
26375 sbitmap edge_components, bool /*is_prologue*/)
26377 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26378 live where we want to place that code. */
26379 if (bitmap_bit_p (edge_components, 0)
26380 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26382 if (dump_file)
26383 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26384 "on entry to bb %d\n", e->dest->index);
26385 bitmap_clear_bit (components, 0);
26389 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26390 static void
26391 rs6000_emit_prologue_components (sbitmap components)
26393 rs6000_stack_t *info = rs6000_stack_info ();
26394 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26395 ? HARD_FRAME_POINTER_REGNUM
26396 : STACK_POINTER_REGNUM);
26398 machine_mode reg_mode = Pmode;
26399 int reg_size = TARGET_32BIT ? 4 : 8;
26400 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26401 ? DFmode : SFmode;
26402 int fp_reg_size = 8;
26404 /* Prologue for LR. */
26405 if (bitmap_bit_p (components, 0))
26407 rtx reg = gen_rtx_REG (reg_mode, 0);
26408 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26409 RTX_FRAME_RELATED_P (insn) = 1;
26410 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26412 int offset = info->lr_save_offset;
26413 if (info->push_p)
26414 offset += info->total_size;
26416 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26417 RTX_FRAME_RELATED_P (insn) = 1;
26418 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26419 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26420 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26423 /* Prologue for the GPRs. */
26424 int offset = info->gp_save_offset;
26425 if (info->push_p)
26426 offset += info->total_size;
26428 for (int i = info->first_gp_reg_save; i < 32; i++)
26430 if (bitmap_bit_p (components, i))
26432 rtx reg = gen_rtx_REG (reg_mode, i);
26433 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26434 RTX_FRAME_RELATED_P (insn) = 1;
26435 rtx set = copy_rtx (single_set (insn));
26436 add_reg_note (insn, REG_CFA_OFFSET, set);
26439 offset += reg_size;
26442 /* Prologue for the FPRs. */
26443 offset = info->fp_save_offset;
26444 if (info->push_p)
26445 offset += info->total_size;
26447 for (int i = info->first_fp_reg_save; i < 64; i++)
26449 if (bitmap_bit_p (components, i))
26451 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26452 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26453 RTX_FRAME_RELATED_P (insn) = 1;
26454 rtx set = copy_rtx (single_set (insn));
26455 add_reg_note (insn, REG_CFA_OFFSET, set);
26458 offset += fp_reg_size;
26462 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26463 static void
26464 rs6000_emit_epilogue_components (sbitmap components)
26466 rs6000_stack_t *info = rs6000_stack_info ();
26467 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26468 ? HARD_FRAME_POINTER_REGNUM
26469 : STACK_POINTER_REGNUM);
26471 machine_mode reg_mode = Pmode;
26472 int reg_size = TARGET_32BIT ? 4 : 8;
26474 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26475 ? DFmode : SFmode;
26476 int fp_reg_size = 8;
26478 /* Epilogue for the FPRs. */
26479 int offset = info->fp_save_offset;
26480 if (info->push_p)
26481 offset += info->total_size;
26483 for (int i = info->first_fp_reg_save; i < 64; i++)
26485 if (bitmap_bit_p (components, i))
26487 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26488 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26489 RTX_FRAME_RELATED_P (insn) = 1;
26490 add_reg_note (insn, REG_CFA_RESTORE, reg);
26493 offset += fp_reg_size;
26496 /* Epilogue for the GPRs. */
26497 offset = info->gp_save_offset;
26498 if (info->push_p)
26499 offset += info->total_size;
26501 for (int i = info->first_gp_reg_save; i < 32; i++)
26503 if (bitmap_bit_p (components, i))
26505 rtx reg = gen_rtx_REG (reg_mode, i);
26506 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26507 RTX_FRAME_RELATED_P (insn) = 1;
26508 add_reg_note (insn, REG_CFA_RESTORE, reg);
26511 offset += reg_size;
26514 /* Epilogue for LR. */
26515 if (bitmap_bit_p (components, 0))
26517 int offset = info->lr_save_offset;
26518 if (info->push_p)
26519 offset += info->total_size;
26521 rtx reg = gen_rtx_REG (reg_mode, 0);
26522 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26524 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26525 insn = emit_move_insn (lr, reg);
26526 RTX_FRAME_RELATED_P (insn) = 1;
26527 add_reg_note (insn, REG_CFA_RESTORE, lr);
26531 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26532 static void
26533 rs6000_set_handled_components (sbitmap components)
26535 rs6000_stack_t *info = rs6000_stack_info ();
26537 for (int i = info->first_gp_reg_save; i < 32; i++)
26538 if (bitmap_bit_p (components, i))
26539 cfun->machine->gpr_is_wrapped_separately[i] = true;
26541 for (int i = info->first_fp_reg_save; i < 64; i++)
26542 if (bitmap_bit_p (components, i))
26543 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26545 if (bitmap_bit_p (components, 0))
26546 cfun->machine->lr_is_wrapped_separately = true;
26549 /* VRSAVE is a bit vector representing which AltiVec registers
26550 are used. The OS uses this to determine which vector
26551 registers to save on a context switch. We need to save
26552 VRSAVE on the stack frame, add whatever AltiVec registers we
26553 used in this function, and do the corresponding magic in the
26554 epilogue. */
26555 static void
26556 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26557 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26559 /* Get VRSAVE into a GPR. */
26560 rtx reg = gen_rtx_REG (SImode, save_regno);
26561 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26562 if (TARGET_MACHO)
26563 emit_insn (gen_get_vrsave_internal (reg));
26564 else
26565 emit_insn (gen_rtx_SET (reg, vrsave));
26567 /* Save VRSAVE. */
26568 int offset = info->vrsave_save_offset + frame_off;
26569 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26571 /* Include the registers in the mask. */
26572 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26574 emit_insn (generate_set_vrsave (reg, info, 0));
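/* Apart from the Darwin special case, the sequence above corresponds
   roughly to (sketch):
     mfspr rS,VRSAVE
     stw   rS,OFF(r1)
     ori   rS,rS,vrsave_mask   # oris as well if high bits are set
     mtspr VRSAVE,rS
   */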
26577 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26578 called, it left the arg pointer to the old stack in r29. Otherwise, the
26579 arg pointer is the top of the current frame. */
26580 static void
26581 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26582 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26584 cfun->machine->split_stack_argp_used = true;
26586 if (sp_adjust)
26588 rtx r12 = gen_rtx_REG (Pmode, 12);
26589 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26590 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26591 emit_insn_before (set_r12, sp_adjust);
26593 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26595 rtx r12 = gen_rtx_REG (Pmode, 12);
26596 if (frame_off == 0)
26597 emit_move_insn (r12, frame_reg_rtx);
26598 else
26599 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26602 if (info->push_p)
26604 rtx r12 = gen_rtx_REG (Pmode, 12);
26605 rtx r29 = gen_rtx_REG (Pmode, 29);
26606 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26607 rtx not_more = gen_label_rtx ();
26608 rtx jump;
26610 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26611 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26612 gen_rtx_LABEL_REF (VOIDmode, not_more),
26613 pc_rtx);
26614 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26615 JUMP_LABEL (jump) = not_more;
26616 LABEL_NUSES (not_more) += 1;
26617 emit_move_insn (r12, r29);
26618 emit_label (not_more);
26622 /* Emit function prologue as insns. */
26624 void
26625 rs6000_emit_prologue (void)
26627 rs6000_stack_t *info = rs6000_stack_info ();
26628 machine_mode reg_mode = Pmode;
26629 int reg_size = TARGET_32BIT ? 4 : 8;
26630 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26631 ? DFmode : SFmode;
26632 int fp_reg_size = 8;
26633 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26634 rtx frame_reg_rtx = sp_reg_rtx;
26635 unsigned int cr_save_regno;
26636 rtx cr_save_rtx = NULL_RTX;
26637 rtx_insn *insn;
26638 int strategy;
26639 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26640 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26641 && call_used_regs[STATIC_CHAIN_REGNUM]);
26642 int using_split_stack = (flag_split_stack
26643 && (lookup_attribute ("no_split_stack",
26644 DECL_ATTRIBUTES (cfun->decl))
26645 == NULL));
26647 /* Offset to top of frame for frame_reg and sp respectively. */
26648 HOST_WIDE_INT frame_off = 0;
26649 HOST_WIDE_INT sp_off = 0;
26650 /* sp_adjust is the stack adjusting instruction, tracked so that the
26651 insn setting up the split-stack arg pointer can be emitted just
26652 prior to it, when r12 is not used here for other purposes. */
26653 rtx_insn *sp_adjust = 0;
26655 #if CHECKING_P
26656 /* Track and check usage of r0, r11, r12. */
26657 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26658 #define START_USE(R) do \
26660 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26661 reg_inuse |= 1 << (R); \
26662 } while (0)
26663 #define END_USE(R) do \
26665 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26666 reg_inuse &= ~(1 << (R)); \
26667 } while (0)
26668 #define NOT_INUSE(R) do \
26670 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26671 } while (0)
26672 #else
26673 #define START_USE(R) do {} while (0)
26674 #define END_USE(R) do {} while (0)
26675 #define NOT_INUSE(R) do {} while (0)
26676 #endif
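/* Usage sketch for the checking macros above: START_USE (11) asserts
   r11 is currently free and marks it busy, END_USE (11) marks it free
   again, and NOT_INUSE (11) just asserts it is free. All three expand
   to nothing when !CHECKING_P. */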
26678 if (DEFAULT_ABI == ABI_ELFv2
26679 && !TARGET_SINGLE_PIC_BASE)
26681 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26683 /* With -mminimal-toc we may generate an extra use of r2 below. */
26684 if (TARGET_TOC && TARGET_MINIMAL_TOC
26685 && !constant_pool_empty_p ())
26686 cfun->machine->r2_setup_needed = true;
26690 if (flag_stack_usage_info)
26691 current_function_static_stack_size = info->total_size;
26693 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26695 HOST_WIDE_INT size = info->total_size;
26697 if (crtl->is_leaf && !cfun->calls_alloca)
26699 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26700 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26701 size - STACK_CHECK_PROTECT);
26703 else if (size > 0)
26704 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26707 if (TARGET_FIX_AND_CONTINUE)
26709 /* gdb on darwin arranges to forward a function from the old
26710 address by modifying the first 5 instructions of the function
26711 to branch to the overriding function. This is necessary to
26712 permit function pointers that point to the old function to
26713 actually forward to the new function. */
26714 emit_insn (gen_nop ());
26715 emit_insn (gen_nop ());
26716 emit_insn (gen_nop ());
26717 emit_insn (gen_nop ());
26718 emit_insn (gen_nop ());
26721 /* Handle world saves specially here. */
26722 if (WORLD_SAVE_P (info))
26724 int i, j, sz;
26725 rtx treg;
26726 rtvec p;
26727 rtx reg0;
26729 /* save_world expects lr in r0. */
26730 reg0 = gen_rtx_REG (Pmode, 0);
26731 if (info->lr_save_p)
26733 insn = emit_move_insn (reg0,
26734 gen_rtx_REG (Pmode, LR_REGNO));
26735 RTX_FRAME_RELATED_P (insn) = 1;
26738 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26739 assumptions about the offsets of various bits of the stack
26740 frame. */
26741 gcc_assert (info->gp_save_offset == -220
26742 && info->fp_save_offset == -144
26743 && info->lr_save_offset == 8
26744 && info->cr_save_offset == 4
26745 && info->push_p
26746 && info->lr_save_p
26747 && (!crtl->calls_eh_return
26748 || info->ehrd_offset == -432)
26749 && info->vrsave_save_offset == -224
26750 && info->altivec_save_offset == -416);
26752 treg = gen_rtx_REG (SImode, 11);
26753 emit_move_insn (treg, GEN_INT (-info->total_size));
26755 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26756 in R11. It also clobbers R12, so beware! */
26758 /* Preserve CR2 for save_world prologues */
26759 sz = 5;
26760 sz += 32 - info->first_gp_reg_save;
26761 sz += 64 - info->first_fp_reg_save;
26762 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26763 p = rtvec_alloc (sz);
26764 j = 0;
26765 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26766 gen_rtx_REG (SImode,
26767 LR_REGNO));
26768 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26769 gen_rtx_SYMBOL_REF (Pmode,
26770 "*save_world"));
26771 /* We do floats first so that the instruction pattern matches
26772 properly. */
26773 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26774 RTVEC_ELT (p, j++)
26775 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26776 ? DFmode : SFmode,
26777 info->first_fp_reg_save + i),
26778 frame_reg_rtx,
26779 info->fp_save_offset + frame_off + 8 * i);
26780 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26781 RTVEC_ELT (p, j++)
26782 = gen_frame_store (gen_rtx_REG (V4SImode,
26783 info->first_altivec_reg_save + i),
26784 frame_reg_rtx,
26785 info->altivec_save_offset + frame_off + 16 * i);
26786 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26787 RTVEC_ELT (p, j++)
26788 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26789 frame_reg_rtx,
26790 info->gp_save_offset + frame_off + reg_size * i);
26792 /* CR register traditionally saved as CR2. */
26793 RTVEC_ELT (p, j++)
26794 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26795 frame_reg_rtx, info->cr_save_offset + frame_off);
26796 /* Explain about use of R0. */
26797 if (info->lr_save_p)
26798 RTVEC_ELT (p, j++)
26799 = gen_frame_store (reg0,
26800 frame_reg_rtx, info->lr_save_offset + frame_off);
26801 /* Explain what happens to the stack pointer. */
26803 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26804 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26807 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26808 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26809 treg, GEN_INT (-info->total_size));
26810 sp_off = frame_off = info->total_size;
26813 strategy = info->savres_strategy;
26815 /* For V.4, update stack before we do any saving and set back pointer. */
26816 if (! WORLD_SAVE_P (info)
26817 && info->push_p
26818 && (DEFAULT_ABI == ABI_V4
26819 || crtl->calls_eh_return))
26821 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26822 || !(strategy & SAVE_INLINE_GPRS)
26823 || !(strategy & SAVE_INLINE_VRS));
26824 int ptr_regno = -1;
26825 rtx ptr_reg = NULL_RTX;
26826 int ptr_off = 0;
26828 if (info->total_size < 32767)
26829 frame_off = info->total_size;
26830 else if (need_r11)
26831 ptr_regno = 11;
26832 else if (info->cr_save_p
26833 || info->lr_save_p
26834 || info->first_fp_reg_save < 64
26835 || info->first_gp_reg_save < 32
26836 || info->altivec_size != 0
26837 || info->vrsave_size != 0
26838 || crtl->calls_eh_return)
26839 ptr_regno = 12;
26840 else
26842 /* The prologue won't be saving any regs so there is no need
26843 to set up a frame register to access any frame save area.
26844 We also won't be using frame_off anywhere below, but set
26845 the correct value anyway to protect against future
26846 changes to this function. */
26847 frame_off = info->total_size;
26849 if (ptr_regno != -1)
26851 /* Set up the frame offset to that needed by the first
26852 out-of-line save function. */
26853 START_USE (ptr_regno);
26854 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26855 frame_reg_rtx = ptr_reg;
26856 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26857 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26858 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26859 ptr_off = info->gp_save_offset + info->gp_size;
26860 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26861 ptr_off = info->altivec_save_offset + info->altivec_size;
26862 frame_off = -ptr_off;
26864 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26865 ptr_reg, ptr_off);
26866 if (REGNO (frame_reg_rtx) == 12)
26867 sp_adjust = 0;
26868 sp_off = info->total_size;
26869 if (frame_reg_rtx != sp_reg_rtx)
26870 rs6000_emit_stack_tie (frame_reg_rtx, false);
26873 /* If we use the link register, get it into r0. */
26874 if (!WORLD_SAVE_P (info) && info->lr_save_p
26875 && !cfun->machine->lr_is_wrapped_separately)
26877 rtx addr, reg, mem;
26879 reg = gen_rtx_REG (Pmode, 0);
26880 START_USE (0);
26881 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26882 RTX_FRAME_RELATED_P (insn) = 1;
26884 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26885 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26887 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26888 GEN_INT (info->lr_save_offset + frame_off));
26889 mem = gen_rtx_MEM (Pmode, addr);
26890 /* This should not use rs6000_sr_alias_set, because of
26891 __builtin_return_address. */
26893 insn = emit_move_insn (mem, reg);
26894 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26895 NULL_RTX, NULL_RTX);
26896 END_USE (0);
26900 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26901 r12 will be needed by out-of-line gpr save. */
26902 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26903 && !(strategy & (SAVE_INLINE_GPRS
26904 | SAVE_NOINLINE_GPRS_SAVES_LR))
26905 ? 11 : 12);
26906 if (!WORLD_SAVE_P (info)
26907 && info->cr_save_p
26908 && REGNO (frame_reg_rtx) != cr_save_regno
26909 && !(using_static_chain_p && cr_save_regno == 11)
26910 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26912 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26913 START_USE (cr_save_regno);
26914 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26917 /* Do any required saving of FPRs. If there are only one or two to
26918 save, do it ourselves. Otherwise, call an out-of-line save function. */
26919 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26921 int offset = info->fp_save_offset + frame_off;
26922 for (int i = info->first_fp_reg_save; i < 64; i++)
26924 if (save_reg_p (i)
26925 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26926 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26927 sp_off - frame_off);
26929 offset += fp_reg_size;
26932 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26934 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26935 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26936 unsigned ptr_regno = ptr_regno_for_savres (sel);
26937 rtx ptr_reg = frame_reg_rtx;
26939 if (REGNO (frame_reg_rtx) == ptr_regno)
26940 gcc_checking_assert (frame_off == 0);
26941 else
26943 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26944 NOT_INUSE (ptr_regno);
26945 emit_insn (gen_add3_insn (ptr_reg,
26946 frame_reg_rtx, GEN_INT (frame_off)));
26948 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26949 info->fp_save_offset,
26950 info->lr_save_offset,
26951 DFmode, sel);
26952 rs6000_frame_related (insn, ptr_reg, sp_off,
26953 NULL_RTX, NULL_RTX);
26954 if (lr)
26955 END_USE (0);
26958 /* Save GPRs. This is done as a PARALLEL if we are using
26959 the store-multiple instructions. */
26960 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26962 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26963 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26964 unsigned ptr_regno = ptr_regno_for_savres (sel);
26965 rtx ptr_reg = frame_reg_rtx;
26966 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26967 int end_save = info->gp_save_offset + info->gp_size;
26968 int ptr_off;
26970 if (ptr_regno == 12)
26971 sp_adjust = 0;
26972 if (!ptr_set_up)
26973 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26975 /* Need to adjust r11 (r12) if we saved any FPRs. */
26976 if (end_save + frame_off != 0)
26978 rtx offset = GEN_INT (end_save + frame_off);
26980 if (ptr_set_up)
26981 frame_off = -end_save;
26982 else
26983 NOT_INUSE (ptr_regno);
26984 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26986 else if (!ptr_set_up)
26988 NOT_INUSE (ptr_regno);
26989 emit_move_insn (ptr_reg, frame_reg_rtx);
26991 ptr_off = -end_save;
26992 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26993 info->gp_save_offset + ptr_off,
26994 info->lr_save_offset + ptr_off,
26995 reg_mode, sel);
26996 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26997 NULL_RTX, NULL_RTX);
26998 if (lr)
26999 END_USE (0);
27001 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27003 rtvec p;
27004 int i;
27005 p = rtvec_alloc (32 - info->first_gp_reg_save);
27006 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27007 RTVEC_ELT (p, i)
27008 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27009 frame_reg_rtx,
27010 info->gp_save_offset + frame_off + reg_size * i);
27011 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27012 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27013 NULL_RTX, NULL_RTX);
27015 else if (!WORLD_SAVE_P (info))
27017 int offset = info->gp_save_offset + frame_off;
27018 for (int i = info->first_gp_reg_save; i < 32; i++)
27020 if (save_reg_p (i)
27021 && !cfun->machine->gpr_is_wrapped_separately[i])
27022 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27023 sp_off - frame_off);
27025 offset += reg_size;
27029 if (crtl->calls_eh_return)
27031 unsigned int i;
27032 rtvec p;
27034 for (i = 0; ; ++i)
27036 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27037 if (regno == INVALID_REGNUM)
27038 break;
27041 p = rtvec_alloc (i);
27043 for (i = 0; ; ++i)
27045 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27046 if (regno == INVALID_REGNUM)
27047 break;
27049 rtx set
27050 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27051 sp_reg_rtx,
27052 info->ehrd_offset + sp_off + reg_size * (int) i);
27053 RTVEC_ELT (p, i) = set;
27054 RTX_FRAME_RELATED_P (set) = 1;
27057 insn = emit_insn (gen_blockage ());
27058 RTX_FRAME_RELATED_P (insn) = 1;
27059 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27062 /* In AIX ABI we need to make sure r2 is really saved. */
27063 if (TARGET_AIX && crtl->calls_eh_return)
27065 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27066 rtx join_insn, note;
27067 rtx_insn *save_insn;
27068 long toc_restore_insn;
27070 tmp_reg = gen_rtx_REG (Pmode, 11);
27071 tmp_reg_si = gen_rtx_REG (SImode, 11);
27072 if (using_static_chain_p)
27074 START_USE (0);
27075 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27077 else
27078 START_USE (11);
27079 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27080 /* Peek at the instruction to which this function returns. If it's
27081 restoring r2, then we know we've already saved r2. We can't
27082 unconditionally save r2 because the value we have will already
27083 be updated if we arrived at this function via a plt call or
27084 toc adjusting stub. */
27085 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27086 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27087 + RS6000_TOC_SAVE_SLOT);
27088 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27089 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27090 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27091 validate_condition_mode (EQ, CCUNSmode);
27092 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27093 emit_insn (gen_rtx_SET (compare_result,
27094 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27095 toc_save_done = gen_label_rtx ();
27096 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27097 gen_rtx_EQ (VOIDmode, compare_result,
27098 const0_rtx),
27099 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27100 pc_rtx);
27101 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27102 JUMP_LABEL (jump) = toc_save_done;
27103 LABEL_NUSES (toc_save_done) += 1;
27105 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27106 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27107 sp_off - frame_off);
27109 emit_label (toc_save_done);
27111 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27112 have a CFG that has different saves along different paths.
27113 Move the note to a dummy blockage insn, which describes that
27114 R2 is unconditionally saved after the label. */
27115 /* ??? An alternate representation might be a special insn pattern
27116 containing both the branch and the store. That might give the
27117 code that minimizes the number of DW_CFA_advance opcodes more
27118 freedom in placing the annotations. */
27119 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27120 if (note)
27121 remove_note (save_insn, note);
27122 else
27123 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27124 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27125 RTX_FRAME_RELATED_P (save_insn) = 0;
27127 join_insn = emit_insn (gen_blockage ());
27128 REG_NOTES (join_insn) = note;
27129 RTX_FRAME_RELATED_P (join_insn) = 1;
27131 if (using_static_chain_p)
27133 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27134 END_USE (0);
27136 else
27137 END_USE (11);
27140 /* Save CR if we use any that must be preserved. */
27141 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27143 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27144 GEN_INT (info->cr_save_offset + frame_off));
27145 rtx mem = gen_frame_mem (SImode, addr);
27147 /* If we didn't copy cr before, do so now using r0. */
27148 if (cr_save_rtx == NULL_RTX)
27150 START_USE (0);
27151 cr_save_rtx = gen_rtx_REG (SImode, 0);
27152 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27155 /* Saving CR requires a two-instruction sequence: one instruction
27156 to move the CR to a general-purpose register, and a second
27157 instruction that stores the GPR to memory.
27159 We do not emit any DWARF CFI records for the first of these,
27160 because we cannot properly represent the fact that CR is saved in
27161 a register. One reason is that we cannot express that multiple
27162 CR fields are saved; another reason is that on 64-bit, the size
27163 of the CR register in DWARF (4 bytes) differs from the size of
27164 a general-purpose register.
27166 This means if any intervening instruction were to clobber one of
27167 the call-saved CR fields, we'd have incorrect CFI. To prevent
27168 this from happening, we mark the store to memory as a use of
27169 those CR fields, which prevents any such instruction from being
27170 scheduled in between the two instructions. */
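/* A sketch of the PARALLEL built below, assuming only CR2 and CR3
   must be preserved (CR0 is hard reg 68, so CR2 is 70 and CR3 is 71):
     (parallel [(set (mem:SI <cr save slot>) (reg:SI 0))
                (use (reg:CC 70))
                (use (reg:CC 71))])
   i.e. a single store of the GPR copy of CR that also mentions the
   CR fields it logically saves.  */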
27171 rtx crsave_v[9];
27172 int n_crsave = 0;
27173 int i;
27175 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27176 for (i = 0; i < 8; i++)
27177 if (save_reg_p (CR0_REGNO + i))
27178 crsave_v[n_crsave++]
27179 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27181 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27182 gen_rtvec_v (n_crsave, crsave_v)));
27183 END_USE (REGNO (cr_save_rtx));
27185 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27186 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27187 so we need to construct a frame expression manually. */
27188 RTX_FRAME_RELATED_P (insn) = 1;
27190 /* Update address to be stack-pointer relative, like
27191 rs6000_frame_related would do. */
27192 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27193 GEN_INT (info->cr_save_offset + sp_off));
27194 mem = gen_frame_mem (SImode, addr);
27196 if (DEFAULT_ABI == ABI_ELFv2)
27198 /* In the ELFv2 ABI we generate separate CFI records for each
27199 CR field that was actually saved. They all point to the
27200 same 32-bit stack slot. */
27201 rtx crframe[8];
27202 int n_crframe = 0;
27204 for (i = 0; i < 8; i++)
27205 if (save_reg_p (CR0_REGNO + i))
27207 crframe[n_crframe]
27208 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27210 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27211 n_crframe++;
27214 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27215 gen_rtx_PARALLEL (VOIDmode,
27216 gen_rtvec_v (n_crframe, crframe)));
27218 else
27220 /* In other ABIs, by convention, we use a single CR regnum to
27221 represent the fact that all call-saved CR fields are saved.
27222 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27223 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27224 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27228 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27229 *separate* slots if the routine calls __builtin_eh_return, so
27230 that they can be independently restored by the unwinder. */
27231 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27233 int i, cr_off = info->ehcr_offset;
27234 rtx crsave;
27236 /* ??? We might get better performance by using multiple mfocrf
27237 instructions. */
27238 crsave = gen_rtx_REG (SImode, 0);
27239 emit_insn (gen_prologue_movesi_from_cr (crsave));
27241 for (i = 0; i < 8; i++)
27242 if (!call_used_regs[CR0_REGNO + i])
27244 rtvec p = rtvec_alloc (2);
27245 RTVEC_ELT (p, 0)
27246 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27247 RTVEC_ELT (p, 1)
27248 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27250 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27252 RTX_FRAME_RELATED_P (insn) = 1;
27253 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27254 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27255 sp_reg_rtx, cr_off + sp_off));
27257 cr_off += reg_size;
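/* Illustrative layout (assuming 64-bit, with CR2, CR3 and CR4 the
   call-saved fields): three SImode stores land at ehcr_offset,
   ehcr_offset + 8 and ehcr_offset + 16; each field gets a full
   reg_size-wide slot so the unwinder can restore it independently.  */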
27261 /* Update stack and set back pointer unless this is V.4,
27262 for which it was done previously. */
27263 if (!WORLD_SAVE_P (info) && info->push_p
27264 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27266 rtx ptr_reg = NULL;
27267 int ptr_off = 0;
27269 /* If saving altivec regs we need to be able to address all save
27270 locations using a 16-bit offset. */
27271 if ((strategy & SAVE_INLINE_VRS) == 0
27272 || (info->altivec_size != 0
27273 && (info->altivec_save_offset + info->altivec_size - 16
27274 + info->total_size - frame_off) > 32767)
27275 || (info->vrsave_size != 0
27276 && (info->vrsave_save_offset
27277 + info->total_size - frame_off) > 32767))
27279 int sel = SAVRES_SAVE | SAVRES_VR;
27280 unsigned ptr_regno = ptr_regno_for_savres (sel);
27282 if (using_static_chain_p
27283 && ptr_regno == STATIC_CHAIN_REGNUM)
27284 ptr_regno = 12;
27285 if (REGNO (frame_reg_rtx) != ptr_regno)
27286 START_USE (ptr_regno);
27287 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27288 frame_reg_rtx = ptr_reg;
27289 ptr_off = info->altivec_save_offset + info->altivec_size;
27290 frame_off = -ptr_off;
27292 else if (REGNO (frame_reg_rtx) == 1)
27293 frame_off = info->total_size;
27294 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27295 ptr_reg, ptr_off);
27296 if (REGNO (frame_reg_rtx) == 12)
27297 sp_adjust = 0;
27298 sp_off = info->total_size;
27299 if (frame_reg_rtx != sp_reg_rtx)
27300 rs6000_emit_stack_tie (frame_reg_rtx, false);
27303 /* Set frame pointer, if needed. */
27304 if (frame_pointer_needed)
27306 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27307 sp_reg_rtx);
27308 RTX_FRAME_RELATED_P (insn) = 1;
27311 /* Save AltiVec registers if needed. Save here because the red zone does
27312 not always include AltiVec registers. */
27313 if (!WORLD_SAVE_P (info)
27314 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27316 int end_save = info->altivec_save_offset + info->altivec_size;
27317 int ptr_off;
27318 /* Oddly, the vector save/restore functions point r0 at the end
27319 of the save area, then use r11 or r12 to load offsets for
27320 [reg+reg] addressing. */
27321 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27322 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27323 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27325 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27326 NOT_INUSE (0);
27327 if (scratch_regno == 12)
27328 sp_adjust = 0;
27329 if (end_save + frame_off != 0)
27331 rtx offset = GEN_INT (end_save + frame_off);
27333 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27335 else
27336 emit_move_insn (ptr_reg, frame_reg_rtx);
27338 ptr_off = -end_save;
27339 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27340 info->altivec_save_offset + ptr_off,
27341 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27342 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27343 NULL_RTX, NULL_RTX);
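/* To recap the pointer math above: r0 is left pointing just past the
   AltiVec save area (frame_reg + altivec_save_offset + altivec_size
   + frame_off), and the out-of-line routine addresses each save slot
   at a negative offset from r0 via [reg+reg] addressing with the
   scratch register.  */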
27344 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27346 /* The oddity mentioned above clobbered our frame reg. */
27347 emit_move_insn (frame_reg_rtx, ptr_reg);
27348 frame_off = ptr_off;
27351 else if (!WORLD_SAVE_P (info)
27352 && info->altivec_size != 0)
27354 int i;
27356 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27357 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27359 rtx areg, savereg, mem;
27360 HOST_WIDE_INT offset;
27362 offset = (info->altivec_save_offset + frame_off
27363 + 16 * (i - info->first_altivec_reg_save));
27365 savereg = gen_rtx_REG (V4SImode, i);
27367 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27369 mem = gen_frame_mem (V4SImode,
27370 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27371 GEN_INT (offset)));
27372 insn = emit_insn (gen_rtx_SET (mem, savereg));
27373 areg = NULL_RTX;
27375 else
27377 NOT_INUSE (0);
27378 areg = gen_rtx_REG (Pmode, 0);
27379 emit_move_insn (areg, GEN_INT (offset));
27381 /* AltiVec addressing mode is [reg+reg]. */
27382 mem = gen_frame_mem (V4SImode,
27383 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27385 /* Rather than emitting a generic move, force use of the stvx
27386 instruction, which we always want on ISA 2.07 (power8) systems.
27387 In particular we don't want xxpermdi/stxvd2x for little
27388 endian. */
27389 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27392 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27393 areg, GEN_INT (offset));
27397 /* VRSAVE is a bit vector representing which AltiVec registers
27398 are used. The OS uses this to determine which vector
27399 registers to save on a context switch. We need to save
27400 VRSAVE on the stack frame, add whatever AltiVec registers we
27401 used in this function, and do the corresponding magic in the
27402 epilogue. */
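/* A rough sketch of the sequence emit_vrsave_prologue is expected to
   produce (register choice and exact insns are illustrative only):
     mfspr rS,256          # read VRSAVE (SPR 256)
     stw   rS,<off>(r1)    # save the caller's mask in the frame
     oris  rS,rS,<hi bits> # set bits for AltiVec regs used here
     mtspr 256,rS          # install the updated mask  */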
27404 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27406 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27407 be using r12 as frame_reg_rtx and r11 as the static chain
27408 pointer for nested functions. */
27409 int save_regno = 12;
27410 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27411 && !using_static_chain_p)
27412 save_regno = 11;
27413 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27415 save_regno = 11;
27416 if (using_static_chain_p)
27417 save_regno = 0;
27419 NOT_INUSE (save_regno);
27421 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27424 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27425 if (!TARGET_SINGLE_PIC_BASE
27426 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27427 && !constant_pool_empty_p ())
27428 || (DEFAULT_ABI == ABI_V4
27429 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27430 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27432 /* If emit_load_toc_table will use the link register, we need to save
27433 it. We use R12 for this purpose because emit_load_toc_table
27434 can use register 0. This allows us to use a plain 'blr' to return
27435 from the procedure more often. */
27436 int save_LR_around_toc_setup = (TARGET_ELF
27437 && DEFAULT_ABI == ABI_V4
27438 && flag_pic
27439 && ! info->lr_save_p
27440 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27441 if (save_LR_around_toc_setup)
27443 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27444 rtx tmp = gen_rtx_REG (Pmode, 12);
27446 sp_adjust = 0;
27447 insn = emit_move_insn (tmp, lr);
27448 RTX_FRAME_RELATED_P (insn) = 1;
27450 rs6000_emit_load_toc_table (TRUE);
27452 insn = emit_move_insn (lr, tmp);
27453 add_reg_note (insn, REG_CFA_RESTORE, lr);
27454 RTX_FRAME_RELATED_P (insn) = 1;
27456 else
27457 rs6000_emit_load_toc_table (TRUE);
27460 #if TARGET_MACHO
27461 if (!TARGET_SINGLE_PIC_BASE
27462 && DEFAULT_ABI == ABI_DARWIN
27463 && flag_pic && crtl->uses_pic_offset_table)
27465 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27466 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27468 /* Save and restore LR locally around this call (in R0). */
27469 if (!info->lr_save_p)
27470 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27472 emit_insn (gen_load_macho_picbase (src));
27474 emit_move_insn (gen_rtx_REG (Pmode,
27475 RS6000_PIC_OFFSET_TABLE_REGNUM),
27476 lr);
27478 if (!info->lr_save_p)
27479 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27481 #endif
27483 /* If we need to, save the TOC register after doing the stack setup.
27484 Do not emit eh frame info for this save. The unwinder wants info,
27485 conceptually attached to instructions in this function, about
27486 register values in the caller of this function. This R2 may have
27487 already been changed from the value in the caller.
27488 We don't attempt to write accurate DWARF EH frame info for R2
27489 because code emitted by gcc for a (non-pointer) function call
27490 doesn't save and restore R2. Instead, R2 is managed out-of-line
27491 by a linker generated plt call stub when the function resides in
27492 a shared library. This behavior is costly to describe in DWARF,
27493 both in terms of the size of DWARF info and the time taken in the
27494 unwinder to interpret it. R2 changes, apart from the
27495 calls_eh_return case earlier in this function, are handled by
27496 linux-unwind.h frob_update_context. */
27497 if (rs6000_save_toc_in_prologue_p ())
27499 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27500 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27503 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27504 if (using_split_stack && split_stack_arg_pointer_used_p ())
27505 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27508 /* Output .extern statements for the save/restore routines we use. */
27510 static void
27511 rs6000_output_savres_externs (FILE *file)
27513 rs6000_stack_t *info = rs6000_stack_info ();
27515 if (TARGET_DEBUG_STACK)
27516 debug_stack_info (info);
27518 /* Write .extern for any function we will call to save and restore
27519 fp values. */
27520 if (info->first_fp_reg_save < 64
27521 && !TARGET_MACHO
27522 && !TARGET_ELF)
27524 char *name;
27525 int regno = info->first_fp_reg_save - 32;
27527 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27529 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27530 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27531 name = rs6000_savres_routine_name (regno, sel);
27532 fprintf (file, "\t.extern %s\n", name);
27534 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27536 bool lr = (info->savres_strategy
27537 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27538 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27539 name = rs6000_savres_routine_name (regno, sel);
27540 fprintf (file, "\t.extern %s\n", name);
27545 /* Write function prologue. */
27547 static void
27548 rs6000_output_function_prologue (FILE *file)
27550 if (!cfun->is_thunk)
27551 rs6000_output_savres_externs (file);
27553 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27554 immediately after the global entry point label. */
27555 if (rs6000_global_entry_point_needed_p ())
27557 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27559 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27561 if (TARGET_CMODEL != CMODEL_LARGE)
27563 /* In the small and medium code models, we assume the TOC is less
27564 than 2 GB away from the text section, so it can be computed via the
27565 following two-instruction sequence.  */
27566 char buf[256];
27568 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27569 fprintf (file, "0:\taddis 2,12,.TOC.-");
27570 assemble_name (file, buf);
27571 fprintf (file, "@ha\n");
27572 fprintf (file, "\taddi 2,2,.TOC.-");
27573 assemble_name (file, buf);
27574 fprintf (file, "@l\n");
27576 else
27578 /* In the large code model, we allow arbitrary offsets between the
27579 TOC and the text section, so we have to load the offset from
27580 memory. The data field is emitted directly before the global
27581 entry point in rs6000_elf_declare_function_name. */
27582 char buf[256];
27584 #ifdef HAVE_AS_ENTRY_MARKERS
27585 /* If supported by the linker, emit a marker relocation. If the
27586 total code size of the final executable or shared library
27587 happens to fit into 2 GB after all, the linker will replace
27588 this code sequence with the sequence for the small or medium
27589 code model. */
27590 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27591 #endif
27592 fprintf (file, "\tld 2,");
27593 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27594 assemble_name (file, buf);
27595 fprintf (file, "-");
27596 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27597 assemble_name (file, buf);
27598 fprintf (file, "(12)\n");
27599 fprintf (file, "\tadd 2,2,12\n");
27602 fputs ("\t.localentry\t", file);
27603 assemble_name (file, name);
27604 fputs (",.-", file);
27605 assemble_name (file, name);
27606 fputs ("\n", file);
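/* Putting it together, for a function "foo" (an illustrative name)
   under the small/medium code model the emitted text is roughly:
   .LCF0:
   0:	addis 2,12,.TOC.-.LCF0@ha
   	addi 2,2,.TOC.-.LCF0@l
   	.localentry	foo,.-foo
   assuming r12 holds the global entry point address, as the ELFv2 ABI
   requires of the caller.  */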
27609 /* Output -mprofile-kernel code. This needs to be done here instead of
27610 in output_function_profile since it must go after the ELFv2 ABI
27611 local entry point. */
27612 if (TARGET_PROFILE_KERNEL && crtl->profile)
27614 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27615 gcc_assert (!TARGET_32BIT);
27617 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27619 /* In the ELFv2 ABI we have no compiler stack word.  It must be
27620 the responsibility of _mcount to preserve the static chain
27621 register if required.  */
27622 if (DEFAULT_ABI != ABI_ELFv2
27623 && cfun->static_chain_decl != NULL)
27625 asm_fprintf (file, "\tstd %s,24(%s)\n",
27626 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27627 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27628 asm_fprintf (file, "\tld %s,24(%s)\n",
27629 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27631 else
27632 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27635 rs6000_pic_labelno++;
27638 /* -mprofile-kernel code calls mcount before the function prologue,
27639 so a profiled leaf function should stay a leaf function.  */
27640 static bool
27641 rs6000_keep_leaf_when_profiled ()
27643 return TARGET_PROFILE_KERNEL;
27646 /* Non-zero if vmx regs are restored before the frame pop, zero if
27647 we restore after the pop when possible. */
27648 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27650 /* Restoring cr is a two-step process: loading a reg from the frame
27651 save, then moving the reg to cr. For ABI_V4 we must let the
27652 unwinder know that the stack location is no longer valid at or
27653 before the stack deallocation, but we can't emit a cfa_restore for
27654 cr at the stack deallocation like we do for other registers.
27655 The trouble is that it is possible for the move to cr to be
27656 scheduled after the stack deallocation. So say exactly where cr
27657 is located on each of the two insns. */
27659 static rtx
27660 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27662 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27663 rtx reg = gen_rtx_REG (SImode, regno);
27664 rtx_insn *insn = emit_move_insn (reg, mem);
27666 if (!exit_func && DEFAULT_ABI == ABI_V4)
27668 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27669 rtx set = gen_rtx_SET (reg, cr);
27671 add_reg_note (insn, REG_CFA_REGISTER, set);
27672 RTX_FRAME_RELATED_P (insn) = 1;
27674 return reg;
27677 /* Reload CR from REG. */
27679 static void
27680 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27682 int count = 0;
27683 int i;
27685 if (using_mfcr_multiple)
27687 for (i = 0; i < 8; i++)
27688 if (save_reg_p (CR0_REGNO + i))
27689 count++;
27690 gcc_assert (count);
27693 if (using_mfcr_multiple && count > 1)
27695 rtx_insn *insn;
27696 rtvec p;
27697 int ndx;
27699 p = rtvec_alloc (count);
27701 ndx = 0;
27702 for (i = 0; i < 8; i++)
27703 if (save_reg_p (CR0_REGNO + i))
27705 rtvec r = rtvec_alloc (2);
27706 RTVEC_ELT (r, 0) = reg;
27707 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27708 RTVEC_ELT (p, ndx) =
27709 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27710 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27711 ndx++;
27713 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27714 gcc_assert (ndx == count);
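/* For example, if CR2, CR3 and CR4 were saved, this PARALLEL would
   typically assemble to a single "mtcrf 0x38,<reg>": the mask bit
   for field i is 1 << (7 - i), giving 0x20 | 0x10 | 0x08 here.  */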
27716 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27717 CR field separately. */
27718 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27720 for (i = 0; i < 8; i++)
27721 if (save_reg_p (CR0_REGNO + i))
27722 add_reg_note (insn, REG_CFA_RESTORE,
27723 gen_rtx_REG (SImode, CR0_REGNO + i));
27725 RTX_FRAME_RELATED_P (insn) = 1;
27728 else
27729 for (i = 0; i < 8; i++)
27730 if (save_reg_p (CR0_REGNO + i))
27732 rtx insn = emit_insn (gen_movsi_to_cr_one
27733 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27735 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27736 CR field separately, attached to the insn that in fact
27737 restores this particular CR field. */
27738 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27740 add_reg_note (insn, REG_CFA_RESTORE,
27741 gen_rtx_REG (SImode, CR0_REGNO + i));
27743 RTX_FRAME_RELATED_P (insn) = 1;
27747 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27748 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27749 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27751 rtx_insn *insn = get_last_insn ();
27752 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27754 add_reg_note (insn, REG_CFA_RESTORE, cr);
27755 RTX_FRAME_RELATED_P (insn) = 1;
27759 /* Like cr, the move to lr instruction can be scheduled after the
27760 stack deallocation, but unlike cr, its stack frame save is still
27761 valid. So we only need to emit the cfa_restore on the correct
27762 instruction. */
27764 static void
27765 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27767 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27768 rtx reg = gen_rtx_REG (Pmode, regno);
27770 emit_move_insn (reg, mem);
27773 static void
27774 restore_saved_lr (int regno, bool exit_func)
27776 rtx reg = gen_rtx_REG (Pmode, regno);
27777 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27778 rtx_insn *insn = emit_move_insn (lr, reg);
27780 if (!exit_func && flag_shrink_wrap)
27782 add_reg_note (insn, REG_CFA_RESTORE, lr);
27783 RTX_FRAME_RELATED_P (insn) = 1;
27787 static rtx
27788 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27790 if (DEFAULT_ABI == ABI_ELFv2)
27792 int i;
27793 for (i = 0; i < 8; i++)
27794 if (save_reg_p (CR0_REGNO + i))
27796 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27797 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27798 cfa_restores);
27801 else if (info->cr_save_p)
27802 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27803 gen_rtx_REG (SImode, CR2_REGNO),
27804 cfa_restores);
27806 if (info->lr_save_p)
27807 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27808 gen_rtx_REG (Pmode, LR_REGNO),
27809 cfa_restores);
27810 return cfa_restores;
27813 /* Return true if OFFSET from the stack pointer can be clobbered by
27814 signals.  V.4 doesn't have any stack cushion; the AIX ABIs have 220
27815 or 288 bytes below the stack pointer that are not clobbered by signals.  */
27817 static inline bool
27818 offset_below_red_zone_p (HOST_WIDE_INT offset)
27820 return offset < (DEFAULT_ABI == ABI_V4
27821 ? 0
27822 : TARGET_32BIT ? -220 : -288);
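/* For example, on 64-bit AIX/ELFv2 an offset of -288 is still within
   the protected area (returns false), while -289 is below it (returns
   true); under ABI_V4 any offset below the stack pointer may be
   clobbered.  */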
27825 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27827 static void
27828 emit_cfa_restores (rtx cfa_restores)
27830 rtx_insn *insn = get_last_insn ();
27831 rtx *loc = &REG_NOTES (insn);
27833 while (*loc)
27834 loc = &XEXP (*loc, 1);
27835 *loc = cfa_restores;
27836 RTX_FRAME_RELATED_P (insn) = 1;
27839 /* Emit function epilogue as insns. */
27841 void
27842 rs6000_emit_epilogue (int sibcall)
27844 rs6000_stack_t *info;
27845 int restoring_GPRs_inline;
27846 int restoring_FPRs_inline;
27847 int using_load_multiple;
27848 int using_mtcr_multiple;
27849 int use_backchain_to_restore_sp;
27850 int restore_lr;
27851 int strategy;
27852 HOST_WIDE_INT frame_off = 0;
27853 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27854 rtx frame_reg_rtx = sp_reg_rtx;
27855 rtx cfa_restores = NULL_RTX;
27856 rtx insn;
27857 rtx cr_save_reg = NULL_RTX;
27858 machine_mode reg_mode = Pmode;
27859 int reg_size = TARGET_32BIT ? 4 : 8;
27860 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27861 ? DFmode : SFmode;
27862 int fp_reg_size = 8;
27863 int i;
27864 bool exit_func;
27865 unsigned ptr_regno;
27867 info = rs6000_stack_info ();
27869 strategy = info->savres_strategy;
27870 using_load_multiple = strategy & REST_MULTIPLE;
27871 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27872 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27873 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27874 || rs6000_cpu == PROCESSOR_PPC603
27875 || rs6000_cpu == PROCESSOR_PPC750
27876 || optimize_size);
27877 /* Restore via the backchain when we have a large frame, since this
27878 is more efficient than an addis, addi pair.  The second condition
27879 here will not trigger at the moment; we don't actually need a
27880 frame pointer for alloca, but the generic parts of the compiler
27881 give us one anyway.  */
27882 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27883 ? info->lr_save_offset
27884 : 0) > 32767
27885 || (cfun->calls_alloca
27886 && !frame_pointer_needed));
27887 restore_lr = (info->lr_save_p
27888 && (restoring_FPRs_inline
27889 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27890 && (restoring_GPRs_inline
27891 || info->first_fp_reg_save < 64)
27892 && !cfun->machine->lr_is_wrapped_separately);
27895 if (WORLD_SAVE_P (info))
27897 int i, j;
27898 char rname[30];
27899 const char *alloc_rname;
27900 rtvec p;
27902 /* eh_rest_world_r10 will return to the location saved in the LR
27903 stack slot (which is not likely to be our caller).
27904 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27905 rest_world is similar, except any R10 parameter is ignored.
27906 The exception-handling stuff that was here in 2.95 is no
27907 longer necessary. */
27909 p = rtvec_alloc (9
27910 + 32 - info->first_gp_reg_save
27911 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27912 + 63 + 1 - info->first_fp_reg_save);
27914 strcpy (rname, ((crtl->calls_eh_return) ?
27915 "*eh_rest_world_r10" : "*rest_world"));
27916 alloc_rname = ggc_strdup (rname);
27918 j = 0;
27919 RTVEC_ELT (p, j++) = ret_rtx;
27920 RTVEC_ELT (p, j++)
27921 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27922 /* The instruction pattern requires a clobber here;
27923 it is shared with the restVEC helper. */
27924 RTVEC_ELT (p, j++)
27925 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27928 /* CR register traditionally saved as CR2. */
27929 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27930 RTVEC_ELT (p, j++)
27931 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27932 if (flag_shrink_wrap)
27934 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27935 gen_rtx_REG (Pmode, LR_REGNO),
27936 cfa_restores);
27937 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27941 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27943 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27944 RTVEC_ELT (p, j++)
27945 = gen_frame_load (reg,
27946 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27947 if (flag_shrink_wrap
27948 && save_reg_p (info->first_gp_reg_save + i))
27949 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27951 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27953 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27954 RTVEC_ELT (p, j++)
27955 = gen_frame_load (reg,
27956 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27957 if (flag_shrink_wrap
27958 && save_reg_p (info->first_altivec_reg_save + i))
27959 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27961 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27963 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27964 ? DFmode : SFmode),
27965 info->first_fp_reg_save + i);
27966 RTVEC_ELT (p, j++)
27967 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27968 if (flag_shrink_wrap
27969 && save_reg_p (info->first_fp_reg_save + i))
27970 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27972 RTVEC_ELT (p, j++)
27973 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27974 RTVEC_ELT (p, j++)
27975 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27976 RTVEC_ELT (p, j++)
27977 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27978 RTVEC_ELT (p, j++)
27979 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27980 RTVEC_ELT (p, j++)
27981 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27982 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27984 if (flag_shrink_wrap)
27986 REG_NOTES (insn) = cfa_restores;
27987 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27988 RTX_FRAME_RELATED_P (insn) = 1;
27990 return;
27993 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27994 if (info->push_p)
27995 frame_off = info->total_size;
27997 /* Restore AltiVec registers if we must do so before adjusting the
27998 stack. */
27999 if (info->altivec_size != 0
28000 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28001 || (DEFAULT_ABI != ABI_V4
28002 && offset_below_red_zone_p (info->altivec_save_offset))))
28004 int i;
28005 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28007 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28008 if (use_backchain_to_restore_sp)
28010 int frame_regno = 11;
28012 if ((strategy & REST_INLINE_VRS) == 0)
28014 /* Of r11 and r12, select the one not clobbered by an
28015 out-of-line restore function for the frame register. */
28016 frame_regno = 11 + 12 - scratch_regno;
28018 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28019 emit_move_insn (frame_reg_rtx,
28020 gen_rtx_MEM (Pmode, sp_reg_rtx));
28021 frame_off = 0;
28023 else if (frame_pointer_needed)
28024 frame_reg_rtx = hard_frame_pointer_rtx;
28026 if ((strategy & REST_INLINE_VRS) == 0)
28028 int end_save = info->altivec_save_offset + info->altivec_size;
28029 int ptr_off;
28030 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28031 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28033 if (end_save + frame_off != 0)
28035 rtx offset = GEN_INT (end_save + frame_off);
28037 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28039 else
28040 emit_move_insn (ptr_reg, frame_reg_rtx);
28042 ptr_off = -end_save;
28043 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28044 info->altivec_save_offset + ptr_off,
28045 0, V4SImode, SAVRES_VR);
28047 else
28049 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28050 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28052 rtx addr, areg, mem, insn;
28053 rtx reg = gen_rtx_REG (V4SImode, i);
28054 HOST_WIDE_INT offset
28055 = (info->altivec_save_offset + frame_off
28056 + 16 * (i - info->first_altivec_reg_save));
28058 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28060 mem = gen_frame_mem (V4SImode,
28061 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28062 GEN_INT (offset)));
28063 insn = gen_rtx_SET (reg, mem);
28065 else
28067 areg = gen_rtx_REG (Pmode, 0);
28068 emit_move_insn (areg, GEN_INT (offset));
28070 /* AltiVec addressing mode is [reg+reg]. */
28071 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28072 mem = gen_frame_mem (V4SImode, addr);
28074 /* Rather than emitting a generic move, force use of the
28075 lvx instruction, which we always want. In particular we
28076 don't want lxvd2x/xxpermdi for little endian. */
28077 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28080 (void) emit_insn (insn);
28084 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28085 if (((strategy & REST_INLINE_VRS) == 0
28086 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28087 && (flag_shrink_wrap
28088 || (offset_below_red_zone_p
28089 (info->altivec_save_offset
28090 + 16 * (i - info->first_altivec_reg_save))))
28091 && save_reg_p (i))
28093 rtx reg = gen_rtx_REG (V4SImode, i);
28094 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28098 /* Restore VRSAVE if we must do so before adjusting the stack. */
28099 if (info->vrsave_size != 0
28100 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28101 || (DEFAULT_ABI != ABI_V4
28102 && offset_below_red_zone_p (info->vrsave_save_offset))))
28104 rtx reg;
28106 if (frame_reg_rtx == sp_reg_rtx)
28108 if (use_backchain_to_restore_sp)
28110 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28111 emit_move_insn (frame_reg_rtx,
28112 gen_rtx_MEM (Pmode, sp_reg_rtx));
28113 frame_off = 0;
28115 else if (frame_pointer_needed)
28116 frame_reg_rtx = hard_frame_pointer_rtx;
28119 reg = gen_rtx_REG (SImode, 12);
28120 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28121 info->vrsave_save_offset + frame_off));
28123 emit_insn (generate_set_vrsave (reg, info, 1));
28126 insn = NULL_RTX;
28127 /* If we have a large stack frame, restore the old stack pointer
28128 using the backchain. */
28129 if (use_backchain_to_restore_sp)
28131 if (frame_reg_rtx == sp_reg_rtx)
28133 /* Under V.4, don't reset the stack pointer until after we're done
28134 loading the saved registers. */
28135 if (DEFAULT_ABI == ABI_V4)
28136 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28138 insn = emit_move_insn (frame_reg_rtx,
28139 gen_rtx_MEM (Pmode, sp_reg_rtx));
28140 frame_off = 0;
28142 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28143 && DEFAULT_ABI == ABI_V4)
28144 /* frame_reg_rtx has been set up by the altivec restore. */
28146 else
28148 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28149 frame_reg_rtx = sp_reg_rtx;
28152 /* If we have a frame pointer, we can restore the old stack pointer
28153 from it. */
28154 else if (frame_pointer_needed)
28156 frame_reg_rtx = sp_reg_rtx;
28157 if (DEFAULT_ABI == ABI_V4)
28158 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28159 /* Prevent reordering memory accesses against stack pointer restore. */
28160 else if (cfun->calls_alloca
28161 || offset_below_red_zone_p (-info->total_size))
28162 rs6000_emit_stack_tie (frame_reg_rtx, true);
28164 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28165 GEN_INT (info->total_size)));
28166 frame_off = 0;
28168 else if (info->push_p
28169 && DEFAULT_ABI != ABI_V4
28170 && !crtl->calls_eh_return)
28172 /* Prevent reordering memory accesses against stack pointer restore. */
28173 if (cfun->calls_alloca
28174 || offset_below_red_zone_p (-info->total_size))
28175 rs6000_emit_stack_tie (frame_reg_rtx, false);
28176 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28177 GEN_INT (info->total_size)));
28178 frame_off = 0;
28180 if (insn && frame_reg_rtx == sp_reg_rtx)
28182 if (cfa_restores)
28184 REG_NOTES (insn) = cfa_restores;
28185 cfa_restores = NULL_RTX;
28187 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28188 RTX_FRAME_RELATED_P (insn) = 1;
28191 /* Restore AltiVec registers if we have not done so already. */
28192 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28193 && info->altivec_size != 0
28194 && (DEFAULT_ABI == ABI_V4
28195 || !offset_below_red_zone_p (info->altivec_save_offset)))
28197 int i;
28199 if ((strategy & REST_INLINE_VRS) == 0)
28201 int end_save = info->altivec_save_offset + info->altivec_size;
28202 int ptr_off;
28203 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28204 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28205 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28207 if (end_save + frame_off != 0)
28209 rtx offset = GEN_INT (end_save + frame_off);
28211 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28213 else
28214 emit_move_insn (ptr_reg, frame_reg_rtx);
28216 ptr_off = -end_save;
28217 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28218 info->altivec_save_offset + ptr_off,
28219 0, V4SImode, SAVRES_VR);
28220 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28222 /* Frame reg was clobbered by the out-of-line save.  Restore it
28223 from ptr_reg, and if we are calling an out-of-line gpr or
28224 fpr restore, set up the correct pointer and offset.  */
28225 unsigned newptr_regno = 1;
28226 if (!restoring_GPRs_inline)
28228 bool lr = info->gp_save_offset + info->gp_size == 0;
28229 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28230 newptr_regno = ptr_regno_for_savres (sel);
28231 end_save = info->gp_save_offset + info->gp_size;
28233 else if (!restoring_FPRs_inline)
28235 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28236 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28237 newptr_regno = ptr_regno_for_savres (sel);
28238 end_save = info->fp_save_offset + info->fp_size;
28241 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28242 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28244 if (end_save + ptr_off != 0)
28246 rtx offset = GEN_INT (end_save + ptr_off);
28248 frame_off = -end_save;
28249 if (TARGET_32BIT)
28250 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28251 ptr_reg, offset));
28252 else
28253 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28254 ptr_reg, offset));
28256 else
28258 frame_off = ptr_off;
28259 emit_move_insn (frame_reg_rtx, ptr_reg);
28263 else
28265 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28266 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28268 rtx addr, areg, mem, insn;
28269 rtx reg = gen_rtx_REG (V4SImode, i);
28270 HOST_WIDE_INT offset
28271 = (info->altivec_save_offset + frame_off
28272 + 16 * (i - info->first_altivec_reg_save));
28274 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28276 mem = gen_frame_mem (V4SImode,
28277 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28278 GEN_INT (offset)));
28279 insn = gen_rtx_SET (reg, mem);
28281 else
28283 areg = gen_rtx_REG (Pmode, 0);
28284 emit_move_insn (areg, GEN_INT (offset));
28286 /* AltiVec addressing mode is [reg+reg]. */
28287 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28288 mem = gen_frame_mem (V4SImode, addr);
28290 /* Rather than emitting a generic move, force use of the
28291 lvx instruction, which we always want. In particular we
28292 don't want lxvd2x/xxpermdi for little endian. */
28293 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28296 (void) emit_insn (insn);
28300 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28301 if (((strategy & REST_INLINE_VRS) == 0
28302 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28303 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28304 && save_reg_p (i))
28306 rtx reg = gen_rtx_REG (V4SImode, i);
28307 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28311 /* Restore VRSAVE if we have not done so already. */
28312 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28313 && info->vrsave_size != 0
28314 && (DEFAULT_ABI == ABI_V4
28315 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28317 rtx reg;
28319 reg = gen_rtx_REG (SImode, 12);
28320 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28321 info->vrsave_save_offset + frame_off));
28323 emit_insn (generate_set_vrsave (reg, info, 1));
28326 /* If we exit by an out-of-line restore function on ABI_V4 then that
28327 function will deallocate the stack, so we don't need to worry
28328 about the unwinder restoring cr from an invalid stack frame
28329 location. */
28330 exit_func = (!restoring_FPRs_inline
28331 || (!restoring_GPRs_inline
28332 && info->first_fp_reg_save == 64));
28334 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28335 *separate* slots if the routine calls __builtin_eh_return, so
28336 that they can be independently restored by the unwinder. */
28337 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28339 int i, cr_off = info->ehcr_offset;
28341 for (i = 0; i < 8; i++)
28342 if (!call_used_regs[CR0_REGNO + i])
28344 rtx reg = gen_rtx_REG (SImode, 0);
28345 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28346 cr_off + frame_off));
28348 insn = emit_insn (gen_movsi_to_cr_one
28349 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28351 if (!exit_func && flag_shrink_wrap)
28353 add_reg_note (insn, REG_CFA_RESTORE,
28354 gen_rtx_REG (SImode, CR0_REGNO + i));
28356 RTX_FRAME_RELATED_P (insn) = 1;
28359 cr_off += reg_size;
28363 /* Get the old lr if we saved it. If we are restoring registers
28364 out-of-line, then the out-of-line routines can do this for us. */
28365 if (restore_lr && restoring_GPRs_inline)
28366 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28368 /* Get the old cr if we saved it. */
28369 if (info->cr_save_p)
28371 unsigned cr_save_regno = 12;
28373 if (!restoring_GPRs_inline)
28375 /* Ensure we don't use the register used by the out-of-line
28376 gpr register restore below. */
28377 bool lr = info->gp_save_offset + info->gp_size == 0;
28378 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28379 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28381 if (gpr_ptr_regno == 12)
28382 cr_save_regno = 11;
28383 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28385 else if (REGNO (frame_reg_rtx) == 12)
28386 cr_save_regno = 11;
28388 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28389 info->cr_save_offset + frame_off,
28390 exit_func);
28393 /* Set LR here to try to overlap restores below. */
28394 if (restore_lr && restoring_GPRs_inline)
28395 restore_saved_lr (0, exit_func);
28397 /* Load exception handler data registers, if needed. */
28398 if (crtl->calls_eh_return)
28400 unsigned int i, regno;
28402 if (TARGET_AIX)
28404 rtx reg = gen_rtx_REG (reg_mode, 2);
28405 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28406 frame_off + RS6000_TOC_SAVE_SLOT));
28409 for (i = 0; ; ++i)
28411 rtx mem;
28413 regno = EH_RETURN_DATA_REGNO (i);
28414 if (regno == INVALID_REGNUM)
28415 break;
28417 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28418 info->ehrd_offset + frame_off
28419 + reg_size * (int) i);
28421 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
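/* On rs6000 the EH data registers are r3..r6 (EH_RETURN_DATA_REGNO
   maps i = 0..3 to regnos 3..6), so the loop above reloads up to
   four registers starting at ehrd_offset.  */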
28425 /* Restore GPRs. This is done as a PARALLEL if we are using
28426 the load-multiple instructions. */
28427 if (!restoring_GPRs_inline)
28429 /* We are jumping to an out-of-line function. */
28430 rtx ptr_reg;
28431 int end_save = info->gp_save_offset + info->gp_size;
28432 bool can_use_exit = end_save == 0;
28433 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28434 int ptr_off;
28436 /* Emit stack reset code if we need it. */
28437 ptr_regno = ptr_regno_for_savres (sel);
28438 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28439 if (can_use_exit)
28440 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28441 else if (end_save + frame_off != 0)
28442 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28443 GEN_INT (end_save + frame_off)));
28444 else if (REGNO (frame_reg_rtx) != ptr_regno)
28445 emit_move_insn (ptr_reg, frame_reg_rtx);
28446 if (REGNO (frame_reg_rtx) == ptr_regno)
28447 frame_off = -end_save;
28449 if (can_use_exit && info->cr_save_p)
28450 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28452 ptr_off = -end_save;
28453 rs6000_emit_savres_rtx (info, ptr_reg,
28454 info->gp_save_offset + ptr_off,
28455 info->lr_save_offset + ptr_off,
28456 reg_mode, sel);
28458 else if (using_load_multiple)
28460 rtvec p;
28461 p = rtvec_alloc (32 - info->first_gp_reg_save);
28462 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28463 RTVEC_ELT (p, i)
28464 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28465 frame_reg_rtx,
28466 info->gp_save_offset + frame_off + reg_size * i);
28467 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28469 else
28471 int offset = info->gp_save_offset + frame_off;
28472 for (i = info->first_gp_reg_save; i < 32; i++)
28474 if (save_reg_p (i)
28475 && !cfun->machine->gpr_is_wrapped_separately[i])
28477 rtx reg = gen_rtx_REG (reg_mode, i);
28478 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28481 offset += reg_size;
28485 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28487 /* If the frame pointer was used then we can't delay emitting
28488 a REG_CFA_DEF_CFA note. This must happen on the insn that
28489 restores the frame pointer, r31. We may have already emitted
28490 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28491 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28492 be harmless if emitted. */
28493 if (frame_pointer_needed)
28495 insn = get_last_insn ();
28496 add_reg_note (insn, REG_CFA_DEF_CFA,
28497 plus_constant (Pmode, frame_reg_rtx, frame_off));
28498 RTX_FRAME_RELATED_P (insn) = 1;
28501 /* Set up cfa_restores. We always need these when
28502 shrink-wrapping. If not shrink-wrapping then we only need
28503 the cfa_restore when the stack location is no longer valid.
28504 The cfa_restores must be emitted on or before the insn that
28505 invalidates the stack, and of course must not be emitted
28506 before the insn that actually does the restore. The latter
28507 is why it is a bad idea to emit the cfa_restores as a group
28508 on the last instruction here that actually does a restore:
28509 That insn may be reordered with respect to others doing
28510 restores. */
28511 if (flag_shrink_wrap
28512 && !restoring_GPRs_inline
28513 && info->first_fp_reg_save == 64)
28514 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28516 for (i = info->first_gp_reg_save; i < 32; i++)
28517 if (save_reg_p (i)
28518 && !cfun->machine->gpr_is_wrapped_separately[i])
28520 rtx reg = gen_rtx_REG (reg_mode, i);
28521 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28525 if (!restoring_GPRs_inline
28526 && info->first_fp_reg_save == 64)
28528 /* We are jumping to an out-of-line function. */
28529 if (cfa_restores)
28530 emit_cfa_restores (cfa_restores);
28531 return;
28534 if (restore_lr && !restoring_GPRs_inline)
28536 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28537 restore_saved_lr (0, exit_func);
28540 /* Restore fpr's if we need to do it without calling a function. */
28541 if (restoring_FPRs_inline)
28543 int offset = info->fp_save_offset + frame_off;
28544 for (i = info->first_fp_reg_save; i < 64; i++)
28546 if (save_reg_p (i)
28547 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28549 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28550 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28551 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28552 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28553 cfa_restores);
28556 offset += fp_reg_size;
28560 /* If we saved cr, restore it here. Just those that were used. */
28561 if (info->cr_save_p)
28562 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28564 /* If this is V.4, unwind the stack pointer after all of the loads
28565 have been done, or set up r11 if we are restoring fp out of line. */
28566 ptr_regno = 1;
28567 if (!restoring_FPRs_inline)
28569 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28570 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28571 ptr_regno = ptr_regno_for_savres (sel);
28574 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28575 if (REGNO (frame_reg_rtx) == ptr_regno)
28576 frame_off = 0;
28578 if (insn && restoring_FPRs_inline)
28580 if (cfa_restores)
28582 REG_NOTES (insn) = cfa_restores;
28583 cfa_restores = NULL_RTX;
28585 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28586 RTX_FRAME_RELATED_P (insn) = 1;
28589 if (crtl->calls_eh_return)
28591 rtx sa = EH_RETURN_STACKADJ_RTX;
28592 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28595 if (!sibcall && restoring_FPRs_inline)
28597 if (cfa_restores)
28599 /* We can't hang the cfa_restores off a simple return,
28600 since the shrink-wrap code sometimes uses an existing
28601 return. This means there might be a path from
28602 pre-prologue code to this return, and dwarf2cfi code
28603 wants the eh_frame unwinder state to be the same on
28604 all paths to any point. So we need to emit the
28605 cfa_restores before the return. For -m64 we really
28606 don't need epilogue cfa_restores at all, except for
28607 this irritating dwarf2cfi-with-shrink-wrap
28608 requirement; the stack red-zone means eh_frame info
28609 from the prologue telling the unwinder to restore
28610 from the stack is perfectly good right to the end of
28611 the function. */
28612 emit_insn (gen_blockage ());
28613 emit_cfa_restores (cfa_restores);
28614 cfa_restores = NULL_RTX;
28617 emit_jump_insn (targetm.gen_simple_return ());
28620 if (!sibcall && !restoring_FPRs_inline)
28622 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28623 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28624 int elt = 0;
28625 RTVEC_ELT (p, elt++) = ret_rtx;
28626 if (lr)
28627 RTVEC_ELT (p, elt++)
28628 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28630 /* We have to restore more than two FP registers, so branch to the
28631 restore function. It will return to our caller. */
28632 int i;
28633 int reg;
28634 rtx sym;
28636 if (flag_shrink_wrap)
28637 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28639 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28640 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28641 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28642 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28644 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28646 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28648 RTVEC_ELT (p, elt++)
28649 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28650 if (flag_shrink_wrap
28651 && save_reg_p (info->first_fp_reg_save + i))
28652 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28655 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28658 if (cfa_restores)
28660 if (sibcall)
28661 /* Ensure the cfa_restores are hung off an insn that won't
28662 be reordered above other restores. */
28663 emit_insn (gen_blockage ());
28665 emit_cfa_restores (cfa_restores);
28669 /* Write function epilogue. */
28671 static void
28672 rs6000_output_function_epilogue (FILE *file)
28674 #if TARGET_MACHO
28675 macho_branch_islands ();
28678 rtx_insn *insn = get_last_insn ();
28679 rtx_insn *deleted_debug_label = NULL;
28681 /* Mach-O doesn't support labels at the end of objects, so if
28682 it looks like we might want one, take special action.
28684 First, collect any sequence of deleted debug labels. */
28685 while (insn
28686 && NOTE_P (insn)
28687 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28689 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28690 notes only; instead set their CODE_LABEL_NUMBER to -1,
28691 otherwise there would be code generation differences
28692 between -g and -g0.  */
28693 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28694 deleted_debug_label = insn;
28695 insn = PREV_INSN (insn);
28698 /* Second, if we have:
28699 label:
28700 barrier
28701 then this needs to be detected, so skip past the barrier. */
28703 if (insn && BARRIER_P (insn))
28704 insn = PREV_INSN (insn);
28706 /* Up to now we've only seen notes or barriers. */
28707 if (insn)
28709 if (LABEL_P (insn)
28710 || (NOTE_P (insn)
28711 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28712 /* Trailing label: <barrier>. */
28713 fputs ("\tnop\n", file);
28714 else
28716 /* Lastly, see if we have a completely empty function body. */
28717 while (insn && ! INSN_P (insn))
28718 insn = PREV_INSN (insn);
28719 /* If we don't find any insns, we've got an empty function body,
28720 i.e. completely empty, without a return or branch.  This is
28721 taken as the case where a function body has been removed
28722 because it contains an inline __builtin_unreachable().  GCC
28723 states that reaching __builtin_unreachable() means UB, so we're
28724 not obliged to do anything special; however, we want
28725 non-zero-sized function bodies.  To meet this, and to help the
28726 user out, let's trap the case.  */
28727 if (insn == NULL)
28728 fputs ("\ttrap\n", file);
28731 else if (deleted_debug_label)
28732 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28733 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28734 CODE_LABEL_NUMBER (insn) = -1;
28736 #endif
28738 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28739 on its format.
28741 We don't output a traceback table if -finhibit-size-directive was
28742 used. The documentation for -finhibit-size-directive reads
28743 ``don't output a @code{.size} assembler directive, or anything
28744 else that would cause trouble if the function is split in the
28745 middle, and the two halves are placed at locations far apart in
28746 memory.'' The traceback table has this property, since it
28747 includes the offset from the start of the function to the
28748 traceback table itself.
28750 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28751 different traceback table.  */
28752 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28753 && ! flag_inhibit_size_directive
28754 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28756 const char *fname = NULL;
28757 const char *language_string = lang_hooks.name;
28758 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28759 int i;
28760 int optional_tbtab;
28761 rs6000_stack_t *info = rs6000_stack_info ();
28763 if (rs6000_traceback == traceback_full)
28764 optional_tbtab = 1;
28765 else if (rs6000_traceback == traceback_part)
28766 optional_tbtab = 0;
28767 else
28768 optional_tbtab = !optimize_size && !TARGET_ELF;
28770 if (optional_tbtab)
28772 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28773 while (*fname == '.') /* V.4 encodes . in the name */
28774 fname++;
28776 /* Need label immediately before tbtab, so we can compute
28777 its offset from the function start. */
28778 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28779 ASM_OUTPUT_LABEL (file, fname);
28782 /* The .tbtab pseudo-op can only be used for the first eight
28783 expressions, since it can't handle the possibly variable
28784 length fields that follow.  However, if you omit the optional
28785 fields, the assembler outputs zeros for all optional fields
28786 anyway, giving each variable-length field its minimum length
28787 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
28788 pseudo-op at all.  */
28790 /* An all-zero word flags the start of the tbtab, for debuggers
28791 that have to find it by searching forward from the entry
28792 point or from the current pc. */
28793 fputs ("\t.long 0\n", file);
28795 /* Tbtab format type. Use format type 0. */
28796 fputs ("\t.byte 0,", file);
28798 /* Language type. Unfortunately, there does not seem to be any
28799 official way to discover the language being compiled, so we
28800 use language_string.
28801 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28802 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28803 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28804 either, so for now use 0. */
28805 if (lang_GNU_C ()
28806 || ! strcmp (language_string, "GNU GIMPLE")
28807 || ! strcmp (language_string, "GNU Go")
28808 || ! strcmp (language_string, "libgccjit"))
28809 i = 0;
28810 else if (! strcmp (language_string, "GNU F77")
28811 || lang_GNU_Fortran ())
28812 i = 1;
28813 else if (! strcmp (language_string, "GNU Pascal"))
28814 i = 2;
28815 else if (! strcmp (language_string, "GNU Ada"))
28816 i = 3;
28817 else if (lang_GNU_CXX ()
28818 || ! strcmp (language_string, "GNU Objective-C++"))
28819 i = 9;
28820 else if (! strcmp (language_string, "GNU Java"))
28821 i = 13;
28822 else if (! strcmp (language_string, "GNU Objective-C"))
28823 i = 14;
28824 else
28825 gcc_unreachable ();
28826 fprintf (file, "%d,", i);
28828 /* 8 single bit fields: global linkage (not set for C extern linkage,
28829 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28830 from start of procedure stored in tbtab, internal function, function
28831 has controlled storage, function has no toc, function uses fp,
28832 function logs/aborts fp operations. */
28833 /* Assume that fp operations are used if any fp reg must be saved. */
28834 fprintf (file, "%d,",
28835 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28837 /* 6 bitfields: function is interrupt handler, name present in
28838 proc table, function calls alloca, on condition directives
28839 (controls stack walks, 3 bits), saves condition reg, saves
28840 link reg. */
28841 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28842 set up as a frame pointer, even when there is no alloca call. */
28843 fprintf (file, "%d,",
28844 ((optional_tbtab << 6)
28845 | ((optional_tbtab & frame_pointer_needed) << 5)
28846 | (info->cr_save_p << 1)
28847 | (info->lr_save_p)));
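/* Worked example (values assumed for illustration): with full
   traceback info (optional_tbtab == 1), a frame pointer, and both CR
   and LR saved, the byte printed is 64 | 32 | 2 | 1 == 99.  */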
28849 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28850 (6 bits). */
28851 fprintf (file, "%d,",
28852 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28854 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28855 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28857 if (optional_tbtab)
28859 /* Compute the parameter info from the function decl argument
28860 list. */
28861 tree decl;
28862 int next_parm_info_bit = 31;
28864 for (decl = DECL_ARGUMENTS (current_function_decl);
28865 decl; decl = DECL_CHAIN (decl))
28867 rtx parameter = DECL_INCOMING_RTL (decl);
28868 machine_mode mode = GET_MODE (parameter);
28870 if (GET_CODE (parameter) == REG)
28872 if (SCALAR_FLOAT_MODE_P (mode))
28874 int bits;
28876 float_parms++;
28878 switch (mode)
28880 case E_SFmode:
28881 case E_SDmode:
28882 bits = 0x2;
28883 break;
28885 case E_DFmode:
28886 case E_DDmode:
28887 case E_TFmode:
28888 case E_TDmode:
28889 case E_IFmode:
28890 case E_KFmode:
28891 bits = 0x3;
28892 break;
28894 default:
28895 gcc_unreachable ();
28898 /* If only one bit will fit, don't OR in this entry.  */
28899 if (next_parm_info_bit > 0)
28900 parm_info |= (bits << (next_parm_info_bit - 1));
28901 next_parm_info_bit -= 2;
28903 else
28905 fixed_parms += ((GET_MODE_SIZE (mode)
28906 + (UNITS_PER_WORD - 1))
28907 / UNITS_PER_WORD);
28908 next_parm_info_bit -= 1;
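   /* Worked example (illustrative, assuming the usual ABI assignment of
      FP arguments to FP registers and the int to a GPR): for a function
      taking (double, int, float), the double places bits 11 at
      positions 31-30, the int leaves bit 29 zero, and the float places
      bits 10 at positions 28-27, so parm_info == 0xd0000000 with
      fixed_parms == 1 and float_parms == 2.  */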
28914 /* Number of fixed point parameters. */
28915 /* This is actually the number of words of fixed point parameters; thus
28916 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28917 fprintf (file, "%d,", fixed_parms);
28919 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28920 all on stack. */
28921 /* This is actually the number of fp registers that hold parameters;
28922 and thus the maximum value is 13. */
28923 /* Set parameters on stack bit if parameters are not in their original
28924 registers, regardless of whether they are on the stack? Xlc
28925 seems to set the bit when not optimizing. */
28926 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28928 if (optional_tbtab)
28930 /* Optional fields follow. Some are variable length. */
28932 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28933 float, 11 double float. */
28934 /* There is an entry for each parameter in a register, in the order
28935 that they occur in the parameter list. Any intervening arguments
28936 on the stack are ignored. If the list overflows a long (max
28937 possible length 34 bits) then completely leave off all elements
28938 that don't fit. */
28939 /* Only emit this long if there was at least one parameter. */
28940 if (fixed_parms || float_parms)
28941 fprintf (file, "\t.long %d\n", parm_info);
28943 /* Offset from start of code to tb table. */
28944 fputs ("\t.long ", file);
28945 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28946 RS6000_OUTPUT_BASENAME (file, fname);
28947 putc ('-', file);
28948 rs6000_output_function_entry (file, fname);
28949 putc ('\n', file);
28951 /* Interrupt handler mask. */
28952 /* Omit this long, since we never set the interrupt handler bit
28953 above. */
28955 /* Number of CTL (controlled storage) anchors. */
28956 /* Omit this long, since the has_ctl bit is never set above. */
28958 /* Displacement into stack of each CTL anchor. */
28959 /* Omit this list of longs, because there are no CTL anchors. */
28961 /* Length of function name. */
28962 if (*fname == '*')
28963 ++fname;
28964 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28966 /* Function name. */
28967 assemble_string (fname, strlen (fname));
28969 /* Register for alloca automatic storage; this is always reg 31.
28970 Only emit this if the alloca bit was set above. */
28971 if (frame_pointer_needed)
28972 fputs ("\t.byte 31\n", file);
28974 fputs ("\t.align 2\n", file);
28978 /* Arrange to define .LCTOC1 label, if not already done. */
28979 if (need_toc_init)
28981 need_toc_init = 0;
28982 if (!toc_initialized)
28984 switch_to_section (toc_section);
28985 switch_to_section (current_function_section ());
28990 /* -fsplit-stack support. */
28992 /* A SYMBOL_REF for __morestack. */
28993 static GTY(()) rtx morestack_ref;
28995 static rtx
28996 gen_add3_const (rtx rt, rtx ra, long c)
28998 if (TARGET_64BIT)
28999 return gen_adddi3 (rt, ra, GEN_INT (c));
29000 else
29001 return gen_addsi3 (rt, ra, GEN_INT (c));
29004 /* Emit -fsplit-stack prologue, which goes before the regular function
29005 prologue (at local entry point in the case of ELFv2). */
29007 void
29008 rs6000_expand_split_stack_prologue (void)
29010 rs6000_stack_t *info = rs6000_stack_info ();
29011 unsigned HOST_WIDE_INT allocate;
29012 long alloc_hi, alloc_lo;
29013 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29014 rtx_insn *insn;
29016 gcc_assert (flag_split_stack && reload_completed);
29018 if (!info->push_p)
29019 return;
29021 if (global_regs[29])
29023 error ("%qs uses register r29", "-fsplit-stack");
29024 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29025 "conflicts with %qD", global_regs_decl[29]);
29028 allocate = info->total_size;
29029 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29031 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29032 return;
29034 if (morestack_ref == NULL_RTX)
29036 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29037 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29038 | SYMBOL_FLAG_FUNCTION);
29041 r0 = gen_rtx_REG (Pmode, 0);
29042 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29043 r12 = gen_rtx_REG (Pmode, 12);
29044 emit_insn (gen_load_split_stack_limit (r0));
29045 /* Always emit two insns here to calculate the requested stack,
29046 so that the linker can edit them when adjusting size for calling
29047 non-split-stack code. */
29048 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29049 alloc_lo = -allocate - alloc_hi;
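   /* Worked example (illustrative): for allocate == 70000,
      -allocate == -70000, so alloc_hi == -65536 and alloc_lo == -4464
      (alloc_hi + alloc_lo == -70000, with alloc_lo in the signed
      16-bit range).  The two insns then amount to something like
	addis r12,r1,-1
	addi  r12,r12,-4464
      though the exact mnemonics depend on the add patterns.  */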
29050 if (alloc_hi != 0)
29052 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29053 if (alloc_lo != 0)
29054 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29055 else
29056 emit_insn (gen_nop ());
29058 else
29060 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29061 emit_insn (gen_nop ());
29064 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29065 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29066 ok_label = gen_label_rtx ();
29067 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29068 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29069 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29070 pc_rtx);
29071 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29072 JUMP_LABEL (insn) = ok_label;
29073 /* Mark the jump as very likely to be taken. */
29074 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29076 lr = gen_rtx_REG (Pmode, LR_REGNO);
29077 insn = emit_move_insn (r0, lr);
29078 RTX_FRAME_RELATED_P (insn) = 1;
29079 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29080 RTX_FRAME_RELATED_P (insn) = 1;
29082 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29083 const0_rtx, const0_rtx));
29084 call_fusage = NULL_RTX;
29085 use_reg (&call_fusage, r12);
29086 /* Say the call uses r0, even though it doesn't, to stop regrename
29087 from twiddling with the insns saving lr, trashing args for cfun.
29088 The insns restoring lr are similarly protected by making
29089 split_stack_return use r0. */
29090 use_reg (&call_fusage, r0);
29091 add_function_usage_to (insn, call_fusage);
29092 /* Indicate that this function can't jump to non-local gotos. */
29093 make_reg_eh_region_note_nothrow_nononlocal (insn);
29094 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29095 insn = emit_move_insn (lr, r0);
29096 add_reg_note (insn, REG_CFA_RESTORE, lr);
29097 RTX_FRAME_RELATED_P (insn) = 1;
29098 emit_insn (gen_split_stack_return ());
29100 emit_label (ok_label);
29101 LABEL_NUSES (ok_label) = 1;
29104 /* Return the internal arg pointer used for function incoming
29105 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29106 to copy it to a pseudo in order for it to be preserved over calls
29107 and suchlike. We'd really like to use a pseudo here for the
29108 internal arg pointer but data-flow analysis is not prepared to
29109 accept pseudos as live at the beginning of a function. */
29111 static rtx
29112 rs6000_internal_arg_pointer (void)
29114 if (flag_split_stack
29115 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29116 == NULL))
29119 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29121 rtx pat;
29123 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29124 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29126 /* Put the pseudo initialization right after the note at the
29127 beginning of the function. */
29128 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29129 gen_rtx_REG (Pmode, 12));
29130 push_topmost_sequence ();
29131 emit_insn_after (pat, get_insns ());
29132 pop_topmost_sequence ();
29134 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29135 FIRST_PARM_OFFSET (current_function_decl));
29137 return virtual_incoming_args_rtx;
29140 /* We may have to tell the dataflow pass that the split stack prologue
29141 is initializing a register. */
29143 static void
29144 rs6000_live_on_entry (bitmap regs)
29146 if (flag_split_stack)
29147 bitmap_set_bit (regs, 12);
29150 /* Emit -fsplit-stack dynamic stack allocation space check. */
29152 void
29153 rs6000_split_stack_space_check (rtx size, rtx label)
29155 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29156 rtx limit = gen_reg_rtx (Pmode);
29157 rtx requested = gen_reg_rtx (Pmode);
29158 rtx cmp = gen_reg_rtx (CCUNSmode);
29159 rtx jump;
29161 emit_insn (gen_load_split_stack_limit (limit));
29162 if (CONST_INT_P (size))
29163 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29164 else
29166 size = force_reg (Pmode, size);
29167 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29169 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29170 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29171 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29172 gen_rtx_LABEL_REF (VOIDmode, label),
29173 pc_rtx);
29174 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29175 JUMP_LABEL (jump) = label;
29178 /* A C compound statement that outputs the assembler code for a thunk
29179 function, used to implement C++ virtual function calls with
29180 multiple inheritance. The thunk acts as a wrapper around a virtual
29181 function, adjusting the implicit object parameter before handing
29182 control off to the real function.
29184 First, emit code to add the integer DELTA to the location that
29185 contains the incoming first argument. Assume that this argument
29186 contains a pointer, and is the one used to pass the `this' pointer
29187 in C++. This is the incoming argument *before* the function
29188 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29189 values of all other incoming arguments.
29191 After the addition, emit code to jump to FUNCTION, which is a
29192 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29193 not touch the return address. Hence returning from FUNCTION will
29194 return to whoever called the current `thunk'.
29196 The effect must be as if FUNCTION had been called directly with the
29197 adjusted first argument. This macro is responsible for emitting
29198 all of the code for a thunk function; output_function_prologue()
29199 and output_function_epilogue() are not invoked.
29201 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29202 been extracted from it.) It might possibly be useful on some
29203 targets, but probably not.
29205 If you do not define this macro, the target-independent code in the
29206 C++ frontend will generate a less efficient heavyweight thunk that
29207 calls FUNCTION instead of jumping to it. The generic approach does
29208 not support varargs. */
29210 static void
29211 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29212 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29213 tree function)
29215 rtx this_rtx, funexp;
29216 rtx_insn *insn;
29218 reload_completed = 1;
29219 epilogue_completed = 1;
29221 /* Mark the end of the (empty) prologue. */
29222 emit_note (NOTE_INSN_PROLOGUE_END);
29224 /* Find the "this" pointer. If the function returns a structure,
29225 the structure return pointer is in r3. */
29226 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29227 this_rtx = gen_rtx_REG (Pmode, 4);
29228 else
29229 this_rtx = gen_rtx_REG (Pmode, 3);
29231 /* Apply the constant offset, if required. */
29232 if (delta)
29233 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29235 /* Apply the offset from the vtable, if required. */
29236 if (vcall_offset)
29238 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29239 rtx tmp = gen_rtx_REG (Pmode, 12);
29241 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29242 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29244 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29245 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29247 else
29249 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29251 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29253 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
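  /* For illustration: a thunk with DELTA == -16 and no VCALL_OFFSET
     reduces to roughly
	addi r3,r3,-16
	b <function>
     modulo ABI details such as the TOC and local entry points.  */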
29256 /* Generate a tail call to the target function. */
29257 if (!TREE_USED (function))
29259 assemble_external (function);
29260 TREE_USED (function) = 1;
29262 funexp = XEXP (DECL_RTL (function), 0);
29263 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29265 #if TARGET_MACHO
29266 if (MACHOPIC_INDIRECT)
29267 funexp = machopic_indirect_call_target (funexp);
29268 #endif
29270 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29271 generate sibcall RTL explicitly. */
29272 insn = emit_call_insn (
29273 gen_rtx_PARALLEL (VOIDmode,
29274 gen_rtvec (3,
29275 gen_rtx_CALL (VOIDmode,
29276 funexp, const0_rtx),
29277 gen_rtx_USE (VOIDmode, const0_rtx),
29278 simple_return_rtx)));
29279 SIBLING_CALL_P (insn) = 1;
29280 emit_barrier ();
29282 /* Run just enough of rest_of_compilation to get the insns emitted.
29283 There's not really enough bulk here to make other passes such as
29284 instruction scheduling worthwhile. Note that use_thunk calls
29285 assemble_start_function and assemble_end_function. */
29286 insn = get_insns ();
29287 shorten_branches (insn);
29288 final_start_function (insn, file, 1);
29289 final (insn, file, 1);
29290 final_end_function ();
29292 reload_completed = 0;
29293 epilogue_completed = 0;
29296 /* A quick summary of the various types of 'constant-pool tables'
29297 under PowerPC:
29299 Target Flags Name One table per
29300 AIX (none) AIX TOC object file
29301 AIX -mfull-toc AIX TOC object file
29302 AIX -mminimal-toc AIX minimal TOC translation unit
29303 SVR4/EABI (none) SVR4 SDATA object file
29304 SVR4/EABI -fpic SVR4 pic object file
29305 SVR4/EABI -fPIC SVR4 PIC translation unit
29306 SVR4/EABI -mrelocatable EABI TOC function
29307 SVR4/EABI -maix AIX TOC object file
29308 SVR4/EABI -maix -mminimal-toc
29309 AIX minimal TOC translation unit
29311 	Name			Reg.	Set by	Entries	addrs?	fp?	sum?
29312 					made by
29314 	AIX TOC			2	crt0	as	Y	option	option
29315 	AIX minimal TOC		30	prolog	gcc	Y	Y	option
29316 	SVR4 SDATA		13	crt0	gcc	N	Y	N
29317 	SVR4 pic		30	prolog	ld	Y	not yet	N
29318 	SVR4 PIC		30	prolog	gcc	Y	option	option
29319 	EABI TOC		30	prolog	gcc	Y	option	option
29323 /* Hash functions for the hash table. */
29325 static unsigned
29326 rs6000_hash_constant (rtx k)
29328 enum rtx_code code = GET_CODE (k);
29329 machine_mode mode = GET_MODE (k);
29330 unsigned result = (code << 3) ^ mode;
29331 const char *format;
29332 int flen, fidx;
29334 format = GET_RTX_FORMAT (code);
29335 flen = strlen (format);
29336 fidx = 0;
29338 switch (code)
29340 case LABEL_REF:
29341 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29343 case CONST_WIDE_INT:
29345 int i;
29346 flen = CONST_WIDE_INT_NUNITS (k);
29347 for (i = 0; i < flen; i++)
29348 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29349 return result;
29352 case CONST_DOUBLE:
29353 if (mode != VOIDmode)
29354 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29355 flen = 2;
29356 break;
29358 case CODE_LABEL:
29359 fidx = 3;
29360 break;
29362 default:
29363 break;
29366 for (; fidx < flen; fidx++)
29367 switch (format[fidx])
29369 case 's':
29371 unsigned i, len;
29372 const char *str = XSTR (k, fidx);
29373 len = strlen (str);
29374 result = result * 613 + len;
29375 for (i = 0; i < len; i++)
29376 result = result * 613 + (unsigned) str[i];
29377 break;
29379 case 'u':
29380 case 'e':
29381 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29382 break;
29383 case 'i':
29384 case 'n':
29385 result = result * 613 + (unsigned) XINT (k, fidx);
29386 break;
29387 case 'w':
29388 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29389 result = result * 613 + (unsigned) XWINT (k, fidx);
29390 else
29392 size_t i;
29393 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29394 result = result * 613 + (unsigned) (XWINT (k, fidx)
29395 >> CHAR_BIT * i);
29397 break;
29398 case '0':
29399 break;
29400 default:
29401 gcc_unreachable ();
29404 return result;
29407 hashval_t
29408 toc_hasher::hash (toc_hash_struct *thc)
29410 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29413 /* Compare H1 and H2 for equivalence. */
29415 bool
29416 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29418 rtx r1 = h1->key;
29419 rtx r2 = h2->key;
29421 if (h1->key_mode != h2->key_mode)
29422 return false;
29424 return rtx_equal_p (r1, r2);
29427 /* These are the names given by the C++ front-end to vtables, and
29428 vtable-like objects. Ideally, this logic should not be here;
29429 instead, there should be some programmatic way of inquiring as
29430 to whether or not an object is a vtable. */
29432 #define VTABLE_NAME_P(NAME) \
29433 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
29434 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
29435 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
29436 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
29437 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
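/* E.g. for "struct Base", the Itanium C++ ABI mangles the vtable as
   "_ZTV4Base", the typeinfo as "_ZTI4Base", the VTT as "_ZTT4Base",
   and construction vtables with a "_ZTC" prefix; "_vt." names come
   from the old GNU v2 mangling.  */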
29439 #ifdef NO_DOLLAR_IN_LABEL
29440 /* Return a GGC-allocated character string translating dollar signs in
29441 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29443 const char *
29444 rs6000_xcoff_strip_dollar (const char *name)
29446 char *strip, *p;
29447 const char *q;
29448 size_t len;
29450 q = (const char *) strchr (name, '$');
29452 if (q == 0 || q == name)
29453 return name;
29455 len = strlen (name);
29456 strip = XALLOCAVEC (char, len + 1);
29457 strcpy (strip, name);
29458 p = strip + (q - name);
29459 while (p)
29461 *p = '_';
29462 p = strchr (p + 1, '$');
29465 return ggc_alloc_string (strip, len);
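/* Examples: "foo$bar$baz" becomes "foo_bar_baz", while "$foo" and
   "foo" are returned unchanged (a '$' in the first position, or no
   '$' at all, takes the early-return path above).  */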
29467 #endif
29469 void
29470 rs6000_output_symbol_ref (FILE *file, rtx x)
29472 const char *name = XSTR (x, 0);
29474 /* Currently C++ toc references to vtables can be emitted before it
29475 is decided whether the vtable is public or private. If this is
29476 the case, then the linker will eventually complain that there is
29477 a reference to an unknown section. Thus, for vtables only,
29478 we emit the TOC reference to reference the identifier and not the
29479 symbol. */
29480 if (VTABLE_NAME_P (name))
29482 RS6000_OUTPUT_BASENAME (file, name);
29484 else
29485 assemble_name (file, name);
29488 /* Output a TOC entry. We derive the entry name from what is being
29489 written. */
29491 void
29492 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29494 char buf[256];
29495 const char *name = buf;
29496 rtx base = x;
29497 HOST_WIDE_INT offset = 0;
29499 gcc_assert (!TARGET_NO_TOC);
29501 /* When the linker won't eliminate them, don't output duplicate
29502 TOC entries (this happens on AIX if there is any kind of TOC,
29503 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29504 CODE_LABELs. */
29505 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29507 struct toc_hash_struct *h;
29509 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29510 time because GGC is not initialized at that point. */
29511 if (toc_hash_table == NULL)
29512 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29514 h = ggc_alloc<toc_hash_struct> ();
29515 h->key = x;
29516 h->key_mode = mode;
29517 h->labelno = labelno;
29519 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29520 if (*found == NULL)
29521 *found = h;
29522 else /* This is indeed a duplicate.
29523 Set this label equal to that label. */
29525 fputs ("\t.set ", file);
29526 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29527 fprintf (file, "%d,", labelno);
29528 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29529 fprintf (file, "%d\n", ((*found)->labelno));
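	  /* For illustration, on ELF a duplicate entry for label 42
	     matching an earlier label 17 would come out as
		.set .LC42,.LC17
	     (the label prefix comes from ASM_OUTPUT_INTERNAL_LABEL_PREFIX
	     and is target-dependent).  */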
29531 #ifdef HAVE_AS_TLS
29532 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29533 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29534 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29536 fputs ("\t.set ", file);
29537 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29538 fprintf (file, "%d,", labelno);
29539 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29540 fprintf (file, "%d\n", ((*found)->labelno));
29542 #endif
29543 return;
29547 /* If we're going to put a double constant in the TOC, make sure it's
29548 aligned properly when strict alignment is on. */
29549 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29550 && STRICT_ALIGNMENT
29551 && GET_MODE_BITSIZE (mode) >= 64
29552 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29553 ASM_OUTPUT_ALIGN (file, 3);
29556 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29558 /* Handle FP constants specially. Note that if we have a minimal
29559 TOC, things we put here aren't actually in the TOC, so we can allow
29560 FP constants. */
29561 if (GET_CODE (x) == CONST_DOUBLE
29562 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29563 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29565 long k[4];
29567 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29568 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29569 else
29570 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29572 if (TARGET_64BIT)
29574 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29575 fputs (DOUBLE_INT_ASM_OP, file);
29576 else
29577 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29578 k[0] & 0xffffffff, k[1] & 0xffffffff,
29579 k[2] & 0xffffffff, k[3] & 0xffffffff);
29580 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29581 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29582 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29583 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29584 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29585 return;
29587 else
29589 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29590 fputs ("\t.long ", file);
29591 else
29592 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29593 k[0] & 0xffffffff, k[1] & 0xffffffff,
29594 k[2] & 0xffffffff, k[3] & 0xffffffff);
29595 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29596 k[0] & 0xffffffff, k[1] & 0xffffffff,
29597 k[2] & 0xffffffff, k[3] & 0xffffffff);
29598 return;
29601 else if (GET_CODE (x) == CONST_DOUBLE
29602 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29604 long k[2];
29606 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29607 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29608 else
29609 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29611 if (TARGET_64BIT)
29613 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29614 fputs (DOUBLE_INT_ASM_OP, file);
29615 else
29616 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29617 k[0] & 0xffffffff, k[1] & 0xffffffff);
29618 fprintf (file, "0x%lx%08lx\n",
29619 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29620 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29621 return;
29623 else
29625 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29626 fputs ("\t.long ", file);
29627 else
29628 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29629 k[0] & 0xffffffff, k[1] & 0xffffffff);
29630 fprintf (file, "0x%lx,0x%lx\n",
29631 k[0] & 0xffffffff, k[1] & 0xffffffff);
29632 return;
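  /* For illustration: the DFmode constant 1.0 has target image
     0x3ff00000 00000000, so a 64-bit big-endian AIX-style TOC emits
	.tc FD_3ff00000_0[TC],0x3ff0000000000000
     while 32-bit ELF would emit ".long 0x3ff00000,0x0" instead.  */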
29635 else if (GET_CODE (x) == CONST_DOUBLE
29636 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29638 long l;
29640 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29641 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29642 else
29643 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29645 if (TARGET_64BIT)
29647 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29648 fputs (DOUBLE_INT_ASM_OP, file);
29649 else
29650 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29651 if (WORDS_BIG_ENDIAN)
29652 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29653 else
29654 fprintf (file, "0x%lx\n", l & 0xffffffff);
29655 return;
29657 else
29659 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29660 fputs ("\t.long ", file);
29661 else
29662 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29663 fprintf (file, "0x%lx\n", l & 0xffffffff);
29664 return;
29667 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29669 unsigned HOST_WIDE_INT low;
29670 HOST_WIDE_INT high;
29672 low = INTVAL (x) & 0xffffffff;
29673 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29675 /* TOC entries are always Pmode-sized, so when big-endian
29676 smaller integer constants in the TOC need to be padded.
29677 (This is still a win over putting the constants in
29678 a separate constant pool, because then we'd have
29679 to have both a TOC entry _and_ the actual constant.)
29681 For a 32-bit target, CONST_INT values are loaded and shifted
29682 entirely within `low' and can be stored in one TOC entry. */
29684 /* It would be easy to make this work, but it doesn't now. */
29685 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29687 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29689 low |= high << 32;
29690 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29691 high = (HOST_WIDE_INT) low >> 32;
29692 low &= 0xffffffff;
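      /* Worked example (illustrative): an SImode constant 5 in a
	 64-bit big-endian TOC gets padded to pointer width: low
	 becomes 5 << 32, then high == 5 and low == 0, so the emitted
	 doubleword is 0x500000000 (i.e. the 5 in the upper word).  */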
29695 if (TARGET_64BIT)
29697 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29698 fputs (DOUBLE_INT_ASM_OP, file);
29699 else
29700 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29701 (long) high & 0xffffffff, (long) low & 0xffffffff);
29702 fprintf (file, "0x%lx%08lx\n",
29703 (long) high & 0xffffffff, (long) low & 0xffffffff);
29704 return;
29706 else
29708 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29710 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29711 fputs ("\t.long ", file);
29712 else
29713 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29714 (long) high & 0xffffffff, (long) low & 0xffffffff);
29715 fprintf (file, "0x%lx,0x%lx\n",
29716 (long) high & 0xffffffff, (long) low & 0xffffffff);
29718 else
29720 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29721 fputs ("\t.long ", file);
29722 else
29723 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29724 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29726 return;
29730 if (GET_CODE (x) == CONST)
29732 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29733 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29735 base = XEXP (XEXP (x, 0), 0);
29736 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29739 switch (GET_CODE (base))
29741 case SYMBOL_REF:
29742 name = XSTR (base, 0);
29743 break;
29745 case LABEL_REF:
29746 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29747 CODE_LABEL_NUMBER (XEXP (base, 0)));
29748 break;
29750 case CODE_LABEL:
29751 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29752 break;
29754 default:
29755 gcc_unreachable ();
29758 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29759 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29760 else
29762 fputs ("\t.tc ", file);
29763 RS6000_OUTPUT_BASENAME (file, name);
29765 if (offset < 0)
29766 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29767 else if (offset)
29768 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29770 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29771 after other TOC symbols, reducing overflow of small TOC access
29772 to [TC] symbols. */
29773 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29774 ? "[TE]," : "[TC],", file);
29777 /* Currently C++ toc references to vtables can be emitted before it
29778 is decided whether the vtable is public or private. If this is
29779 the case, then the linker will eventually complain that there is
29780 a TOC reference to an unknown section. Thus, for vtables only,
29781 we emit the TOC reference to reference the symbol and not the
29782 section. */
29783 if (VTABLE_NAME_P (name))
29785 RS6000_OUTPUT_BASENAME (file, name);
29786 if (offset < 0)
29787 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29788 else if (offset > 0)
29789 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29791 else
29792 output_addr_const (file, x);
29794 #if HAVE_AS_TLS
29795 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29797 switch (SYMBOL_REF_TLS_MODEL (base))
29799 case TLS_MODEL_NONE:
29800 break;
29801 case TLS_MODEL_LOCAL_EXEC:
29802 fputs ("@le", file);
29803 break;
29804 case TLS_MODEL_INITIAL_EXEC:
29805 fputs ("@ie", file);
29806 break;
29807 /* Use global-dynamic for local-dynamic. */
29808 case TLS_MODEL_GLOBAL_DYNAMIC:
29809 case TLS_MODEL_LOCAL_DYNAMIC:
29810 putc ('\n', file);
29811 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29812 fputs ("\t.tc .", file);
29813 RS6000_OUTPUT_BASENAME (file, name);
29814 fputs ("[TC],", file);
29815 output_addr_const (file, x);
29816 fputs ("@m", file);
29817 break;
29818 default:
29819 gcc_unreachable ();
29822 #endif
29824 putc ('\n', file);
29827 /* Output an assembler pseudo-op to write an ASCII string of N characters
29828 starting at P to FILE.
29830 On the RS/6000, we have to do this using the .byte operation and
29831 write out special characters outside the quoted string.
29832 Also, the assembler is broken; very long strings are truncated,
29833 so we must artificially break them up early. */
29835 void
29836 output_ascii (FILE *file, const char *p, int n)
29838 char c;
29839 int i, count_string;
29840 const char *for_string = "\t.byte \"";
29841 const char *for_decimal = "\t.byte ";
29842 const char *to_close = NULL;
29844 count_string = 0;
29845 for (i = 0; i < n; i++)
29847 c = *p++;
29848 if (c >= ' ' && c < 0177)
29850 if (for_string)
29851 fputs (for_string, file);
29852 putc (c, file);
29854 /* Write two quotes to get one. */
29855 if (c == '"')
29857 putc (c, file);
29858 ++count_string;
29861 for_string = NULL;
29862 for_decimal = "\"\n\t.byte ";
29863 to_close = "\"\n";
29864 ++count_string;
29866 if (count_string >= 512)
29868 fputs (to_close, file);
29870 for_string = "\t.byte \"";
29871 for_decimal = "\t.byte ";
29872 to_close = NULL;
29873 count_string = 0;
29876 else
29878 if (for_decimal)
29879 fputs (for_decimal, file);
29880 fprintf (file, "%d", c);
29882 for_string = "\n\t.byte \"";
29883 for_decimal = ", ";
29884 to_close = "\n";
29885 count_string = 0;
29889 /* Now close the string if we have written one. Then end the line. */
29890 if (to_close)
29891 fputs (to_close, file);
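/* For illustration, output_ascii (file, "say \"hi\"\n", 9) emits
	.byte "say ""hi"""
	.byte 10
   -- printable characters are grouped into quoted runs, '"' is
   doubled, and the newline falls back to a decimal .byte.  */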
29894 /* Generate a unique section name for FILENAME for a section type
29895 represented by SECTION_DESC. Output goes into BUF.
29897 SECTION_DESC can be any string, as long as it is different for each
29898 possible section type.
29900 We name the section in the same manner as xlc. The name begins with an
29901 underscore followed by the filename (after stripping any leading directory
29902 names) with the last period replaced by the string SECTION_DESC. If
29903 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29904 the name. */
29906 void
29907 rs6000_gen_section_name (char **buf, const char *filename,
29908 const char *section_desc)
29910 const char *q, *after_last_slash, *last_period = 0;
29911 char *p;
29912 int len;
29914 after_last_slash = filename;
29915 for (q = filename; *q; q++)
29917 if (*q == '/')
29918 after_last_slash = q + 1;
29919 else if (*q == '.')
29920 last_period = q;
29923 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29924 *buf = (char *) xmalloc (len);
29926 p = *buf;
29927 *p++ = '_';
29929 for (q = after_last_slash; *q; q++)
29931 if (q == last_period)
29933 strcpy (p, section_desc);
29934 p += strlen (section_desc);
29935 break;
29938 else if (ISALNUM (*q))
29939 *p++ = *q;
29942 if (last_period == 0)
29943 strcpy (p, section_desc);
29944 else
29945 *p = '\0';
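/* Examples (illustrative): rs6000_gen_section_name (&buf, "src/foo.c",
   "bss_") yields "_foobss_"; with no period in the filename,
   ("a-b", "data") yields "_abdata", since non-alphanumeric characters
   are dropped.  */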
29948 /* Emit profile function. */
29950 void
29951 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29953 /* Non-standard profiling for kernels, which just saves LR then calls
29954 _mcount without worrying about arg saves. The idea is to change
29955 the function prologue as little as possible as it isn't easy to
29956 account for arg save/restore code added just for _mcount. */
29957 if (TARGET_PROFILE_KERNEL)
29958 return;
29960 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29962 #ifndef NO_PROFILE_COUNTERS
29963 # define NO_PROFILE_COUNTERS 0
29964 #endif
29965 if (NO_PROFILE_COUNTERS)
29966 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29967 LCT_NORMAL, VOIDmode);
29968 else
29970 char buf[30];
29971 const char *label_name;
29972 rtx fun;
29974 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29975 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29976 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29978 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29979 LCT_NORMAL, VOIDmode, fun, Pmode);
29982 else if (DEFAULT_ABI == ABI_DARWIN)
29984 const char *mcount_name = RS6000_MCOUNT;
29985 int caller_addr_regno = LR_REGNO;
29987 /* Be conservative and always set this, at least for now. */
29988 crtl->uses_pic_offset_table = 1;
29990 #if TARGET_MACHO
29991 /* For PIC code, set up a stub and collect the caller's address
29992 from r0, which is where the prologue puts it. */
29993 if (MACHOPIC_INDIRECT
29994 && crtl->uses_pic_offset_table)
29995 caller_addr_regno = 0;
29996 #endif
29997 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29998 LCT_NORMAL, VOIDmode,
29999 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30003 /* Write function profiler code. */
30005 void
30006 output_function_profiler (FILE *file, int labelno)
30008 char buf[100];
30010 switch (DEFAULT_ABI)
30012 default:
30013 gcc_unreachable ();
30015 case ABI_V4:
30016 if (!TARGET_32BIT)
30018 warning (0, "no profiling of 64-bit code for this ABI");
30019 return;
30021 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30022 fprintf (file, "\tmflr %s\n", reg_names[0]);
30023 if (NO_PROFILE_COUNTERS)
30025 asm_fprintf (file, "\tstw %s,4(%s)\n",
30026 reg_names[0], reg_names[1]);
30028 else if (TARGET_SECURE_PLT && flag_pic)
30030 if (TARGET_LINK_STACK)
30032 char name[32];
30033 get_ppc476_thunk_name (name);
30034 asm_fprintf (file, "\tbl %s\n", name);
30036 else
30037 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30038 asm_fprintf (file, "\tstw %s,4(%s)\n",
30039 reg_names[0], reg_names[1]);
30040 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30041 asm_fprintf (file, "\taddis %s,%s,",
30042 reg_names[12], reg_names[12]);
30043 assemble_name (file, buf);
30044 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30045 assemble_name (file, buf);
30046 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30048 else if (flag_pic == 1)
30050 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30051 asm_fprintf (file, "\tstw %s,4(%s)\n",
30052 reg_names[0], reg_names[1]);
30053 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30054 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30055 assemble_name (file, buf);
30056 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30058 else if (flag_pic > 1)
30060 asm_fprintf (file, "\tstw %s,4(%s)\n",
30061 reg_names[0], reg_names[1]);
30062 /* Now, we need to get the address of the label. */
30063 if (TARGET_LINK_STACK)
30065 char name[32];
30066 get_ppc476_thunk_name (name);
30067 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30068 assemble_name (file, buf);
30069 fputs ("-.\n1:", file);
30070 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30071 asm_fprintf (file, "\taddi %s,%s,4\n",
30072 reg_names[11], reg_names[11]);
30074 else
30076 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30077 assemble_name (file, buf);
30078 fputs ("-.\n1:", file);
30079 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30081 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30082 reg_names[0], reg_names[11]);
30083 asm_fprintf (file, "\tadd %s,%s,%s\n",
30084 reg_names[0], reg_names[0], reg_names[11]);
30086 else
30088 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30089 assemble_name (file, buf);
30090 fputs ("@ha\n", file);
30091 asm_fprintf (file, "\tstw %s,4(%s)\n",
30092 reg_names[0], reg_names[1]);
30093 asm_fprintf (file, "\tla %s,", reg_names[0]);
30094 assemble_name (file, buf);
30095 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30098 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30099 fprintf (file, "\tbl %s%s\n",
30100 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30101 break;
30103 case ABI_AIX:
30104 case ABI_ELFv2:
30105 case ABI_DARWIN:
30106 /* Don't do anything, done in output_profile_hook (). */
30107 break;
30113 /* The following variable holds the last issued insn.  */
30115 static rtx_insn *last_scheduled_insn;
30117 /* The following variable helps to balance issuing of load and
30118 store instructions.  */
30120 static int load_store_pendulum;
30122 /* The following variable helps pair divide insns during scheduling. */
30123 static int divide_cnt;
30124 /* The following variable helps pair and alternate vector and vector load
30125 insns during scheduling. */
30126 static int vec_pairing;
30129 /* Power4 load update and store update instructions are cracked into a
30130 load or store and an integer insn which are executed in the same cycle.
30131 Branches have their own dispatch slot which does not count against the
30132 GCC issue rate, but it changes the program flow so there are no other
30133 instructions to issue in this cycle. */
30135 static int
30136 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30138 last_scheduled_insn = insn;
30139 if (GET_CODE (PATTERN (insn)) == USE
30140 || GET_CODE (PATTERN (insn)) == CLOBBER)
30142 cached_can_issue_more = more;
30143 return cached_can_issue_more;
30146 if (insn_terminates_group_p (insn, current_group))
30148 cached_can_issue_more = 0;
30149 return cached_can_issue_more;
30152 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged.  */
30153 if (recog_memoized (insn) < 0)
30154 return more;
30156 if (rs6000_sched_groups)
30158 if (is_microcoded_insn (insn))
30159 cached_can_issue_more = 0;
30160 else if (is_cracked_insn (insn))
30161 cached_can_issue_more = more > 2 ? more - 2 : 0;
30162 else
30163 cached_can_issue_more = more - 1;
30165 return cached_can_issue_more;
30168 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30169 return 0;
30171 cached_can_issue_more = more - 1;
30172 return cached_can_issue_more;
30175 static int
30176 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30178 int r = rs6000_variable_issue_1 (insn, more);
30179 if (verbose)
30180 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30181 return r;
30184 /* Adjust the cost of a scheduling dependency. Return the new cost of
30185 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30187 static int
30188 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30189 unsigned int)
30191 enum attr_type attr_type;
30193 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30194 return cost;
30196 switch (dep_type)
30198 case REG_DEP_TRUE:
30200 /* Data dependency; DEP_INSN writes a register that INSN reads
30201 some cycles later. */
30203 /* Separate a load from a narrower, dependent store. */
30204 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30205 && GET_CODE (PATTERN (insn)) == SET
30206 && GET_CODE (PATTERN (dep_insn)) == SET
30207 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30208 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30209 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30210 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30211 return cost + 14;
30213 attr_type = get_attr_type (insn);
30215 switch (attr_type)
30217 case TYPE_JMPREG:
30218 /* Tell the first scheduling pass about the latency between
30219 a mtctr and bctr (and mtlr and br/blr). The first
30220 scheduling pass will not know about this latency since
30221 the mtctr instruction, which has the latency associated
30222 to it, will be generated by reload. */
30223 return 4;
30224 case TYPE_BRANCH:
30225 /* Leave some extra cycles between a compare and its
30226 dependent branch, to inhibit expensive mispredicts. */
30227 if ((rs6000_cpu_attr == CPU_PPC603
30228 || rs6000_cpu_attr == CPU_PPC604
30229 || rs6000_cpu_attr == CPU_PPC604E
30230 || rs6000_cpu_attr == CPU_PPC620
30231 || rs6000_cpu_attr == CPU_PPC630
30232 || rs6000_cpu_attr == CPU_PPC750
30233 || rs6000_cpu_attr == CPU_PPC7400
30234 || rs6000_cpu_attr == CPU_PPC7450
30235 || rs6000_cpu_attr == CPU_PPCE5500
30236 || rs6000_cpu_attr == CPU_PPCE6500
30237 || rs6000_cpu_attr == CPU_POWER4
30238 || rs6000_cpu_attr == CPU_POWER5
30239 || rs6000_cpu_attr == CPU_POWER7
30240 || rs6000_cpu_attr == CPU_POWER8
30241 || rs6000_cpu_attr == CPU_POWER9
30242 || rs6000_cpu_attr == CPU_CELL)
30243 && recog_memoized (dep_insn)
30244 && (INSN_CODE (dep_insn) >= 0))
30246 switch (get_attr_type (dep_insn))
30248 case TYPE_CMP:
30249 case TYPE_FPCOMPARE:
30250 case TYPE_CR_LOGICAL:
30251 case TYPE_DELAYED_CR:
30252 return cost + 2;
30253 case TYPE_EXTS:
30254 case TYPE_MUL:
30255 if (get_attr_dot (dep_insn) == DOT_YES)
30256 return cost + 2;
30257 else
30258 break;
30259 case TYPE_SHIFT:
30260 if (get_attr_dot (dep_insn) == DOT_YES
30261 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30262 return cost + 2;
30263 else
30264 break;
30265 default:
30266 break;
30268 break;
30270 case TYPE_STORE:
30271 case TYPE_FPSTORE:
30272 if ((rs6000_cpu == PROCESSOR_POWER6)
30273 && recog_memoized (dep_insn)
30274 && (INSN_CODE (dep_insn) >= 0))
30277 if (GET_CODE (PATTERN (insn)) != SET)
30278 /* If this happens, we have to extend this to schedule
30279 optimally. Return default for now. */
30280 return cost;
30282 /* Adjust the cost for the case where the value written
30283 by a fixed point operation is used as the address
30284 gen value on a store. */
30285 switch (get_attr_type (dep_insn))
30287 case TYPE_LOAD:
30288 case TYPE_CNTLZ:
30290 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30291 return get_attr_sign_extend (dep_insn)
30292 == SIGN_EXTEND_YES ? 6 : 4;
30293 break;
30295 case TYPE_SHIFT:
30297 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30298 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30299 6 : 3;
30300 break;
30302 case TYPE_INTEGER:
30303 case TYPE_ADD:
30304 case TYPE_LOGICAL:
30305 case TYPE_EXTS:
30306 case TYPE_INSERT:
30308 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30309 return 3;
30310 break;
30312 case TYPE_STORE:
30313 case TYPE_FPLOAD:
30314 case TYPE_FPSTORE:
30316 if (get_attr_update (dep_insn) == UPDATE_YES
30317 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30318 return 3;
30319 break;
30321 case TYPE_MUL:
30323 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30324 return 17;
30325 break;
30327 case TYPE_DIV:
30329 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30330 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30331 break;
30333 default:
30334 break;
30337 break;
30339 case TYPE_LOAD:
30340 if ((rs6000_cpu == PROCESSOR_POWER6)
30341 && recog_memoized (dep_insn)
30342 && (INSN_CODE (dep_insn) >= 0))
30345 /* Adjust the cost for the case where the value written
30346 by a fixed point instruction is used within the address
30347 gen portion of a subsequent load(u)(x).  */
30348 switch (get_attr_type (dep_insn))
30350 case TYPE_LOAD:
30351 case TYPE_CNTLZ:
30353 if (set_to_load_agen (dep_insn, insn))
30354 return get_attr_sign_extend (dep_insn)
30355 == SIGN_EXTEND_YES ? 6 : 4;
30356 break;
30358 case TYPE_SHIFT:
30360 if (set_to_load_agen (dep_insn, insn))
30361 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30362 6 : 3;
30363 break;
30365 case TYPE_INTEGER:
30366 case TYPE_ADD:
30367 case TYPE_LOGICAL:
30368 case TYPE_EXTS:
30369 case TYPE_INSERT:
30371 if (set_to_load_agen (dep_insn, insn))
30372 return 3;
30373 break;
30375 case TYPE_STORE:
30376 case TYPE_FPLOAD:
30377 case TYPE_FPSTORE:
30379 if (get_attr_update (dep_insn) == UPDATE_YES
30380 && set_to_load_agen (dep_insn, insn))
30381 return 3;
30382 break;
30384 case TYPE_MUL:
30386 if (set_to_load_agen (dep_insn, insn))
30387 return 17;
30388 break;
30390 case TYPE_DIV:
30392 if (set_to_load_agen (dep_insn, insn))
30393 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30394 break;
30396 default:
30397 break;
30400 break;
30402 case TYPE_FPLOAD:
30403 if ((rs6000_cpu == PROCESSOR_POWER6)
30404 && get_attr_update (insn) == UPDATE_NO
30405 && recog_memoized (dep_insn)
30406 && (INSN_CODE (dep_insn) >= 0)
30407 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30408 return 2;
30410 default:
30411 break;
30414 /* Fall out to return default cost. */
30416 break;
30418 case REG_DEP_OUTPUT:
30419 /* Output dependency; DEP_INSN writes a register that INSN writes some
30420 cycles later. */
30421 if ((rs6000_cpu == PROCESSOR_POWER6)
30422 && recog_memoized (dep_insn)
30423 && (INSN_CODE (dep_insn) >= 0))
30425 attr_type = get_attr_type (insn);
30427 switch (attr_type)
30429 case TYPE_FP:
30430 case TYPE_FPSIMPLE:
30431 if (get_attr_type (dep_insn) == TYPE_FP
30432 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30433 return 1;
30434 break;
30435 case TYPE_FPLOAD:
30436 if (get_attr_update (insn) == UPDATE_NO
30437 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30438 return 2;
30439 break;
30440 default:
30441 break;
30444 /* Fall through, no cost for output dependency. */
30445 /* FALLTHRU */
30447 case REG_DEP_ANTI:
30448 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30449 cycles later. */
30450 return 0;
30452 default:
30453 gcc_unreachable ();
30456 return cost;
30459 /* Debug version of rs6000_adjust_cost. */
30461 static int
30462 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30463 int cost, unsigned int dw)
30465 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30467 if (ret != cost)
30469 const char *dep;
30471 switch (dep_type)
30473 default: dep = "unknown dependency"; break;
30474 case REG_DEP_TRUE: dep = "data dependency"; break;
30475 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30476 case REG_DEP_ANTI: dep = "anti dependency"; break;
30479 fprintf (stderr,
30480 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30481 "%s, insn:\n", ret, cost, dep);
30483 debug_rtx (insn);
30486 return ret;
30489 /* The function returns true if INSN is microcoded.
30490 Return false otherwise. */
30492 static bool
30493 is_microcoded_insn (rtx_insn *insn)
30495 if (!insn || !NONDEBUG_INSN_P (insn)
30496 || GET_CODE (PATTERN (insn)) == USE
30497 || GET_CODE (PATTERN (insn)) == CLOBBER)
30498 return false;
30500 if (rs6000_cpu_attr == CPU_CELL)
30501 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30503 if (rs6000_sched_groups
30504 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30506 enum attr_type type = get_attr_type (insn);
30507 if ((type == TYPE_LOAD
30508 && get_attr_update (insn) == UPDATE_YES
30509 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30510 || ((type == TYPE_LOAD || type == TYPE_STORE)
30511 && get_attr_update (insn) == UPDATE_YES
30512 && get_attr_indexed (insn) == INDEXED_YES)
30513 || type == TYPE_MFCR)
30514 return true;
30517 return false;
30520 /* The function returns true if INSN is cracked into 2 instructions
30521 by the processor (and therefore occupies 2 issue slots). */
30523 static bool
30524 is_cracked_insn (rtx_insn *insn)
30526 if (!insn || !NONDEBUG_INSN_P (insn)
30527 || GET_CODE (PATTERN (insn)) == USE
30528 || GET_CODE (PATTERN (insn)) == CLOBBER)
30529 return false;
30531 if (rs6000_sched_groups
30532 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30534 enum attr_type type = get_attr_type (insn);
30535 if ((type == TYPE_LOAD
30536 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30537 && get_attr_update (insn) == UPDATE_NO)
30538 || (type == TYPE_LOAD
30539 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30540 && get_attr_update (insn) == UPDATE_YES
30541 && get_attr_indexed (insn) == INDEXED_NO)
30542 || (type == TYPE_STORE
30543 && get_attr_update (insn) == UPDATE_YES
30544 && get_attr_indexed (insn) == INDEXED_NO)
30545 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30546 && get_attr_update (insn) == UPDATE_YES)
30547 || type == TYPE_DELAYED_CR
30548 || (type == TYPE_EXTS
30549 && get_attr_dot (insn) == DOT_YES)
30550 || (type == TYPE_SHIFT
30551 && get_attr_dot (insn) == DOT_YES
30552 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30553 || (type == TYPE_MUL
30554 && get_attr_dot (insn) == DOT_YES)
30555 || type == TYPE_DIV
30556 || (type == TYPE_INSERT
30557 && get_attr_size (insn) == SIZE_32))
30558 return true;
30561 return false;
30564 /* The function returns true if INSN can be issued only from
30565 the branch slot. */
30567 static bool
30568 is_branch_slot_insn (rtx_insn *insn)
30570 if (!insn || !NONDEBUG_INSN_P (insn)
30571 || GET_CODE (PATTERN (insn)) == USE
30572 || GET_CODE (PATTERN (insn)) == CLOBBER)
30573 return false;
30575 if (rs6000_sched_groups)
30577 enum attr_type type = get_attr_type (insn);
30578 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30579 return true;
30580 return false;
30583 return false;
30586 /* The function returns true if OUT_INSN sets a value that is
30587 used in the address generation computation of IN_INSN.  */
30588 static bool
30589 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30591 rtx out_set, in_set;
30593 /* For performance reasons, only handle the simple case where
30594 both loads are a single_set. */
30595 out_set = single_set (out_insn);
30596 if (out_set)
30598 in_set = single_set (in_insn);
30599 if (in_set)
30600 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30603 return false;
30606 /* Try to determine base/offset/size parts of the given MEM.
30607 Return true if successful, false if all the values couldn't
30608 be determined.
30610 This function only looks for REG or REG+CONST address forms.
30611 REG+REG address form will return false. */
30613 static bool
30614 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30615 HOST_WIDE_INT *size)
30617 rtx addr_rtx;
30618 if (MEM_SIZE_KNOWN_P (mem))
30619 *size = MEM_SIZE (mem);
30620 else
30621 return false;
30623 addr_rtx = (XEXP (mem, 0));
30624 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30625 addr_rtx = XEXP (addr_rtx, 1);
30627 *offset = 0;
30628 while (GET_CODE (addr_rtx) == PLUS
30629 && CONST_INT_P (XEXP (addr_rtx, 1)))
30631 *offset += INTVAL (XEXP (addr_rtx, 1));
30632 addr_rtx = XEXP (addr_rtx, 0);
30634 if (!REG_P (addr_rtx))
30635 return false;
30637 *base = addr_rtx;
30638 return true;
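/* Worked example (illustrative): for a MEM of the form
   (mem:SI (plus (plus (reg r9) (const_int 16)) (const_int 8)))
   with a known size of 4, the loop above accumulates *offset == 24,
   with *base == r9 and *size == 4.  */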
30641 /* Return true if the target storage location of
30642 MEM1 is adjacent to the target storage location of MEM2.  */
30645 static bool
30646 adjacent_mem_locations (rtx mem1, rtx mem2)
30648 rtx reg1, reg2;
30649 HOST_WIDE_INT off1, size1, off2, size2;
30651 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30652 && get_memref_parts (mem2, &reg2, &off2, &size2))
30653 return ((REGNO (reg1) == REGNO (reg2))
30654 && ((off1 + size1 == off2)
30655 || (off2 + size2 == off1)));
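/* E.g. a 4-byte access at 8(r9) and a 4-byte access at 12(r9) are
   adjacent (8 + 4 == 12), in either order; accesses off different
   base registers are never considered adjacent here.  */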
30657 return false;
30660 /* This function returns true if it can be determined that the two MEM
30661 locations overlap by at least 1 byte based on base reg/offset/size. */
30663 static bool
30664 mem_locations_overlap (rtx mem1, rtx mem2)
30666 rtx reg1, reg2;
30667 HOST_WIDE_INT off1, size1, off2, size2;
30669 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30670 && get_memref_parts (mem2, &reg2, &off2, &size2))
30671 return ((REGNO (reg1) == REGNO (reg2))
30672 && (((off1 <= off2) && (off1 + size1 > off2))
30673 || ((off2 <= off1) && (off2 + size2 > off1))));
30675 return false;
30678 /* A C statement (sans semicolon) to update the integer scheduling
30679 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30680 INSN earlier, reduce the priority to execute INSN later. Do not
30681 define this macro if you do not need to adjust the scheduling
30682 priorities of insns. */
30684 static int
30685 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30687 rtx load_mem, str_mem;
30688 /* On machines (like the 750) which have asymmetric integer units,
30689 where one integer unit can do multiply and divides and the other
30690 can't, reduce the priority of multiply/divide so it is scheduled
30691 before other integer operations. */
30693 #if 0
30694 if (! INSN_P (insn))
30695 return priority;
30697 if (GET_CODE (PATTERN (insn)) == USE)
30698 return priority;
30700 switch (rs6000_cpu_attr) {
30701 case CPU_PPC750:
30702 switch (get_attr_type (insn))
30704 default:
30705 break;
30707 case TYPE_MUL:
30708 case TYPE_DIV:
30709 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30710 priority, priority);
30711 if (priority >= 0 && priority < 0x01000000)
30712 priority >>= 3;
30713 break;
30716 #endif
30718 if (insn_must_be_first_in_group (insn)
30719 && reload_completed
30720 && current_sched_info->sched_max_insns_priority
30721 && rs6000_sched_restricted_insns_priority)
30724 /* Prioritize insns that can be dispatched only in the first
30725 dispatch slot. */
30726 if (rs6000_sched_restricted_insns_priority == 1)
30727 /* Attach highest priority to insn. This means that in
30728 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30729 precede 'priority' (critical path) considerations. */
30730 return current_sched_info->sched_max_insns_priority;
30731 else if (rs6000_sched_restricted_insns_priority == 2)
30732 /* Increase priority of insn by a minimal amount. This means that in
30733 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30734 considerations precede dispatch-slot restriction considerations. */
30735 return (priority + 1);
30738 if (rs6000_cpu == PROCESSOR_POWER6
30739 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30740 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30741 /* Attach highest priority to insn if the scheduler has just issued two
30742 stores and this instruction is a load, or two loads and this instruction
30743 is a store. Power6 wants loads and stores scheduled alternately
30744 when possible.  */
30745 return current_sched_info->sched_max_insns_priority;
30747 return priority;
30750 /* Return true if the instruction is nonpipelined on the Cell. */
30751 static bool
30752 is_nonpipeline_insn (rtx_insn *insn)
30754 enum attr_type type;
30755 if (!insn || !NONDEBUG_INSN_P (insn)
30756 || GET_CODE (PATTERN (insn)) == USE
30757 || GET_CODE (PATTERN (insn)) == CLOBBER)
30758 return false;
30760 type = get_attr_type (insn);
30761 if (type == TYPE_MUL
30762 || type == TYPE_DIV
30763 || type == TYPE_SDIV
30764 || type == TYPE_DDIV
30765 || type == TYPE_SSQRT
30766 || type == TYPE_DSQRT
30767 || type == TYPE_MFCR
30768 || type == TYPE_MFCRF
30769 || type == TYPE_MFJMPR)
30771 return true;
30773 return false;
30777 /* Return how many instructions the machine can issue per cycle. */
30779 static int
30780 rs6000_issue_rate (void)
30782 /* Unless scheduling for register pressure, use issue rate of 1 for
30783 first scheduling pass to decrease degradation. */
30784 if (!reload_completed && !flag_sched_pressure)
30785 return 1;
30787 switch (rs6000_cpu_attr) {
30788 case CPU_RS64A:
30789 case CPU_PPC601: /* ? */
30790 case CPU_PPC7450:
30791 return 3;
30792 case CPU_PPC440:
30793 case CPU_PPC603:
30794 case CPU_PPC750:
30795 case CPU_PPC7400:
30796 case CPU_PPC8540:
30797 case CPU_PPC8548:
30798 case CPU_CELL:
30799 case CPU_PPCE300C2:
30800 case CPU_PPCE300C3:
30801 case CPU_PPCE500MC:
30802 case CPU_PPCE500MC64:
30803 case CPU_PPCE5500:
30804 case CPU_PPCE6500:
30805 case CPU_TITAN:
30806 return 2;
30807 case CPU_PPC476:
30808 case CPU_PPC604:
30809 case CPU_PPC604E:
30810 case CPU_PPC620:
30811 case CPU_PPC630:
30812 return 4;
30813 case CPU_POWER4:
30814 case CPU_POWER5:
30815 case CPU_POWER6:
30816 case CPU_POWER7:
30817 return 5;
30818 case CPU_POWER8:
30819 return 7;
30820 case CPU_POWER9:
30821 return 6;
30822 default:
30823 return 1;
30827 /* Return how many instructions to look ahead for better insn
30828 scheduling. */
30830 static int
30831 rs6000_use_sched_lookahead (void)
30833 switch (rs6000_cpu_attr)
30835 case CPU_PPC8540:
30836 case CPU_PPC8548:
30837 return 4;
30839 case CPU_CELL:
30840 return (reload_completed ? 8 : 0);
30842 default:
30843 return 0;
30847 /* We are choosing an insn from the ready queue. Return zero if INSN can be
30848 chosen. */
30849 static int
30850 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30852 if (ready_index == 0)
30853 return 0;
30855 if (rs6000_cpu_attr != CPU_CELL)
30856 return 0;
30858 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30860 if (!reload_completed
30861 || is_nonpipeline_insn (insn)
30862 || is_microcoded_insn (insn))
30863 return 1;
30865 return 0;
30868 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30869 and return true. */
30871 static bool
30872 find_mem_ref (rtx pat, rtx *mem_ref)
30874 const char * fmt;
30875 int i, j;
30877 /* stack_tie does not produce any real memory traffic. */
30878 if (tie_operand (pat, VOIDmode))
30879 return false;
30881 if (GET_CODE (pat) == MEM)
30883 *mem_ref = pat;
30884 return true;
30887 /* Recursively process the pattern. */
30888 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30890 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30892 if (fmt[i] == 'e')
30894 if (find_mem_ref (XEXP (pat, i), mem_ref))
30895 return true;
30897 else if (fmt[i] == 'E')
30898 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30900 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30901 return true;
30905 return false;
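/* Illustrative example (editor's sketch, not from the original sources):
   for an insn whose pattern is

     (set (reg:DI 3) (plus:DI (mem:DI (reg:DI 4)) (const_int 8)))

   the walk above descends through the SET and PLUS subexpressions,
   stores the (mem:DI (reg:DI 4)) rtx in *MEM_REF and returns true;
   a stack_tie pattern is rejected up front because it produces no
   real memory traffic.  */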
30908 /* Determine if PAT is a PATTERN of a load insn. */
30910 static bool
30911 is_load_insn1 (rtx pat, rtx *load_mem)
30913 if (!pat || pat == NULL_RTX)
30914 return false;
30916 if (GET_CODE (pat) == SET)
30917 return find_mem_ref (SET_SRC (pat), load_mem);
30919 if (GET_CODE (pat) == PARALLEL)
30921 int i;
30923 for (i = 0; i < XVECLEN (pat, 0); i++)
30924 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30925 return true;
30928 return false;
30931 /* Determine if INSN loads from memory. */
30933 static bool
30934 is_load_insn (rtx insn, rtx *load_mem)
30936 if (!insn || !INSN_P (insn))
30937 return false;
30939 if (CALL_P (insn))
30940 return false;
30942 return is_load_insn1 (PATTERN (insn), load_mem);
30945 /* Determine if PAT is a PATTERN of a store insn. */
30947 static bool
30948 is_store_insn1 (rtx pat, rtx *str_mem)
30950 if (!pat || pat == NULL_RTX)
30951 return false;
30953 if (GET_CODE (pat) == SET)
30954 return find_mem_ref (SET_DEST (pat), str_mem);
30956 if (GET_CODE (pat) == PARALLEL)
30958 int i;
30960 for (i = 0; i < XVECLEN (pat, 0); i++)
30961 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30962 return true;
30965 return false;
30968 /* Determine if INSN stores to memory. */
30970 static bool
30971 is_store_insn (rtx insn, rtx *str_mem)
30973 if (!insn || !INSN_P (insn))
30974 return false;
30976 return is_store_insn1 (PATTERN (insn), str_mem);
30979 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30981 static bool
30982 is_power9_pairable_vec_type (enum attr_type type)
30984 switch (type)
30986 case TYPE_VECSIMPLE:
30987 case TYPE_VECCOMPLEX:
30988 case TYPE_VECDIV:
30989 case TYPE_VECCMP:
30990 case TYPE_VECPERM:
30991 case TYPE_VECFLOAT:
30992 case TYPE_VECFDIV:
30993 case TYPE_VECDOUBLE:
30994 return true;
30995 default:
30996 break;
30998 return false;
31001 /* Returns whether the dependence between INSN and NEXT is considered
31002 costly by the given target. */
31004 static bool
31005 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31007 rtx insn;
31008 rtx next;
31009 rtx load_mem, str_mem;
31011 /* If the flag is not enabled, no dependence is considered costly;
31012 allow all dependent insns in the same group.
31013 This is the most aggressive option. */
31014 if (rs6000_sched_costly_dep == no_dep_costly)
31015 return false;
31017 /* If the flag is set to 1, a dependence is always considered costly;
31018 do not allow dependent instructions in the same group.
31019 This is the most conservative option. */
31020 if (rs6000_sched_costly_dep == all_deps_costly)
31021 return true;
31023 insn = DEP_PRO (dep);
31024 next = DEP_CON (dep);
31026 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31027 && is_load_insn (next, &load_mem)
31028 && is_store_insn (insn, &str_mem))
31029 /* Prevent load after store in the same group. */
31030 return true;
31032 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31033 && is_load_insn (next, &load_mem)
31034 && is_store_insn (insn, &str_mem)
31035 && DEP_TYPE (dep) == REG_DEP_TRUE
31036 && mem_locations_overlap(str_mem, load_mem))
31037 /* Prevent load after store in the same group if it is a true
31038 dependence. */
31039 return true;
31041 /* The flag is set to X; dependences with latency >= X are considered costly,
31042 and will not be scheduled in the same group. */
31043 if (rs6000_sched_costly_dep <= max_dep_latency
31044 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31045 return true;
31047 return false;
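/* Worked example (editor's sketch, not from the original sources): when
   rs6000_sched_costly_dep holds a plain latency threshold, say 3, a
   dependence with cost 4 and distance 0 satisfies (4 - 0) >= 3 and is
   reported costly, while the same dependence at distance 2 gives
   (4 - 2) < 3 and may share a dispatch group.  */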
31050 /* Return the next insn after INSN that is found before TAIL is reached,
31051 skipping any "non-active" insns, i.e. insns that will not actually occupy
31052 an issue slot. Return NULL_RTX if such an insn is not found. */
31054 static rtx_insn *
31055 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31057 if (insn == NULL_RTX || insn == tail)
31058 return NULL;
31060 while (1)
31062 insn = NEXT_INSN (insn);
31063 if (insn == NULL_RTX || insn == tail)
31064 return NULL;
31066 if (CALL_P (insn)
31067 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31068 || (NONJUMP_INSN_P (insn)
31069 && GET_CODE (PATTERN (insn)) != USE
31070 && GET_CODE (PATTERN (insn)) != CLOBBER
31071 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31072 break;
31074 return insn;
31077 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31079 static int
31080 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31082 int pos;
31083 int i;
31084 rtx_insn *tmp;
31085 enum attr_type type, type2;
31087 type = get_attr_type (last_scheduled_insn);
31089 /* Try to issue fixed point divides back-to-back in pairs so they will be
31090 routed to separate execution units and execute in parallel. */
31091 if (type == TYPE_DIV && divide_cnt == 0)
31093 /* First divide has been scheduled. */
31094 divide_cnt = 1;
31096 /* Scan the ready list looking for another divide, if found move it
31097 to the end of the list so it is chosen next. */
31098 pos = lastpos;
31099 while (pos >= 0)
31101 if (recog_memoized (ready[pos]) >= 0
31102 && get_attr_type (ready[pos]) == TYPE_DIV)
31104 tmp = ready[pos];
31105 for (i = pos; i < lastpos; i++)
31106 ready[i] = ready[i + 1];
31107 ready[lastpos] = tmp;
31108 break;
31110 pos--;
31113 else
31115 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31116 divide_cnt = 0;
31118 /* The best dispatch throughput for vector and vector load insns can be
31119 achieved by interleaving a vector and vector load such that they'll
31120 dispatch to the same superslice. If this pairing cannot be achieved
31121 then it is best to pair vector insns together and vector load insns
31122 together.
31124 To aid in this pairing, vec_pairing maintains the current state with
31125 the following values:
31127 0 : Initial state, no vecload/vector pairing has been started.
31129 1 : A vecload or vector insn has been issued and a candidate for
31130 pairing has been found and moved to the end of the ready
31131 list. */
31132 if (type == TYPE_VECLOAD)
31134 /* Issued a vecload. */
31135 if (vec_pairing == 0)
31137 int vecload_pos = -1;
31138 /* We issued a single vecload, look for a vector insn to pair it
31139 with. If one isn't found, try to pair another vecload. */
31140 pos = lastpos;
31141 while (pos >= 0)
31143 if (recog_memoized (ready[pos]) >= 0)
31145 type2 = get_attr_type (ready[pos]);
31146 if (is_power9_pairable_vec_type (type2))
31148 /* Found a vector insn to pair with, move it to the
31149 end of the ready list so it is scheduled next. */
31150 tmp = ready[pos];
31151 for (i = pos; i < lastpos; i++)
31152 ready[i] = ready[i + 1];
31153 ready[lastpos] = tmp;
31154 vec_pairing = 1;
31155 return cached_can_issue_more;
31157 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31158 /* Remember position of first vecload seen. */
31159 vecload_pos = pos;
31161 pos--;
31163 if (vecload_pos >= 0)
31165 /* Didn't find a vector to pair with but did find a vecload,
31166 move it to the end of the ready list. */
31167 tmp = ready[vecload_pos];
31168 for (i = vecload_pos; i < lastpos; i++)
31169 ready[i] = ready[i + 1];
31170 ready[lastpos] = tmp;
31171 vec_pairing = 1;
31172 return cached_can_issue_more;
31176 else if (is_power9_pairable_vec_type (type))
31178 /* Issued a vector operation. */
31179 if (vec_pairing == 0)
31181 int vec_pos = -1;
31182 /* We issued a single vector insn, look for a vecload to pair it
31183 with. If one isn't found, try to pair another vector. */
31184 pos = lastpos;
31185 while (pos >= 0)
31187 if (recog_memoized (ready[pos]) >= 0)
31189 type2 = get_attr_type (ready[pos]);
31190 if (type2 == TYPE_VECLOAD)
31192 /* Found a vecload insn to pair with, move it to the
31193 end of the ready list so it is scheduled next. */
31194 tmp = ready[pos];
31195 for (i = pos; i < lastpos; i++)
31196 ready[i] = ready[i + 1];
31197 ready[lastpos] = tmp;
31198 vec_pairing = 1;
31199 return cached_can_issue_more;
31201 else if (is_power9_pairable_vec_type (type2)
31202 && vec_pos == -1)
31203 /* Remember position of first vector insn seen. */
31204 vec_pos = pos;
31206 pos--;
31208 if (vec_pos >= 0)
31210 /* Didn't find a vecload to pair with but did find a vector
31211 insn, move it to the end of the ready list. */
31212 tmp = ready[vec_pos];
31213 for (i = vec_pos; i < lastpos; i++)
31214 ready[i] = ready[i + 1];
31215 ready[lastpos] = tmp;
31216 vec_pairing = 1;
31217 return cached_can_issue_more;
31222 /* We've either finished a vec/vecload pair, couldn't find an insn to
31223 continue the current pair, or the last insn had nothing to do with
31224 pairing. In any case, reset the state. */
31225 vec_pairing = 0;
31228 return cached_can_issue_more;
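/* Editor's sketch (standalone illustration, not part of the original
   sources): the ready-list rotation repeated above, extracted as a
   self-contained helper. The scheduler issues from the end of the
   ready array, so moving an insn to index LASTPOS makes it the next
   one chosen.  */

static void
move_insn_to_end (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];	/* Save the chosen insn.  */
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];	/* Shift the tail down by one slot.  */
  ready[lastpos] = tmp;		/* Place it at the issue end.  */
}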
31231 /* We are about to begin issuing insns for this clock cycle. */
31233 static int
31234 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31235 rtx_insn **ready ATTRIBUTE_UNUSED,
31236 int *pn_ready ATTRIBUTE_UNUSED,
31237 int clock_var ATTRIBUTE_UNUSED)
31239 int n_ready = *pn_ready;
31241 if (sched_verbose)
31242 fprintf (dump, "// rs6000_sched_reorder :\n");
31244 /* Reorder the ready list if the next insn to be issued (the last
31245 ready insn) is a nonpipeline insn. */
31246 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31248 if (is_nonpipeline_insn (ready[n_ready - 1])
31249 && (recog_memoized (ready[n_ready - 2]) > 0))
31250 /* Simply swap first two insns. */
31251 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31254 if (rs6000_cpu == PROCESSOR_POWER6)
31255 load_store_pendulum = 0;
31257 return rs6000_issue_rate ();
31260 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31262 static int
31263 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31264 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31266 if (sched_verbose)
31267 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31269 /* For Power6, we need to handle some special cases to try and keep the
31270 store queue from overflowing and triggering expensive flushes.
31272 This code monitors how load and store instructions are being issued
31273 and skews the ready list one way or the other to increase the likelihood
31274 that a desired instruction is issued at the proper time.
31276 A couple of things are done. First, we maintain a "load_store_pendulum"
31277 to track the current state of load/store issue.
31279 - If the pendulum is at zero, then no loads or stores have been
31280 issued in the current cycle so we do nothing.
31282 - If the pendulum is 1, then a single load has been issued in this
31283 cycle and we attempt to locate another load in the ready list to
31284 issue with it.
31286 - If the pendulum is -2, then two stores have already been
31287 issued in this cycle, so we increase the priority of the first load
31288 in the ready list to increase its likelihood of being chosen first
31289 in the next cycle.
31291 - If the pendulum is -1, then a single store has been issued in this
31292 cycle and we attempt to locate another store in the ready list to
31293 issue with it, preferring a store to an adjacent memory location to
31294 facilitate store pairing in the store queue.
31296 - If the pendulum is 2, then two loads have already been
31297 issued in this cycle, so we increase the priority of the first store
31298 in the ready list to increase its likelihood of being chosen first
31299 in the next cycle.
31301 - If the pendulum < -2 or > 2, then do nothing.
31303 Note: This code covers the most common scenarios. There exist
31304 non-load/store instructions which make use of the LSU and which
31305 would need to be accounted for to strictly model the behavior
31306 of the machine. Those instructions are currently unaccounted
31307 for to help minimize compile time overhead of this code.
31309 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31311 int pos;
31312 int i;
31313 rtx_insn *tmp;
31314 rtx load_mem, str_mem;
31316 if (is_store_insn (last_scheduled_insn, &str_mem))
31317 /* Issuing a store, swing the load_store_pendulum to the left */
31318 load_store_pendulum--;
31319 else if (is_load_insn (last_scheduled_insn, &load_mem))
31320 /* Issuing a load, swing the load_store_pendulum to the right */
31321 load_store_pendulum++;
31322 else
31323 return cached_can_issue_more;
31325 /* If the pendulum is balanced, or there is only one instruction on
31326 the ready list, then all is well, so return. */
31327 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31328 return cached_can_issue_more;
31330 if (load_store_pendulum == 1)
31332 /* A load has been issued in this cycle. Scan the ready list
31333 for another load to issue with it */
31334 pos = *pn_ready-1;
31336 while (pos >= 0)
31338 if (is_load_insn (ready[pos], &load_mem))
31340 /* Found a load. Move it to the head of the ready list,
31341 and adjust its priority so that it is more likely to
31342 stay there */
31343 tmp = ready[pos];
31344 for (i=pos; i<*pn_ready-1; i++)
31345 ready[i] = ready[i + 1];
31346 ready[*pn_ready-1] = tmp;
31348 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31349 INSN_PRIORITY (tmp)++;
31350 break;
31352 pos--;
31355 else if (load_store_pendulum == -2)
31357 /* Two stores have been issued in this cycle. Increase the
31358 priority of the first load in the ready list to favor it for
31359 issuing in the next cycle. */
31360 pos = *pn_ready-1;
31362 while (pos >= 0)
31364 if (is_load_insn (ready[pos], &load_mem)
31365 && !sel_sched_p ()
31366 && INSN_PRIORITY_KNOWN (ready[pos]))
31368 INSN_PRIORITY (ready[pos])++;
31370 /* Adjust the pendulum to account for the fact that a load
31371 was found and increased in priority. This is to prevent
31372 increasing the priority of multiple loads */
31373 load_store_pendulum--;
31375 break;
31377 pos--;
31380 else if (load_store_pendulum == -1)
31382 /* A store has been issued in this cycle. Scan the ready list for
31383 another store to issue with it, preferring a store to an adjacent
31384 memory location */
31385 int first_store_pos = -1;
31387 pos = *pn_ready-1;
31389 while (pos >= 0)
31391 if (is_store_insn (ready[pos], &str_mem))
31393 rtx str_mem2;
31394 /* Maintain the index of the first store found on the
31395 list */
31396 if (first_store_pos == -1)
31397 first_store_pos = pos;
31399 if (is_store_insn (last_scheduled_insn, &str_mem2)
31400 && adjacent_mem_locations (str_mem, str_mem2))
31402 /* Found an adjacent store. Move it to the head of the
31403 ready list, and adjust its priority so that it is
31404 more likely to stay there */
31405 tmp = ready[pos];
31406 for (i=pos; i<*pn_ready-1; i++)
31407 ready[i] = ready[i + 1];
31408 ready[*pn_ready-1] = tmp;
31410 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31411 INSN_PRIORITY (tmp)++;
31413 first_store_pos = -1;
31415 break;
31418 pos--;
31421 if (first_store_pos >= 0)
31423 /* An adjacent store wasn't found, but a non-adjacent store was,
31424 so move the non-adjacent store to the front of the ready
31425 list, and adjust its priority so that it is more likely to
31426 stay there. */
31427 tmp = ready[first_store_pos];
31428 for (i=first_store_pos; i<*pn_ready-1; i++)
31429 ready[i] = ready[i + 1];
31430 ready[*pn_ready-1] = tmp;
31431 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31432 INSN_PRIORITY (tmp)++;
31435 else if (load_store_pendulum == 2)
31437 /* Two loads have been issued in this cycle. Increase the priority
31438 of the first store in the ready list to favor it for issuing in
31439 the next cycle. */
31440 pos = *pn_ready-1;
31442 while (pos >= 0)
31444 if (is_store_insn (ready[pos], &str_mem)
31445 && !sel_sched_p ()
31446 && INSN_PRIORITY_KNOWN (ready[pos]))
31448 INSN_PRIORITY (ready[pos])++;
31450 /* Adjust the pendulum to account for the fact that a store
31451 was found and increased in priority. This is to prevent
31452 increasing the priority of multiple stores */
31453 load_store_pendulum++;
31455 break;
31457 pos--;
31462 /* Do Power9 dependent reordering if necessary. */
31463 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31464 && recog_memoized (last_scheduled_insn) >= 0)
31465 return power9_sched_reorder2 (ready, *pn_ready - 1);
31467 return cached_can_issue_more;
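/* Editor's sketch (standalone illustration, not part of the original
   sources): the Power6 load/store pendulum update performed above,
   reduced to a pure transition function. Stores swing the pendulum
   negative and loads swing it positive; the hook reacts to the
   resulting values -2, -1, 1 and 2 as described in its comment.  */

static int
pendulum_after_issue (int pendulum, bool issued_store, bool issued_load)
{
  if (issued_store)
    return pendulum - 1;	/* One more store issued this cycle.  */
  if (issued_load)
    return pendulum + 1;	/* One more load issued this cycle.  */
  return pendulum;		/* Neither: leave the state alone.  */
}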
31470 /* Return whether the presence of INSN causes a dispatch group termination
31471 of group WHICH_GROUP.
31473 If WHICH_GROUP == current_group, this function will return true if INSN
31474 causes the termination of the current group (i.e., the dispatch group to
31475 which INSN belongs). This means that INSN will be the last insn in the
31476 group it belongs to.
31478 If WHICH_GROUP == previous_group, this function will return true if INSN
31479 causes the termination of the previous group (i.e., the dispatch group that
31480 precedes the group to which INSN belongs). This means that INSN will be
31481 the first insn in the group it belongs to. */
31483 static bool
31484 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31486 bool first, last;
31488 if (! insn)
31489 return false;
31491 first = insn_must_be_first_in_group (insn);
31492 last = insn_must_be_last_in_group (insn);
31494 if (first && last)
31495 return true;
31497 if (which_group == current_group)
31498 return last;
31499 else if (which_group == previous_group)
31500 return first;
31502 return false;
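/* Editor's note (not from the original sources): an insn for which both
   insn_must_be_first_in_group and insn_must_be_last_in_group hold
   forms a dispatch group by itself, so the function above reports it
   as terminating both the previous group and its own.  */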
31506 static bool
31507 insn_must_be_first_in_group (rtx_insn *insn)
31509 enum attr_type type;
31511 if (!insn
31512 || NOTE_P (insn)
31513 || DEBUG_INSN_P (insn)
31514 || GET_CODE (PATTERN (insn)) == USE
31515 || GET_CODE (PATTERN (insn)) == CLOBBER)
31516 return false;
31518 switch (rs6000_cpu)
31520 case PROCESSOR_POWER5:
31521 if (is_cracked_insn (insn))
31522 return true;
31523 /* FALLTHRU */
31524 case PROCESSOR_POWER4:
31525 if (is_microcoded_insn (insn))
31526 return true;
31528 if (!rs6000_sched_groups)
31529 return false;
31531 type = get_attr_type (insn);
31533 switch (type)
31535 case TYPE_MFCR:
31536 case TYPE_MFCRF:
31537 case TYPE_MTCR:
31538 case TYPE_DELAYED_CR:
31539 case TYPE_CR_LOGICAL:
31540 case TYPE_MTJMPR:
31541 case TYPE_MFJMPR:
31542 case TYPE_DIV:
31543 case TYPE_LOAD_L:
31544 case TYPE_STORE_C:
31545 case TYPE_ISYNC:
31546 case TYPE_SYNC:
31547 return true;
31548 default:
31549 break;
31551 break;
31552 case PROCESSOR_POWER6:
31553 type = get_attr_type (insn);
31555 switch (type)
31557 case TYPE_EXTS:
31558 case TYPE_CNTLZ:
31559 case TYPE_TRAP:
31560 case TYPE_MUL:
31561 case TYPE_INSERT:
31562 case TYPE_FPCOMPARE:
31563 case TYPE_MFCR:
31564 case TYPE_MTCR:
31565 case TYPE_MFJMPR:
31566 case TYPE_MTJMPR:
31567 case TYPE_ISYNC:
31568 case TYPE_SYNC:
31569 case TYPE_LOAD_L:
31570 case TYPE_STORE_C:
31571 return true;
31572 case TYPE_SHIFT:
31573 if (get_attr_dot (insn) == DOT_NO
31574 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31575 return true;
31576 else
31577 break;
31578 case TYPE_DIV:
31579 if (get_attr_size (insn) == SIZE_32)
31580 return true;
31581 else
31582 break;
31583 case TYPE_LOAD:
31584 case TYPE_STORE:
31585 case TYPE_FPLOAD:
31586 case TYPE_FPSTORE:
31587 if (get_attr_update (insn) == UPDATE_YES)
31588 return true;
31589 else
31590 break;
31591 default:
31592 break;
31594 break;
31595 case PROCESSOR_POWER7:
31596 type = get_attr_type (insn);
31598 switch (type)
31600 case TYPE_CR_LOGICAL:
31601 case TYPE_MFCR:
31602 case TYPE_MFCRF:
31603 case TYPE_MTCR:
31604 case TYPE_DIV:
31605 case TYPE_ISYNC:
31606 case TYPE_LOAD_L:
31607 case TYPE_STORE_C:
31608 case TYPE_MFJMPR:
31609 case TYPE_MTJMPR:
31610 return true;
31611 case TYPE_MUL:
31612 case TYPE_SHIFT:
31613 case TYPE_EXTS:
31614 if (get_attr_dot (insn) == DOT_YES)
31615 return true;
31616 else
31617 break;
31618 case TYPE_LOAD:
31619 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31620 || get_attr_update (insn) == UPDATE_YES)
31621 return true;
31622 else
31623 break;
31624 case TYPE_STORE:
31625 case TYPE_FPLOAD:
31626 case TYPE_FPSTORE:
31627 if (get_attr_update (insn) == UPDATE_YES)
31628 return true;
31629 else
31630 break;
31631 default:
31632 break;
31634 break;
31635 case PROCESSOR_POWER8:
31636 type = get_attr_type (insn);
31638 switch (type)
31640 case TYPE_CR_LOGICAL:
31641 case TYPE_DELAYED_CR:
31642 case TYPE_MFCR:
31643 case TYPE_MFCRF:
31644 case TYPE_MTCR:
31645 case TYPE_SYNC:
31646 case TYPE_ISYNC:
31647 case TYPE_LOAD_L:
31648 case TYPE_STORE_C:
31649 case TYPE_VECSTORE:
31650 case TYPE_MFJMPR:
31651 case TYPE_MTJMPR:
31652 return true;
31653 case TYPE_SHIFT:
31654 case TYPE_EXTS:
31655 case TYPE_MUL:
31656 if (get_attr_dot (insn) == DOT_YES)
31657 return true;
31658 else
31659 break;
31660 case TYPE_LOAD:
31661 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31662 || get_attr_update (insn) == UPDATE_YES)
31663 return true;
31664 else
31665 break;
31666 case TYPE_STORE:
31667 if (get_attr_update (insn) == UPDATE_YES
31668 && get_attr_indexed (insn) == INDEXED_YES)
31669 return true;
31670 else
31671 break;
31672 default:
31673 break;
31675 break;
31676 default:
31677 break;
31680 return false;
31683 static bool
31684 insn_must_be_last_in_group (rtx_insn *insn)
31686 enum attr_type type;
31688 if (!insn
31689 || NOTE_P (insn)
31690 || DEBUG_INSN_P (insn)
31691 || GET_CODE (PATTERN (insn)) == USE
31692 || GET_CODE (PATTERN (insn)) == CLOBBER)
31693 return false;
31695 switch (rs6000_cpu) {
31696 case PROCESSOR_POWER4:
31697 case PROCESSOR_POWER5:
31698 if (is_microcoded_insn (insn))
31699 return true;
31701 if (is_branch_slot_insn (insn))
31702 return true;
31704 break;
31705 case PROCESSOR_POWER6:
31706 type = get_attr_type (insn);
31708 switch (type)
31710 case TYPE_EXTS:
31711 case TYPE_CNTLZ:
31712 case TYPE_TRAP:
31713 case TYPE_MUL:
31714 case TYPE_FPCOMPARE:
31715 case TYPE_MFCR:
31716 case TYPE_MTCR:
31717 case TYPE_MFJMPR:
31718 case TYPE_MTJMPR:
31719 case TYPE_ISYNC:
31720 case TYPE_SYNC:
31721 case TYPE_LOAD_L:
31722 case TYPE_STORE_C:
31723 return true;
31724 case TYPE_SHIFT:
31725 if (get_attr_dot (insn) == DOT_NO
31726 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31727 return true;
31728 else
31729 break;
31730 case TYPE_DIV:
31731 if (get_attr_size (insn) == SIZE_32)
31732 return true;
31733 else
31734 break;
31735 default:
31736 break;
31738 break;
31739 case PROCESSOR_POWER7:
31740 type = get_attr_type (insn);
31742 switch (type)
31744 case TYPE_ISYNC:
31745 case TYPE_SYNC:
31746 case TYPE_LOAD_L:
31747 case TYPE_STORE_C:
31748 return true;
31749 case TYPE_LOAD:
31750 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31751 && get_attr_update (insn) == UPDATE_YES)
31752 return true;
31753 else
31754 break;
31755 case TYPE_STORE:
31756 if (get_attr_update (insn) == UPDATE_YES
31757 && get_attr_indexed (insn) == INDEXED_YES)
31758 return true;
31759 else
31760 break;
31761 default:
31762 break;
31764 break;
31765 case PROCESSOR_POWER8:
31766 type = get_attr_type (insn);
31768 switch (type)
31770 case TYPE_MFCR:
31771 case TYPE_MTCR:
31772 case TYPE_ISYNC:
31773 case TYPE_SYNC:
31774 case TYPE_LOAD_L:
31775 case TYPE_STORE_C:
31776 return true;
31777 case TYPE_LOAD:
31778 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31779 && get_attr_update (insn) == UPDATE_YES)
31780 return true;
31781 else
31782 break;
31783 case TYPE_STORE:
31784 if (get_attr_update (insn) == UPDATE_YES
31785 && get_attr_indexed (insn) == INDEXED_YES)
31786 return true;
31787 else
31788 break;
31789 default:
31790 break;
31792 break;
31793 default:
31794 break;
31797 return false;
31800 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31801 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31803 static bool
31804 is_costly_group (rtx *group_insns, rtx next_insn)
31806 int i;
31807 int issue_rate = rs6000_issue_rate ();
31809 for (i = 0; i < issue_rate; i++)
31811 sd_iterator_def sd_it;
31812 dep_t dep;
31813 rtx insn = group_insns[i];
31815 if (!insn)
31816 continue;
31818 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31820 rtx next = DEP_CON (dep);
31822 if (next == next_insn
31823 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31824 return true;
31828 return false;
31831 /* Helper for the function redefine_groups.
31832 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31833 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31834 to keep it "far" (in a separate group) from GROUP_INSNS, following
31835 one of the following schemes, depending on the value of the flag
31836 -minsert-sched-nops = X:
31837 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31838 in order to force NEXT_INSN into a separate group.
31839 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31840 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31841 insertion (whether a group has just ended, how many vacant issue slots remain in the
31842 last group, and how many dispatch groups were encountered so far). */
31844 static int
31845 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31846 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31847 int *group_count)
31849 rtx nop;
31850 bool force;
31851 int issue_rate = rs6000_issue_rate ();
31852 bool end = *group_end;
31853 int i;
31855 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31856 return can_issue_more;
31858 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31859 return can_issue_more;
31861 force = is_costly_group (group_insns, next_insn);
31862 if (!force)
31863 return can_issue_more;
31865 if (sched_verbose > 6)
31866 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31867 *group_count ,can_issue_more);
31869 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31871 if (*group_end)
31872 can_issue_more = 0;
31874 /* Since only a branch can be issued in the last issue_slot, it is
31875 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31876 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31877 in this case the last nop will start a new group and the branch
31878 will be forced to the new group. */
31879 if (can_issue_more && !is_branch_slot_insn (next_insn))
31880 can_issue_more--;
31882 /* Do we have a special group ending nop? */
31883 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31884 || rs6000_cpu_attr == CPU_POWER8)
31886 nop = gen_group_ending_nop ();
31887 emit_insn_before (nop, next_insn);
31888 can_issue_more = 0;
31890 else
31891 while (can_issue_more > 0)
31893 nop = gen_nop ();
31894 emit_insn_before (nop, next_insn);
31895 can_issue_more--;
31898 *group_end = true;
31899 return 0;
31902 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31904 int n_nops = rs6000_sched_insert_nops;
31906 /* Nops can't be issued from the branch slot, so the effective
31907 issue_rate for nops is 'issue_rate - 1'. */
31908 if (can_issue_more == 0)
31909 can_issue_more = issue_rate;
31910 can_issue_more--;
31911 if (can_issue_more == 0)
31913 can_issue_more = issue_rate - 1;
31914 (*group_count)++;
31915 end = true;
31916 for (i = 0; i < issue_rate; i++)
31918 group_insns[i] = 0;
31922 while (n_nops > 0)
31924 nop = gen_nop ();
31925 emit_insn_before (nop, next_insn);
31926 if (can_issue_more == issue_rate - 1) /* new group begins */
31927 end = false;
31928 can_issue_more--;
31929 if (can_issue_more == 0)
31931 can_issue_more = issue_rate - 1;
31932 (*group_count)++;
31933 end = true;
31934 for (i = 0; i < issue_rate; i++)
31936 group_insns[i] = 0;
31939 n_nops--;
31942 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31943 can_issue_more++;
31945 /* Is next_insn going to start a new group? */
31946 *group_end
31947 = (end
31948 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31949 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31950 || (can_issue_more < issue_rate &&
31951 insn_terminates_group_p (next_insn, previous_group)));
31952 if (*group_end && end)
31953 (*group_count)--;
31955 if (sched_verbose > 6)
31956 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31957 *group_count, can_issue_more);
31958 return can_issue_more;
31961 return can_issue_more;
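/* Worked example (editor's sketch, not from the original sources):
   under scheme (1) above (rs6000_sched_insert_nops ==
   sched_finish_regroup_exact), with can_issue_more == 3, a non-branch
   NEXT_INSN and a CPU without a group-ending nop, the loop emits
   3 - 1 = 2 nops: they fill the remaining non-branch slots so that
   NEXT_INSN is forced into a new dispatch group.  */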
31964 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31965 with the dispatch groups that the processor dispatcher is expected to
31966 form in practice. It tries to achieve this synchronization by forcing the
31967 estimated processor grouping on the compiler (as opposed to the function
31968 'pad_groups' which tries to force the scheduler's grouping on the processor).
31970 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31971 examines the (estimated) dispatch groups that will be formed by the processor
31972 dispatcher. It marks these group boundaries to reflect the estimated
31973 processor grouping, overriding the grouping that the scheduler had marked.
31974 Depending on the value of the flag '-minsert-sched-nops' this function can
31975 force certain insns into separate groups or force a certain distance between
31976 them by inserting nops, for example, if there exists a "costly dependence"
31977 between the insns.
31979 The function estimates the group boundaries that the processor will form as
31980 follows: It keeps track of how many vacant issue slots are available after
31981 each insn. A subsequent insn will start a new group if one of the following
31982 4 cases applies:
31983 - no more vacant issue slots remain in the current dispatch group.
31984 - only the last issue slot, which is the branch slot, is vacant, but the next
31985 insn is not a branch.
31986 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31987 which means that a cracked insn (which occupies two issue slots) can't be
31988 issued in this group.
31989 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31990 start a new group. */
31992 static int
31993 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31994 rtx_insn *tail)
31996 rtx_insn *insn, *next_insn;
31997 int issue_rate;
31998 int can_issue_more;
31999 int slot, i;
32000 bool group_end;
32001 int group_count = 0;
32002 rtx *group_insns;
32004 /* Initialize. */
32005 issue_rate = rs6000_issue_rate ();
32006 group_insns = XALLOCAVEC (rtx, issue_rate);
32007 for (i = 0; i < issue_rate; i++)
32009 group_insns[i] = 0;
32011 can_issue_more = issue_rate;
32012 slot = 0;
32013 insn = get_next_active_insn (prev_head_insn, tail);
32014 group_end = false;
32016 while (insn != NULL_RTX)
32018 slot = (issue_rate - can_issue_more);
32019 group_insns[slot] = insn;
32020 can_issue_more =
32021 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32022 if (insn_terminates_group_p (insn, current_group))
32023 can_issue_more = 0;
32025 next_insn = get_next_active_insn (insn, tail);
32026 if (next_insn == NULL_RTX)
32027 return group_count + 1;
32029 /* Is next_insn going to start a new group? */
32030 group_end
32031 = (can_issue_more == 0
32032 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32033 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32034 || (can_issue_more < issue_rate &&
32035 insn_terminates_group_p (next_insn, previous_group)));
32037 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32038 next_insn, &group_end, can_issue_more,
32039 &group_count);
32041 if (group_end)
32043 group_count++;
32044 can_issue_more = 0;
32045 for (i = 0; i < issue_rate; i++)
32047 group_insns[i] = 0;
32051 if (GET_MODE (next_insn) == TImode && can_issue_more)
32052 PUT_MODE (next_insn, VOIDmode);
32053 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32054 PUT_MODE (next_insn, TImode);
32056 insn = next_insn;
32057 if (can_issue_more == 0)
32058 can_issue_more = issue_rate;
32059 } /* while */
32061 return group_count;
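/* Editor's note (not from the original sources): in this pass an insn
   whose mode is TImode marks the start of a new dispatch group, which
   is why the loop above sets or clears TImode on NEXT_INSN to match
   the recomputed boundaries; pad_groups below reads the same marks.  */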
32064 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32065 dispatch group boundaries that the scheduler had marked. Pad with nops
32066 any dispatch groups which have vacant issue slots, in order to force the
32067 scheduler's grouping on the processor dispatcher. The function
32068 returns the number of dispatch groups found. */
32070 static int
32071 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32072 rtx_insn *tail)
32074 rtx_insn *insn, *next_insn;
32075 rtx nop;
32076 int issue_rate;
32077 int can_issue_more;
32078 int group_end;
32079 int group_count = 0;
32081 /* Initialize issue_rate. */
32082 issue_rate = rs6000_issue_rate ();
32083 can_issue_more = issue_rate;
32085 insn = get_next_active_insn (prev_head_insn, tail);
32086 next_insn = get_next_active_insn (insn, tail);
32088 while (insn != NULL_RTX)
32090 can_issue_more =
32091 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32093 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32095 if (next_insn == NULL_RTX)
32096 break;
32098 if (group_end)
32100 /* If the scheduler had marked group termination at this location
32101 (between insn and next_insn), and neither insn nor next_insn will
32102 force group termination, pad the group with nops to force group
32103 termination. */
32104 if (can_issue_more
32105 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32106 && !insn_terminates_group_p (insn, current_group)
32107 && !insn_terminates_group_p (next_insn, previous_group))
32109 if (!is_branch_slot_insn (next_insn))
32110 can_issue_more--;
32112 while (can_issue_more)
32114 nop = gen_nop ();
32115 emit_insn_before (nop, next_insn);
32116 can_issue_more--;
32120 can_issue_more = issue_rate;
32121 group_count++;
32124 insn = next_insn;
32125 next_insn = get_next_active_insn (insn, tail);
32128 return group_count;
32131 /* We're beginning a new block. Initialize data structures as necessary. */
32133 static void
32134 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32135 int sched_verbose ATTRIBUTE_UNUSED,
32136 int max_ready ATTRIBUTE_UNUSED)
32138 last_scheduled_insn = NULL;
32139 load_store_pendulum = 0;
32140 divide_cnt = 0;
32141 vec_pairing = 0;
32144 /* The following function is called at the end of scheduling BB.
32145 After reload, it inserts nops to enforce the insn group bundling. */
32147 static void
32148 rs6000_sched_finish (FILE *dump, int sched_verbose)
32150 int n_groups;
32152 if (sched_verbose)
32153 fprintf (dump, "=== Finishing schedule.\n");
32155 if (reload_completed && rs6000_sched_groups)
32157 /* Do not run sched_finish hook when selective scheduling enabled. */
32158 if (sel_sched_p ())
32159 return;
32161 if (rs6000_sched_insert_nops == sched_finish_none)
32162 return;
32164 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32165 n_groups = pad_groups (dump, sched_verbose,
32166 current_sched_info->prev_head,
32167 current_sched_info->next_tail);
32168 else
32169 n_groups = redefine_groups (dump, sched_verbose,
32170 current_sched_info->prev_head,
32171 current_sched_info->next_tail);
32173 if (sched_verbose >= 6)
32175 fprintf (dump, "ngroups = %d\n", n_groups);
32176 print_rtl (dump, current_sched_info->prev_head);
32177 fprintf (dump, "Done finish_sched\n");
32182 struct rs6000_sched_context
32184 short cached_can_issue_more;
32185 rtx_insn *last_scheduled_insn;
32186 int load_store_pendulum;
32187 int divide_cnt;
32188 int vec_pairing;
32191 typedef struct rs6000_sched_context rs6000_sched_context_def;
32192 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32194 /* Allocate store for new scheduling context. */
32195 static void *
32196 rs6000_alloc_sched_context (void)
32198 return xmalloc (sizeof (rs6000_sched_context_def));
32201 /* If CLEAN_P is true, initialize _SC with clean data;
32202 otherwise, initialize it from the global context. */
32203 static void
32204 rs6000_init_sched_context (void *_sc, bool clean_p)
32206 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32208 if (clean_p)
32210 sc->cached_can_issue_more = 0;
32211 sc->last_scheduled_insn = NULL;
32212 sc->load_store_pendulum = 0;
32213 sc->divide_cnt = 0;
32214 sc->vec_pairing = 0;
32216 else
32218 sc->cached_can_issue_more = cached_can_issue_more;
32219 sc->last_scheduled_insn = last_scheduled_insn;
32220 sc->load_store_pendulum = load_store_pendulum;
32221 sc->divide_cnt = divide_cnt;
32222 sc->vec_pairing = vec_pairing;
32226 /* Sets the global scheduling context to the one pointed to by _SC. */
32227 static void
32228 rs6000_set_sched_context (void *_sc)
32230 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32232 gcc_assert (sc != NULL);
32234 cached_can_issue_more = sc->cached_can_issue_more;
32235 last_scheduled_insn = sc->last_scheduled_insn;
32236 load_store_pendulum = sc->load_store_pendulum;
32237 divide_cnt = sc->divide_cnt;
32238 vec_pairing = sc->vec_pairing;
32241 /* Free _SC. */
32242 static void
32243 rs6000_free_sched_context (void *_sc)
32245 gcc_assert (_sc != NULL);
32247 free (_sc);
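/* Usage sketch (editor's illustration, not from the original sources):
   the selective scheduler drives the four hooks above roughly as

     void *ctx = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (ctx, true);   - start from a clean state
     rs6000_set_sched_context (ctx);          - make it the live state
     rs6000_free_sched_context (ctx);

   so per-region scheduling state can be saved and resumed later.  */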
32250 static bool
32251 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32253 switch (get_attr_type (insn))
32255 case TYPE_DIV:
32256 case TYPE_SDIV:
32257 case TYPE_DDIV:
32258 case TYPE_VECDIV:
32259 case TYPE_SSQRT:
32260 case TYPE_DSQRT:
32261 return false;
32263 default:
32264 return true;
32268 /* Length in units of the trampoline for entering a nested function. */
32271 rs6000_trampoline_size (void)
32273 int ret = 0;
32275 switch (DEFAULT_ABI)
32277 default:
32278 gcc_unreachable ();
32280 case ABI_AIX:
32281 ret = (TARGET_32BIT) ? 12 : 24;
32282 break;
32284 case ABI_ELFv2:
32285 gcc_assert (!TARGET_32BIT);
32286 ret = 32;
32287 break;
32289 case ABI_DARWIN:
32290 case ABI_V4:
32291 ret = (TARGET_32BIT) ? 40 : 48;
32292 break;
32295 return ret;
32298 /* Emit RTL insns to initialize the variable parts of a trampoline.
32299 FNADDR is an RTX for the address of the function's pure code.
32300 CXT is an RTX for the static chain value for the function. */
32302 static void
32303 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32305 int regsize = (TARGET_32BIT) ? 4 : 8;
32306 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32307 rtx ctx_reg = force_reg (Pmode, cxt);
32308 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32310 switch (DEFAULT_ABI)
32312 default:
32313 gcc_unreachable ();
32315 /* Under AIX, just build the 3-word function descriptor. */
32316 case ABI_AIX:
32318 rtx fnmem, fn_reg, toc_reg;
32320 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32321 error ("you cannot take the address of a nested function if you use "
32322 "the %qs option", "-mno-pointers-to-nested-functions");
32324 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32325 fn_reg = gen_reg_rtx (Pmode);
32326 toc_reg = gen_reg_rtx (Pmode);
32328 /* Macro to shorten the code expansions below. */
32329 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32331 m_tramp = replace_equiv_address (m_tramp, addr);
32333 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32334 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32335 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32336 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32337 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32339 # undef MEM_PLUS
32341 break;
32343 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32344 case ABI_ELFv2:
32345 case ABI_DARWIN:
32346 case ABI_V4:
32347 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32348 LCT_NORMAL, VOIDmode,
32349 addr, Pmode,
32350 GEN_INT (rs6000_trampoline_size ()), SImode,
32351 fnaddr, Pmode,
32352 ctx_reg, Pmode);
32353 break;
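/* Editor's note (not from the original sources): under ABI_AIX the code
   above copies a 3-word function descriptor (entry address, TOC
   pointer, static chain) to offsets 0, regsize and 2*regsize from the
   trampoline base, matching the 12 (32-bit) or 24 (64-bit) bytes
   reported by rs6000_trampoline_size.  */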
32358 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32359 identifier as an argument, so the front end shouldn't look it up. */
32361 static bool
32362 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32364 return is_attribute_p ("altivec", attr_id);
32367 /* Handle the "altivec" attribute. The attribute may have
32368 arguments as follows:
32370 __attribute__((altivec(vector__)))
32371 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32372 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32374 and may appear more than once (e.g., 'vector bool char') in a
32375 given declaration. */
32377 static tree
32378 rs6000_handle_altivec_attribute (tree *node,
32379 tree name ATTRIBUTE_UNUSED,
32380 tree args,
32381 int flags ATTRIBUTE_UNUSED,
32382 bool *no_add_attrs)
32384 tree type = *node, result = NULL_TREE;
32385 machine_mode mode;
32386 int unsigned_p;
32387 char altivec_type
32388 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32389 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32390 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32391 : '?');
32393 while (POINTER_TYPE_P (type)
32394 || TREE_CODE (type) == FUNCTION_TYPE
32395 || TREE_CODE (type) == METHOD_TYPE
32396 || TREE_CODE (type) == ARRAY_TYPE)
32397 type = TREE_TYPE (type);
32399 mode = TYPE_MODE (type);
32401 /* Check for invalid AltiVec type qualifiers. */
32402 if (type == long_double_type_node)
32403 error ("use of %<long double%> in AltiVec types is invalid");
32404 else if (type == boolean_type_node)
32405 error ("use of boolean types in AltiVec types is invalid");
32406 else if (TREE_CODE (type) == COMPLEX_TYPE)
32407 error ("use of %<complex%> in AltiVec types is invalid");
32408 else if (DECIMAL_FLOAT_MODE_P (mode))
32409 error ("use of decimal floating point types in AltiVec types is invalid");
32410 else if (!TARGET_VSX)
32412 if (type == long_unsigned_type_node || type == long_integer_type_node)
32414 if (TARGET_64BIT)
32415 error ("use of %<long%> in AltiVec types is invalid for "
32416 "64-bit code without %qs", "-mvsx");
32417 else if (rs6000_warn_altivec_long)
32418 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32419 "use %<int%>");
32421 else if (type == long_long_unsigned_type_node
32422 || type == long_long_integer_type_node)
32423 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32424 "-mvsx");
32425 else if (type == double_type_node)
32426 error ("use of %<double%> in AltiVec types is invalid without %qs",
32427 "-mvsx");
32430 switch (altivec_type)
32432 case 'v':
32433 unsigned_p = TYPE_UNSIGNED (type);
32434 switch (mode)
32436 case E_TImode:
32437 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32438 break;
32439 case E_DImode:
32440 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32441 break;
32442 case E_SImode:
32443 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32444 break;
32445 case E_HImode:
32446 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32447 break;
32448 case E_QImode:
32449 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32450 break;
32451 case E_SFmode: result = V4SF_type_node; break;
32452 case E_DFmode: result = V2DF_type_node; break;
32453 /* If the user says 'vector int bool', we may be handed the 'bool'
32454 attribute _before_ the 'vector' attribute, and so select the
32455 proper type in the 'b' case below. */
32456 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32457 case E_V2DImode: case E_V2DFmode:
32458 result = type;
32459 default: break;
32461 break;
32462 case 'b':
32463 switch (mode)
32465 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32466 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32467 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32468 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32469 default: break;
32471 break;
32472 case 'p':
32473 switch (mode)
32475 case E_V8HImode: result = pixel_V8HI_type_node;
32476 default: break;
32478 default: break;
32481 /* Propagate qualifiers attached to the element type
32482 onto the vector type. */
32483 if (result && result != type && TYPE_QUALS (type))
32484 result = build_qualified_type (result, TYPE_QUALS (type));
32486 *no_add_attrs = true; /* No need to hang on to the attribute. */
32488 if (result)
32489 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32491 return NULL_TREE;
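/* Editor's sketch (user-level example, not from the original sources;
   the typedef name is hypothetical): source code that reaches the
   handler above.

     typedef int v4si_t __attribute__ ((altivec (vector__)));

   Here ALTIVEC_TYPE is 'v' and the element mode is SImode, so the 'v'
   case selects V4SI_type_node (or its unsigned variant for an unsigned
   element type).  */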
32494 /* AltiVec defines four built-in scalar types that serve as vector
32495 elements; we must teach the compiler how to mangle them. */
32497 static const char *
32498 rs6000_mangle_type (const_tree type)
32500 type = TYPE_MAIN_VARIANT (type);
32502 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32503 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32504 return NULL;
32506 if (type == bool_char_type_node) return "U6__boolc";
32507 if (type == bool_short_type_node) return "U6__bools";
32508 if (type == pixel_type_node) return "u7__pixel";
32509 if (type == bool_int_type_node) return "U6__booli";
32510 if (type == bool_long_type_node) return "U6__booll";
32512 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32513 "g" for IBM extended double, no matter whether it is long double (using
32514 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32515 if (TARGET_FLOAT128_TYPE)
32517 if (type == ieee128_float_type_node)
32518 return "U10__float128";
32520 if (TARGET_LONG_DOUBLE_128)
32522 if (type == long_double_type_node)
32523 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32525 if (type == ibm128_float_type_node)
32526 return "g";
32530 /* Mangle IBM extended float long double as `g' (__float128) on
32531 powerpc*-linux where long-double-64 previously was the default. */
32532 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32533 && TARGET_ELF
32534 && TARGET_LONG_DOUBLE_128
32535 && !TARGET_IEEEQUAD)
32536 return "g";
32538 /* For all other types, use normal C++ mangling. */
32539 return NULL;
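/* Worked example (editor's sketch, not from the original sources; the
   function name is hypothetical): given the mangling above, a
   declaration

     void f (__pixel);

   encodes the parameter with the vendor-extended name "u7__pixel",
   giving the Itanium-ABI symbol _Z1fu7__pixel.  */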
32542 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32543 struct attribute_spec.handler. */
32545 static tree
32546 rs6000_handle_longcall_attribute (tree *node, tree name,
32547 tree args ATTRIBUTE_UNUSED,
32548 int flags ATTRIBUTE_UNUSED,
32549 bool *no_add_attrs)
32551 if (TREE_CODE (*node) != FUNCTION_TYPE
32552 && TREE_CODE (*node) != FIELD_DECL
32553 && TREE_CODE (*node) != TYPE_DECL)
32555 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32556 name);
32557 *no_add_attrs = true;
32560 return NULL_TREE;
32563 /* Set longcall attributes on all functions declared when
32564 rs6000_default_long_calls is true. */
32565 static void
32566 rs6000_set_default_type_attributes (tree type)
32568 if (rs6000_default_long_calls
32569 && (TREE_CODE (type) == FUNCTION_TYPE
32570 || TREE_CODE (type) == METHOD_TYPE))
32571 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32572 NULL_TREE,
32573 TYPE_ATTRIBUTES (type));
32575 #if TARGET_MACHO
32576 darwin_set_default_type_attributes (type);
32577 #endif
32580 /* Return a reference suitable for calling a function with the
32581 longcall attribute. */
32584 rs6000_longcall_ref (rtx call_ref)
32586 const char *call_name;
32587 tree node;
32589 if (GET_CODE (call_ref) != SYMBOL_REF)
32590 return call_ref;
32592 /* System V adds '.' to the internal name, so skip any leading dots. */
32593 call_name = XSTR (call_ref, 0);
32594 if (*call_name == '.')
32596 while (*call_name == '.')
32597 call_name++;
32599 node = get_identifier (call_name);
32600 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32603 return force_reg (Pmode, call_ref);
32606 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32607 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32608 #endif
32610 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32611 struct attribute_spec.handler. */
32612 static tree
32613 rs6000_handle_struct_attribute (tree *node, tree name,
32614 tree args ATTRIBUTE_UNUSED,
32615 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32617 tree *type = NULL;
32618 if (DECL_P (*node))
32620 if (TREE_CODE (*node) == TYPE_DECL)
32621 type = &TREE_TYPE (*node);
32623 else
32624 type = node;
32626 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32627 || TREE_CODE (*type) == UNION_TYPE)))
32629 warning (OPT_Wattributes, "%qE attribute ignored", name);
32630 *no_add_attrs = true;
32633 else if ((is_attribute_p ("ms_struct", name)
32634 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32635 || ((is_attribute_p ("gcc_struct", name)
32636 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32638 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32639 name);
32640 *no_add_attrs = true;
32643 return NULL_TREE;
32646 static bool
32647 rs6000_ms_bitfield_layout_p (const_tree record_type)
32649 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32650 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32651 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32654 #ifdef USING_ELFOS_H
32656 /* A get_unnamed_section callback, used for switching to toc_section. */
32658 static void
32659 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32661 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32662 && TARGET_MINIMAL_TOC)
32664 if (!toc_initialized)
32666 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32667 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32668 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32669 fprintf (asm_out_file, "\t.tc ");
32670 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32671 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32672 fprintf (asm_out_file, "\n");
32674 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32675 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32676 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32677 fprintf (asm_out_file, " = .+32768\n");
32678 toc_initialized = 1;
32680 else
32681 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32683 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32685 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32686 if (!toc_initialized)
32688 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32689 toc_initialized = 1;
32692 else
32694 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32695 if (!toc_initialized)
32697 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32698 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32699 fprintf (asm_out_file, " = .+32768\n");
32700 toc_initialized = 1;
32705 /* Implement TARGET_ASM_INIT_SECTIONS. */
32707 static void
32708 rs6000_elf_asm_init_sections (void)
32710 toc_section
32711 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32713 sdata2_section
32714 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32715 SDATA2_SECTION_ASM_OP);
32718 /* Implement TARGET_SELECT_RTX_SECTION. */
32720 static section *
32721 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32722 unsigned HOST_WIDE_INT align)
32724 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32725 return toc_section;
32726 else
32727 return default_elf_select_rtx_section (mode, x, align);
32730 /* For a SYMBOL_REF, set generic flags and then perform some
32731 target-specific processing.
32733 When the AIX ABI is requested on a non-AIX system, replace the
32734 function name with the real name (with a leading .) rather than the
32735 function descriptor name. This saves a lot of overriding code to
32736 read the prefixes. */
32738 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32739 static void
32740 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32742 default_encode_section_info (decl, rtl, first);
32744 if (first
32745 && TREE_CODE (decl) == FUNCTION_DECL
32746 && !TARGET_AIX
32747 && DEFAULT_ABI == ABI_AIX)
32749 rtx sym_ref = XEXP (rtl, 0);
32750 size_t len = strlen (XSTR (sym_ref, 0));
32751 char *str = XALLOCAVEC (char, len + 2);
32752 str[0] = '.';
32753 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32754 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32758 static inline bool
32759 compare_section_name (const char *section, const char *templ)
32761 int len;
32763 len = strlen (templ);
32764 return (strncmp (section, templ, len) == 0
32765 && (section[len] == 0 || section[len] == '.'));
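/* Worked example (editor's note, not from the original sources):
   compare_section_name (".sdata.foo", ".sdata") is true because the
   match ends at a '.', while compare_section_name (".sdata2", ".sdata")
   is false because '2' is neither NUL nor '.'.  */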
32768 bool
32769 rs6000_elf_in_small_data_p (const_tree decl)
32771 if (rs6000_sdata == SDATA_NONE)
32772 return false;
32774 /* We want to merge strings, so we never consider them small data. */
32775 if (TREE_CODE (decl) == STRING_CST)
32776 return false;
32778 /* Functions are never in the small data area. */
32779 if (TREE_CODE (decl) == FUNCTION_DECL)
32780 return false;
32782 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32784 const char *section = DECL_SECTION_NAME (decl);
32785 if (compare_section_name (section, ".sdata")
32786 || compare_section_name (section, ".sdata2")
32787 || compare_section_name (section, ".gnu.linkonce.s")
32788 || compare_section_name (section, ".sbss")
32789 || compare_section_name (section, ".sbss2")
32790 || compare_section_name (section, ".gnu.linkonce.sb")
32791 || strcmp (section, ".PPC.EMB.sdata0") == 0
32792 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32793 return true;
32795 else
32797 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32799 if (size > 0
32800 && size <= g_switch_value
32801 /* If it's not public, and we're not going to reference it there,
32802 there's no need to put it in the small data section. */
32803 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32804 return true;
32807 return false;
32810 #endif /* USING_ELFOS_H */
32812 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32814 static bool
32815 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32817 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32820 /* Do not place thread-local symbols refs in the object blocks. */
32822 static bool
32823 rs6000_use_blocks_for_decl_p (const_tree decl)
32825 return !DECL_THREAD_LOCAL_P (decl);
32828 /* Return a REG that occurs in ADDR with coefficient 1.
32829 ADDR can be effectively incremented by incrementing REG.
32831 r0 is special and we must not select it as an address
32832 register by this routine since our caller will try to
32833 increment the returned register via an "la" instruction. */
32836 find_addr_reg (rtx addr)
32838 while (GET_CODE (addr) == PLUS)
32840 if (GET_CODE (XEXP (addr, 0)) == REG
32841 && REGNO (XEXP (addr, 0)) != 0)
32842 addr = XEXP (addr, 0);
32843 else if (GET_CODE (XEXP (addr, 1)) == REG
32844 && REGNO (XEXP (addr, 1)) != 0)
32845 addr = XEXP (addr, 1);
32846 else if (CONSTANT_P (XEXP (addr, 0)))
32847 addr = XEXP (addr, 1);
32848 else if (CONSTANT_P (XEXP (addr, 1)))
32849 addr = XEXP (addr, 0);
32850 else
32851 gcc_unreachable ();
32853 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32854 return addr;
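/* Worked example (editor's note, not from the original sources): for
   ADDR == (plus (reg 9) (const_int 16)) the constant operand is
   stripped and (reg 9) is returned; incrementing r9 then effectively
   increments the whole address. r0 is excluded because "la" treats a
   base of r0 as the literal value zero.  */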
32857 void
32858 rs6000_fatal_bad_address (rtx op)
32860 fatal_insn ("bad address", op);
32863 #if TARGET_MACHO
32865 typedef struct branch_island_d {
32866 tree function_name;
32867 tree label_name;
32868 int line_number;
32869 } branch_island;
32872 static vec<branch_island, va_gc> *branch_islands;
32874 /* Remember to generate a branch island for far calls to the given
32875 function. */
32877 static void
32878 add_compiler_branch_island (tree label_name, tree function_name,
32879 int line_number)
32881 branch_island bi = {function_name, label_name, line_number};
32882 vec_safe_push (branch_islands, bi);
32885 /* Generate far-jump branch islands for everything recorded in
32886 branch_islands. Invoked immediately after the last instruction of
32887 the epilogue has been emitted; the branch islands must be appended
32888 to, and contiguous with, the function body. Mach-O stubs are
32889 generated in machopic_output_stub(). */
32891 static void
32892 macho_branch_islands (void)
32894 char tmp_buf[512];
32896 while (!vec_safe_is_empty (branch_islands))
32898 branch_island *bi = &branch_islands->last ();
32899 const char *label = IDENTIFIER_POINTER (bi->label_name);
32900 const char *name = IDENTIFIER_POINTER (bi->function_name);
32901 char name_buf[512];
32902 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32903 if (name[0] == '*' || name[0] == '&')
32904 strcpy (name_buf, name+1);
32905 else
32907 name_buf[0] = '_';
32908 strcpy (name_buf+1, name);
32910 strcpy (tmp_buf, "\n");
32911 strcat (tmp_buf, label);
32912 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32913 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32914 dbxout_stabd (N_SLINE, bi->line_number);
32915 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32916 if (flag_pic)
32918 if (TARGET_LINK_STACK)
32920 char name[32];
32921 get_ppc476_thunk_name (name);
32922 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32923 strcat (tmp_buf, name);
32924 strcat (tmp_buf, "\n");
32925 strcat (tmp_buf, label);
32926 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32928 else
32930 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32931 strcat (tmp_buf, label);
32932 strcat (tmp_buf, "_pic\n");
32933 strcat (tmp_buf, label);
32934 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32937 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32938 strcat (tmp_buf, name_buf);
32939 strcat (tmp_buf, " - ");
32940 strcat (tmp_buf, label);
32941 strcat (tmp_buf, "_pic)\n");
32943 strcat (tmp_buf, "\tmtlr r0\n");
32945 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32946 strcat (tmp_buf, name_buf);
32947 strcat (tmp_buf, " - ");
32948 strcat (tmp_buf, label);
32949 strcat (tmp_buf, "_pic)\n");
32951 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32953 else
32955 strcat (tmp_buf, ":\nlis r12,hi16(");
32956 strcat (tmp_buf, name_buf);
32957 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32958 strcat (tmp_buf, name_buf);
32959 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32961 output_asm_insn (tmp_buf, 0);
32962 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32963 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32964 dbxout_stabd (N_SLINE, bi->line_number);
32965 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32966 branch_islands->pop ();
32970 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
32971 already there or not. */
32973 static int
32974 no_previous_def (tree function_name)
32976 branch_island *bi;
32977 unsigned ix;
32979 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32980 if (function_name == bi->function_name)
32981 return 0;
32982 return 1;
32985 /* GET_PREV_LABEL gets the label name from the previous definition of
32986 the function. */
32988 static tree
32989 get_prev_label (tree function_name)
32991 branch_island *bi;
32992 unsigned ix;
32994 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32995 if (function_name == bi->function_name)
32996 return bi->label_name;
32997 return NULL_TREE;
33000 /* INSN is either a function call or a millicode call. It may have an
33001 unconditional jump in its delay slot.
33003 CALL_DEST is the routine we are calling. */
33005 char *
33006 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33007 int cookie_operand_number)
33009 static char buf[256];
33010 if (darwin_emit_branch_islands
33011 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33012 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33014 tree labelname;
33015 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33017 if (no_previous_def (funname))
33019 rtx label_rtx = gen_label_rtx ();
33020 char *label_buf, temp_buf[256];
33021 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33022 CODE_LABEL_NUMBER (label_rtx));
33023 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33024 labelname = get_identifier (label_buf);
33025 add_compiler_branch_island (labelname, funname, insn_line (insn));
33027 else
33028 labelname = get_prev_label (funname);
33030 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33031 instruction will reach 'foo', otherwise link as 'bl L42'".
33032 "L42" should be a 'branch island', that will do a far jump to
33033 'foo'. Branch islands are generated in
33034 macho_branch_islands(). */
33035 sprintf (buf, "jbsr %%z%d,%.246s",
33036 dest_operand_number, IDENTIFIER_POINTER (labelname));
33038 else
33039 sprintf (buf, "bl %%z%d", dest_operand_number);
33040 return buf;
33043 /* Generate PIC and indirect symbol stubs. */
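/* For illustration, the non-PIC (flag_pic != 2) stub emitted below for a
   symbol "foo" looks roughly like this (the stub and lazy-pointer names
   follow the Darwin conventions assumed here):

       _foo$stub:
               .indirect_symbol _foo
               lis   r11,ha16(_foo$lazy_ptr)
               lwzu  r12,lo16(_foo$lazy_ptr)(r11)
               mtctr r12
               bctr

   followed by the lazy pointer, which initially points at
   dyld_stub_binding_helper until dyld binds the real address.  */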
33045 void
33046 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33048 unsigned int length;
33049 char *symbol_name, *lazy_ptr_name;
33050 char *local_label_0;
33051 static int label = 0;
33053 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33054 symb = (*targetm.strip_name_encoding) (symb);
33057 length = strlen (symb);
33058 symbol_name = XALLOCAVEC (char, length + 32);
33059 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33061 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33062 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33064 if (flag_pic == 2)
33065 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33066 else
33067 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33069 if (flag_pic == 2)
33071 fprintf (file, "\t.align 5\n");
33073 fprintf (file, "%s:\n", stub);
33074 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33076 label++;
33077 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33078 sprintf (local_label_0, "\"L%011d$spb\"", label);
33080 fprintf (file, "\tmflr r0\n");
33081 if (TARGET_LINK_STACK)
33083 char name[32];
33084 get_ppc476_thunk_name (name);
33085 fprintf (file, "\tbl %s\n", name);
33086 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33088 else
33090 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33091 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33093 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33094 lazy_ptr_name, local_label_0);
33095 fprintf (file, "\tmtlr r0\n");
33096 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33097 (TARGET_64BIT ? "ldu" : "lwzu"),
33098 lazy_ptr_name, local_label_0);
33099 fprintf (file, "\tmtctr r12\n");
33100 fprintf (file, "\tbctr\n");
33102 else
33104 fprintf (file, "\t.align 4\n");
33106 fprintf (file, "%s:\n", stub);
33107 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33109 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33110 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33111 (TARGET_64BIT ? "ldu" : "lwzu"),
33112 lazy_ptr_name);
33113 fprintf (file, "\tmtctr r12\n");
33114 fprintf (file, "\tbctr\n");
33117 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33118 fprintf (file, "%s:\n", lazy_ptr_name);
33119 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33120 fprintf (file, "%sdyld_stub_binding_helper\n",
33121 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33124 /* Legitimize PIC addresses. If the address is already
33125 position-independent, we return ORIG. Newly generated
33126 position-independent addresses go into a reg.  This is REG if
33127 nonzero, otherwise we allocate register(s) as necessary. */
33129 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
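/* SMALL_INT is true when X fits in a signed 16-bit immediate field:
   e.g. 0x7fff and -0x8000 qualify, 0x8000 does not.  */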
33131 rtx
33132 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33133 rtx reg)
33135 rtx base, offset;
33137 if (reg == NULL && !reload_completed)
33138 reg = gen_reg_rtx (Pmode);
33140 if (GET_CODE (orig) == CONST)
33142 rtx reg_temp;
33144 if (GET_CODE (XEXP (orig, 0)) == PLUS
33145 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33146 return orig;
33148 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33150 /* Use a different reg for the intermediate value, as
33151 it will be marked UNCHANGING. */
33152 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33153 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33154 Pmode, reg_temp);
33155 offset =
33156 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33157 Pmode, reg);
33159 if (GET_CODE (offset) == CONST_INT)
33161 if (SMALL_INT (offset))
33162 return plus_constant (Pmode, base, INTVAL (offset));
33163 else if (!reload_completed)
33164 offset = force_reg (Pmode, offset);
33165 else
33167 rtx mem = force_const_mem (Pmode, orig);
33168 return machopic_legitimize_pic_address (mem, Pmode, reg);
33171 return gen_rtx_PLUS (Pmode, base, offset);
33174 /* Fall back on generic machopic code. */
33175 return machopic_legitimize_pic_address (orig, mode, reg);
33178 /* Output a .machine directive for the Darwin assembler, and call
33179 the generic start_file routine. */
33181 static void
33182 rs6000_darwin_file_start (void)
33184 static const struct
33186 const char *arg;
33187 const char *name;
33188 HOST_WIDE_INT if_set;
33189 } mapping[] = {
33190 { "ppc64", "ppc64", MASK_64BIT },
33191 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33192 { "power4", "ppc970", 0 },
33193 { "G5", "ppc970", 0 },
33194 { "7450", "ppc7450", 0 },
33195 { "7400", "ppc7400", MASK_ALTIVEC },
33196 { "G4", "ppc7400", 0 },
33197 { "750", "ppc750", 0 },
33198 { "740", "ppc750", 0 },
33199 { "G3", "ppc750", 0 },
33200 { "604e", "ppc604e", 0 },
33201 { "604", "ppc604", 0 },
33202 { "603e", "ppc603", 0 },
33203 { "603", "ppc603", 0 },
33204 { "601", "ppc601", 0 },
33205 { NULL, "ppc", 0 } };
33206 const char *cpu_id = "";
33207 size_t i;
33209 rs6000_file_start ();
33210 darwin_file_start ();
33212 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33214 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33215 cpu_id = rs6000_default_cpu;
33217 if (global_options_set.x_rs6000_cpu_index)
33218 cpu_id = processor_target_table[rs6000_cpu_index].name;
33220 /* Look through the mapping array. Pick the first name that either
33221 matches the argument, has a bit set in IF_SET that is also set
33222 in the target flags, or has a NULL name. */
33224 i = 0;
33225 while (mapping[i].arg != NULL
33226 && strcmp (mapping[i].arg, cpu_id) != 0
33227 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33228 i++;
33230 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
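/* For example, -mcpu=G4 matches the "G4" entry in the table above and
   emits "\t.machine ppc7400", while an unrecognized CPU string falls
   through to the terminating entry and emits "\t.machine ppc".  */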
33233 #endif /* TARGET_MACHO */
33235 #if TARGET_ELF
33236 static int
33237 rs6000_elf_reloc_rw_mask (void)
33239 if (flag_pic)
33240 return 3;
33241 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33242 return 2;
33243 else
33244 return 0;
33247 /* Record an element in the table of global constructors. SYMBOL is
33248 a SYMBOL_REF of the function to be called; PRIORITY is a number
33249 between 0 and MAX_INIT_PRIORITY.
33251 This differs from default_named_section_asm_out_constructor in
33252 that we have special handling for -mrelocatable. */
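/* For example, with MAX_INIT_PRIORITY == 65535, a constructor of
   priority 100 is placed in ".ctors.65435"; the linker sorts it near
   the end of .ctors, and since .ctors entries run from right to left
   it executes before constructors with larger priority values.  */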
33254 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33255 static void
33256 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33258 const char *section = ".ctors";
33259 char buf[18];
33261 if (priority != DEFAULT_INIT_PRIORITY)
33263 sprintf (buf, ".ctors.%.5u",
33264 /* Invert the numbering so the linker puts us in the proper
33265 order; constructors are run from right to left, and the
33266 linker sorts in increasing order. */
33267 MAX_INIT_PRIORITY - priority);
33268 section = buf;
33271 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33272 assemble_align (POINTER_SIZE);
33274 if (DEFAULT_ABI == ABI_V4
33275 && (TARGET_RELOCATABLE || flag_pic > 1))
33277 fputs ("\t.long (", asm_out_file);
33278 output_addr_const (asm_out_file, symbol);
33279 fputs (")@fixup\n", asm_out_file);
33281 else
33282 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33285 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33286 static void
33287 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33289 const char *section = ".dtors";
33290 char buf[18];
33292 if (priority != DEFAULT_INIT_PRIORITY)
33294 sprintf (buf, ".dtors.%.5u",
33295 /* Invert the numbering so the linker puts us in the proper
33296 order; constructors are run from right to left, and the
33297 linker sorts in increasing order. */
33298 MAX_INIT_PRIORITY - priority);
33299 section = buf;
33302 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33303 assemble_align (POINTER_SIZE);
33305 if (DEFAULT_ABI == ABI_V4
33306 && (TARGET_RELOCATABLE || flag_pic > 1))
33308 fputs ("\t.long (", asm_out_file);
33309 output_addr_const (asm_out_file, symbol);
33310 fputs (")@fixup\n", asm_out_file);
33312 else
33313 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33316 void
33317 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33319 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33321 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33322 ASM_OUTPUT_LABEL (file, name);
33323 fputs (DOUBLE_INT_ASM_OP, file);
33324 rs6000_output_function_entry (file, name);
33325 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33326 if (DOT_SYMBOLS)
33328 fputs ("\t.size\t", file);
33329 assemble_name (file, name);
33330 fputs (",24\n\t.type\t.", file);
33331 assemble_name (file, name);
33332 fputs (",@function\n", file);
33333 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33335 fputs ("\t.globl\t.", file);
33336 assemble_name (file, name);
33337 putc ('\n', file);
33340 else
33341 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33342 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33343 rs6000_output_function_entry (file, name);
33344 fputs (":\n", file);
33345 return;
33348 int uses_toc;
33349 if (DEFAULT_ABI == ABI_V4
33350 && (TARGET_RELOCATABLE || flag_pic > 1)
33351 && !TARGET_SECURE_PLT
33352 && (!constant_pool_empty_p () || crtl->profile)
33353 && (uses_toc = uses_TOC ()))
33355 char buf[256];
33357 if (uses_toc == 2)
33358 switch_to_other_text_partition ();
33359 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33361 fprintf (file, "\t.long ");
33362 assemble_name (file, toc_label_name);
33363 need_toc_init = 1;
33364 putc ('-', file);
33365 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33366 assemble_name (file, buf);
33367 putc ('\n', file);
33368 if (uses_toc == 2)
33369 switch_to_other_text_partition ();
33372 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33373 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33375 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33377 char buf[256];
33379 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33381 fprintf (file, "\t.quad .TOC.-");
33382 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33383 assemble_name (file, buf);
33384 putc ('\n', file);
33387 if (DEFAULT_ABI == ABI_AIX)
33389 const char *desc_name, *orig_name;
33391 orig_name = (*targetm.strip_name_encoding) (name);
33392 desc_name = orig_name;
33393 while (*desc_name == '.')
33394 desc_name++;
33396 if (TREE_PUBLIC (decl))
33397 fprintf (file, "\t.globl %s\n", desc_name);
33399 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33400 fprintf (file, "%s:\n", desc_name);
33401 fprintf (file, "\t.long %s\n", orig_name);
33402 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33403 fputs ("\t.long 0\n", file);
33404 fprintf (file, "\t.previous\n");
33406 ASM_OUTPUT_LABEL (file, name);
33409 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33410 static void
33411 rs6000_elf_file_end (void)
33413 #ifdef HAVE_AS_GNU_ATTRIBUTE
33414 /* ??? The value emitted depends on options active at file end.
33415 Assume anyone using #pragma or attributes that might change
33416 options knows what they are doing. */
33417 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33418 && rs6000_passes_float)
33420 int fp;
33422 if (TARGET_DF_FPR)
33423 fp = 1;
33424 else if (TARGET_SF_FPR)
33425 fp = 3;
33426 else
33427 fp = 2;
33428 if (rs6000_passes_long_double)
33430 if (!TARGET_LONG_DOUBLE_128)
33431 fp |= 2 * 4;
33432 else if (TARGET_IEEEQUAD)
33433 fp |= 3 * 4;
33434 else
33435 fp |= 1 * 4;
33437 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
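/* Summary of the encoding above: the low two bits of the
   ".gnu_attribute 4" value describe scalar floats (1 = double in FPRs,
   3 = single-precision FPRs only, 2 = soft float) and the next two
   bits describe long double (1 = 128-bit IBM, 2 = 64-bit,
   3 = IEEE 128-bit); e.g. hard double float with IBM long double
   emits "\t.gnu_attribute 4, 5".  */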
33439 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33441 if (rs6000_passes_vector)
33442 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33443 (TARGET_ALTIVEC_ABI ? 2 : 1));
33444 if (rs6000_returns_struct)
33445 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33446 aix_struct_return ? 2 : 1);
33448 #endif
33449 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33450 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33451 file_end_indicate_exec_stack ();
33452 #endif
33454 if (flag_split_stack)
33455 file_end_indicate_split_stack ();
33457 if (cpu_builtin_p)
33459 /* We have expanded a CPU builtin, so we need to emit a reference to
33460 the special symbol that LIBC uses to declare it supports the
33461 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33462 switch_to_section (data_section);
33463 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33464 fprintf (asm_out_file, "\t%s %s\n",
33465 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33468 #endif
33470 #if TARGET_XCOFF
33472 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33473 #define HAVE_XCOFF_DWARF_EXTRAS 0
33474 #endif
33476 static enum unwind_info_type
33477 rs6000_xcoff_debug_unwind_info (void)
33479 return UI_NONE;
33482 static void
33483 rs6000_xcoff_asm_output_anchor (rtx symbol)
33485 char buffer[100];
33487 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33488 SYMBOL_REF_BLOCK_OFFSET (symbol));
33489 fprintf (asm_out_file, "%s", SET_ASM_OP);
33490 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33491 fprintf (asm_out_file, ",");
33492 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33493 fprintf (asm_out_file, "\n");
33496 static void
33497 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33499 fputs (GLOBAL_ASM_OP, stream);
33500 RS6000_OUTPUT_BASENAME (stream, name);
33501 putc ('\n', stream);
33504 /* A get_unnamed_section callback, used for read-only sections.  DIRECTIVE
33505 points to the section string variable. */
33507 static void
33508 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33510 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33511 *(const char *const *) directive,
33512 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33515 /* Likewise for read-write sections. */
33517 static void
33518 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33520 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33521 *(const char *const *) directive,
33522 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33525 static void
33526 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33528 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33529 *(const char *const *) directive,
33530 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33533 /* A get_unnamed_section callback, used for switching to toc_section. */
33535 static void
33536 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33538 if (TARGET_MINIMAL_TOC)
33540 /* toc_section is always selected at least once from
33541 rs6000_xcoff_file_start, so this is guaranteed to
33542 always be defined once and only once in each file. */
33543 if (!toc_initialized)
33545 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33546 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33547 toc_initialized = 1;
33549 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33550 (TARGET_32BIT ? "" : ",3"));
33552 else
33553 fputs ("\t.toc\n", asm_out_file);
33556 /* Implement TARGET_ASM_INIT_SECTIONS. */
33558 static void
33559 rs6000_xcoff_asm_init_sections (void)
33561 read_only_data_section
33562 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33563 &xcoff_read_only_section_name);
33565 private_data_section
33566 = get_unnamed_section (SECTION_WRITE,
33567 rs6000_xcoff_output_readwrite_section_asm_op,
33568 &xcoff_private_data_section_name);
33570 tls_data_section
33571 = get_unnamed_section (SECTION_TLS,
33572 rs6000_xcoff_output_tls_section_asm_op,
33573 &xcoff_tls_data_section_name);
33575 tls_private_data_section
33576 = get_unnamed_section (SECTION_TLS,
33577 rs6000_xcoff_output_tls_section_asm_op,
33578 &xcoff_private_data_section_name);
33580 read_only_private_data_section
33581 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33582 &xcoff_private_data_section_name);
33584 toc_section
33585 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33587 readonly_data_section = read_only_data_section;
33590 static int
33591 rs6000_xcoff_reloc_rw_mask (void)
33593 return 3;
33596 static void
33597 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33598 tree decl ATTRIBUTE_UNUSED)
33600 int smclass;
33601 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33603 if (flags & SECTION_EXCLUDE)
33604 smclass = 4;
33605 else if (flags & SECTION_DEBUG)
33607 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33608 return;
33610 else if (flags & SECTION_CODE)
33611 smclass = 0;
33612 else if (flags & SECTION_TLS)
33613 smclass = 3;
33614 else if (flags & SECTION_WRITE)
33615 smclass = 2;
33616 else
33617 smclass = 1;
33619 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33620 (flags & SECTION_CODE) ? "." : "",
33621 name, suffix[smclass], flags & SECTION_ENTSIZE);
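/* For example, a code section named "foo" comes out as
   ".csect .foo[PR],N" and a writable data section as
   ".csect foo[RW],N", where N is the SECTION_ENTSIZE field of FLAGS
   (the log2 alignment stored by rs6000_xcoff_section_type_flags below).  */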
33624 #define IN_NAMED_SECTION(DECL) \
33625 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33626 && DECL_SECTION_NAME (DECL) != NULL)
33628 static section *
33629 rs6000_xcoff_select_section (tree decl, int reloc,
33630 unsigned HOST_WIDE_INT align)
33632 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33633 named section. */
33634 if (align > BIGGEST_ALIGNMENT)
33636 resolve_unique_section (decl, reloc, true);
33637 if (IN_NAMED_SECTION (decl))
33638 return get_named_section (decl, NULL, reloc);
33641 if (decl_readonly_section (decl, reloc))
33643 if (TREE_PUBLIC (decl))
33644 return read_only_data_section;
33645 else
33646 return read_only_private_data_section;
33648 else
33650 #if HAVE_AS_TLS
33651 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33653 if (TREE_PUBLIC (decl))
33654 return tls_data_section;
33655 else if (bss_initializer_p (decl))
33657 /* Convert to COMMON to emit in BSS. */
33658 DECL_COMMON (decl) = 1;
33659 return tls_comm_section;
33661 else
33662 return tls_private_data_section;
33664 else
33665 #endif
33666 if (TREE_PUBLIC (decl))
33667 return data_section;
33668 else
33669 return private_data_section;
33673 static void
33674 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33676 const char *name;
33678 /* Use select_section for private data and uninitialized data with
33679 alignment <= BIGGEST_ALIGNMENT. */
33680 if (!TREE_PUBLIC (decl)
33681 || DECL_COMMON (decl)
33682 || (DECL_INITIAL (decl) == NULL_TREE
33683 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33684 || DECL_INITIAL (decl) == error_mark_node
33685 || (flag_zero_initialized_in_bss
33686 && initializer_zerop (DECL_INITIAL (decl))))
33687 return;
33689 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33690 name = (*targetm.strip_name_encoding) (name);
33691 set_decl_section_name (decl, name);
33694 /* Select section for constant in constant pool.
33696 On RS/6000, all constants are in the private read-only data area.
33697 However, if this is being placed in the TOC it must be output as a
33698 toc entry. */
33700 static section *
33701 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33702 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33704 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33705 return toc_section;
33706 else
33707 return read_only_private_data_section;
33710 /* Remove any trailing [DS] or the like from the symbol name. */
33712 static const char *
33713 rs6000_xcoff_strip_name_encoding (const char *name)
33715 size_t len;
33716 if (*name == '*')
33717 name++;
33718 len = strlen (name);
33719 if (name[len - 1] == ']')
33720 return ggc_alloc_string (name, len - 4);
33721 else
33722 return name;
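/* E.g. "*foo[DS]" and "foo[DS]" both yield "foo"; a name without a
   trailing mapping class is returned unchanged.  */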
33725 /* Section attributes. AIX is always PIC. */
33727 static unsigned int
33728 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33730 unsigned int align;
33731 unsigned int flags = default_section_type_flags (decl, name, reloc);
33733 /* Align to at least UNIT size. */
33734 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33735 align = MIN_UNITS_PER_WORD;
33736 else
33737 /* Increase alignment of large objects if not already stricter. */
33738 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33739 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33740 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33742 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33745 /* Output at beginning of assembler file.
33747 Initialize the section names for the RS/6000 at this point.
33749 Specify filename, including full path, to assembler.
33751 We want to go into the TOC section so at least one .toc will be emitted.
33752 Also, in order to output proper .bs/.es pairs, we need at least one static
33753 [RW] section emitted.
33755 Finally, declare mcount when profiling to make the assembler happy. */
33757 static void
33758 rs6000_xcoff_file_start (void)
33760 rs6000_gen_section_name (&xcoff_bss_section_name,
33761 main_input_filename, ".bss_");
33762 rs6000_gen_section_name (&xcoff_private_data_section_name,
33763 main_input_filename, ".rw_");
33764 rs6000_gen_section_name (&xcoff_read_only_section_name,
33765 main_input_filename, ".ro_");
33766 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33767 main_input_filename, ".tls_");
33768 rs6000_gen_section_name (&xcoff_tbss_section_name,
33769 main_input_filename, ".tbss_[UL]");
33771 fputs ("\t.file\t", asm_out_file);
33772 output_quoted_string (asm_out_file, main_input_filename);
33773 fputc ('\n', asm_out_file);
33774 if (write_symbols != NO_DEBUG)
33775 switch_to_section (private_data_section);
33776 switch_to_section (toc_section);
33777 switch_to_section (text_section);
33778 if (profile_flag)
33779 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33780 rs6000_file_start ();
33783 /* Output at end of assembler file.
33784 On the RS/6000, referencing data should automatically pull in text. */
33786 static void
33787 rs6000_xcoff_file_end (void)
33789 switch_to_section (text_section);
33790 fputs ("_section_.text:\n", asm_out_file);
33791 switch_to_section (data_section);
33792 fputs (TARGET_32BIT
33793 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33794 asm_out_file);
33797 struct declare_alias_data
33799 FILE *file;
33800 bool function_descriptor;
33803 /* Declare alias N.  A helper callback for call_for_symbol_and_aliases. */
33805 static bool
33806 rs6000_declare_alias (struct symtab_node *n, void *d)
33808 struct declare_alias_data *data = (struct declare_alias_data *)d;
33809 /* Main symbol is output specially, because varasm machinery does part of
33810 the job for us - we do not need to declare .globl/lglobs and such. */
33811 if (!n->alias || n->weakref)
33812 return false;
33814 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33815 return false;
33817 /* Prevent assemble_alias from trying to use .set pseudo operation
33818 that does not behave as expected by the middle-end. */
33819 TREE_ASM_WRITTEN (n->decl) = true;
33821 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33822 char *buffer = (char *) alloca (strlen (name) + 2);
33823 char *p;
33824 int dollar_inside = 0;
33826 strcpy (buffer, name);
33827 p = strchr (buffer, '$');
33828 while (p) {
33829 *p = '_';
33830 dollar_inside++;
33831 p = strchr (p + 1, '$');
33833 if (TREE_PUBLIC (n->decl))
33835 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33837 if (dollar_inside) {
33838 if (data->function_descriptor)
33839 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33840 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33842 if (data->function_descriptor)
33844 fputs ("\t.globl .", data->file);
33845 RS6000_OUTPUT_BASENAME (data->file, buffer);
33846 putc ('\n', data->file);
33848 fputs ("\t.globl ", data->file);
33849 RS6000_OUTPUT_BASENAME (data->file, buffer);
33850 putc ('\n', data->file);
33852 #ifdef ASM_WEAKEN_DECL
33853 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33854 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33855 #endif
33857 else
33859 if (dollar_inside)
33861 if (data->function_descriptor)
33862 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33863 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33865 if (data->function_descriptor)
33867 fputs ("\t.lglobl .", data->file);
33868 RS6000_OUTPUT_BASENAME (data->file, buffer);
33869 putc ('\n', data->file);
33871 fputs ("\t.lglobl ", data->file);
33872 RS6000_OUTPUT_BASENAME (data->file, buffer);
33873 putc ('\n', data->file);
33875 if (data->function_descriptor)
33876 fputs (".", data->file);
33877 RS6000_OUTPUT_BASENAME (data->file, buffer);
33878 fputs (":\n", data->file);
33879 return false;
33883 #ifdef HAVE_GAS_HIDDEN
33884 /* Helper function to calculate visibility of a DECL
33885 and return the value as a const string. */
33887 static const char *
33888 rs6000_xcoff_visibility (tree decl)
33890 static const char * const visibility_types[] = {
33891 "", ",protected", ",hidden", ",internal"
33894 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33896 if (TREE_CODE (decl) == FUNCTION_DECL
33897 && cgraph_node::get (decl)
33898 && cgraph_node::get (decl)->instrumentation_clone
33899 && cgraph_node::get (decl)->instrumented_version)
33900 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33902 return visibility_types[vis];
33904 #endif
33907 /* This macro produces the initial definition of a function name.
33908 On the RS/6000, we need to place an extra '.' in the function name and
33909 output the function descriptor.
33910 Dollar signs are converted to underscores.
33912 The csect for the function will have already been created when
33913 text_section was selected. We do have to go back to that csect, however.
33915 The third and fourth parameters to the .function pseudo-op (16 and 044)
33916 are placeholders which no longer have any use.
33918 Because AIX assembler's .set command has unexpected semantics, we output
33919 all aliases as alternative labels in front of the definition. */
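/* For illustration, a public function "foo" produces roughly:

       .globl .foo
       .csect foo[DS]
   foo:
       .long .foo, TOC[tc0], 0
       .csect .text[PR]
   .foo:

   i.e. a three-word descriptor "foo" (entry point, TOC anchor,
   environment word) followed by the code label ".foo" back in the
   text csect; the exact csect name depends on function_section.  */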
33921 void
33922 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33924 char *buffer = (char *) alloca (strlen (name) + 1);
33925 char *p;
33926 int dollar_inside = 0;
33927 struct declare_alias_data data = {file, false};
33929 strcpy (buffer, name);
33930 p = strchr (buffer, '$');
33931 while (p) {
33932 *p = '_';
33933 dollar_inside++;
33934 p = strchr (p + 1, '$');
33936 if (TREE_PUBLIC (decl))
33938 if (!RS6000_WEAK || !DECL_WEAK (decl))
33940 if (dollar_inside) {
33941 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33942 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33944 fputs ("\t.globl .", file);
33945 RS6000_OUTPUT_BASENAME (file, buffer);
33946 #ifdef HAVE_GAS_HIDDEN
33947 fputs (rs6000_xcoff_visibility (decl), file);
33948 #endif
33949 putc ('\n', file);
33952 else
33954 if (dollar_inside) {
33955 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33956 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33958 fputs ("\t.lglobl .", file);
33959 RS6000_OUTPUT_BASENAME (file, buffer);
33960 putc ('\n', file);
33962 fputs ("\t.csect ", file);
33963 RS6000_OUTPUT_BASENAME (file, buffer);
33964 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33965 RS6000_OUTPUT_BASENAME (file, buffer);
33966 fputs (":\n", file);
33967 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33968 &data, true);
33969 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33970 RS6000_OUTPUT_BASENAME (file, buffer);
33971 fputs (", TOC[tc0], 0\n", file);
33972 in_section = NULL;
33973 switch_to_section (function_section (decl));
33974 putc ('.', file);
33975 RS6000_OUTPUT_BASENAME (file, buffer);
33976 fputs (":\n", file);
33977 data.function_descriptor = true;
33978 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33979 &data, true);
33980 if (!DECL_IGNORED_P (decl))
33982 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33983 xcoffout_declare_function (file, decl, buffer);
33984 else if (write_symbols == DWARF2_DEBUG)
33986 name = (*targetm.strip_name_encoding) (name);
33987 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33990 return;
33994 /* Output assembly language to globalize a symbol from a DECL,
33995 possibly with visibility. */
33997 void
33998 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34000 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34001 fputs (GLOBAL_ASM_OP, stream);
34002 RS6000_OUTPUT_BASENAME (stream, name);
34003 #ifdef HAVE_GAS_HIDDEN
34004 fputs (rs6000_xcoff_visibility (decl), stream);
34005 #endif
34006 putc ('\n', stream);
34009 /* Output assembly language to define a symbol as COMMON from a DECL,
34010 possibly with visibility. */
34012 void
34013 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34014 tree decl ATTRIBUTE_UNUSED,
34015 const char *name,
34016 unsigned HOST_WIDE_INT size,
34017 unsigned HOST_WIDE_INT align)
34019 unsigned HOST_WIDE_INT align2 = 2;
34021 if (align > 32)
34022 align2 = floor_log2 (align / BITS_PER_UNIT);
34023 else if (size > 4)
34024 align2 = 3;
34026 fputs (COMMON_ASM_OP, stream);
34027 RS6000_OUTPUT_BASENAME (stream, name);
34029 fprintf (stream,
34030 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34031 size, align2);
34033 #ifdef HAVE_GAS_HIDDEN
34034 fputs (rs6000_xcoff_visibility (decl), stream);
34035 #endif
34036 putc ('\n', stream);
34039 /* This macro produces the initial definition of an object (variable) name.
34040 Because AIX assembler's .set command has unexpected semantics, we output
34041 all aliases as alternative labels in front of the definition. */
34043 void
34044 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34046 struct declare_alias_data data = {file, false};
34047 RS6000_OUTPUT_BASENAME (file, name);
34048 fputs (":\n", file);
34049 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34050 &data, true);
34053 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34055 void
34056 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34058 fputs (integer_asm_op (size, FALSE), file);
34059 assemble_name (file, label);
34060 fputs ("-$", file);
34063 /* Output a symbol offset relative to the dbase for the current object.
34064 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34065 signed offsets.
34067 __gcc_unwind_dbase is embedded in all executables/libraries through
34068 libgcc/config/rs6000/crtdbase.S. */
34070 void
34071 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34073 fputs (integer_asm_op (size, FALSE), file);
34074 assemble_name (file, label);
34075 fputs("-__gcc_unwind_dbase", file);
34078 #ifdef HAVE_AS_TLS
34079 static void
34080 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34082 rtx symbol;
34083 int flags;
34084 const char *symname;
34086 default_encode_section_info (decl, rtl, first);
34088 /* Careful not to prod global register variables. */
34089 if (!MEM_P (rtl))
34090 return;
34091 symbol = XEXP (rtl, 0);
34092 if (GET_CODE (symbol) != SYMBOL_REF)
34093 return;
34095 flags = SYMBOL_REF_FLAGS (symbol);
34097 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34098 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34100 SYMBOL_REF_FLAGS (symbol) = flags;
34102 /* Append mapping class to extern decls. */
34103 symname = XSTR (symbol, 0);
34104 if (decl /* sync condition with assemble_external () */
34105 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34106 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34107 || TREE_CODE (decl) == FUNCTION_DECL)
34108 && symname[strlen (symname) - 1] != ']')
34110 char *newname = (char *) alloca (strlen (symname) + 5);
34111 strcpy (newname, symname);
34112 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34113 ? "[DS]" : "[UA]"));
34114 XSTR (symbol, 0) = ggc_strdup (newname);
34117 #endif /* HAVE_AS_TLS */
34118 #endif /* TARGET_XCOFF */
34120 void
34121 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34122 const char *name, const char *val)
34124 fputs ("\t.weak\t", stream);
34125 RS6000_OUTPUT_BASENAME (stream, name);
34126 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34127 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34129 if (TARGET_XCOFF)
34130 fputs ("[DS]", stream);
34131 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34132 if (TARGET_XCOFF)
34133 fputs (rs6000_xcoff_visibility (decl), stream);
34134 #endif
34135 fputs ("\n\t.weak\t.", stream);
34136 RS6000_OUTPUT_BASENAME (stream, name);
34138 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34139 if (TARGET_XCOFF)
34140 fputs (rs6000_xcoff_visibility (decl), stream);
34141 #endif
34142 fputc ('\n', stream);
34143 if (val)
34145 #ifdef ASM_OUTPUT_DEF
34146 ASM_OUTPUT_DEF (stream, name, val);
34147 #endif
34148 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34149 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34151 fputs ("\t.set\t.", stream);
34152 RS6000_OUTPUT_BASENAME (stream, name);
34153 fputs (",.", stream);
34154 RS6000_OUTPUT_BASENAME (stream, val);
34155 fputc ('\n', stream);
34161 /* Return true if INSN should not be copied. */
34163 static bool
34164 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34166 return recog_memoized (insn) >= 0
34167 && get_attr_cannot_copy (insn);
34170 /* Compute a (partial) cost for rtx X. Return true if the complete
34171 cost has been computed, and false if subexpressions should be
34172 scanned. In either case, *TOTAL contains the cost result. */
34174 static bool
34175 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34176 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34178 int code = GET_CODE (x);
34180 switch (code)
34182 /* On the RS/6000, if it is valid in the insn, it is free. */
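/* E.g. an add immediate satisfying constraint "I" (a signed 16-bit
   constant) folds into a single addi, so as an operand it adds
   nothing to the cost.  */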
34183 case CONST_INT:
34184 if (((outer_code == SET
34185 || outer_code == PLUS
34186 || outer_code == MINUS)
34187 && (satisfies_constraint_I (x)
34188 || satisfies_constraint_L (x)))
34189 || (outer_code == AND
34190 && (satisfies_constraint_K (x)
34191 || (mode == SImode
34192 ? satisfies_constraint_L (x)
34193 : satisfies_constraint_J (x))))
34194 || ((outer_code == IOR || outer_code == XOR)
34195 && (satisfies_constraint_K (x)
34196 || (mode == SImode
34197 ? satisfies_constraint_L (x)
34198 : satisfies_constraint_J (x))))
34199 || outer_code == ASHIFT
34200 || outer_code == ASHIFTRT
34201 || outer_code == LSHIFTRT
34202 || outer_code == ROTATE
34203 || outer_code == ROTATERT
34204 || outer_code == ZERO_EXTRACT
34205 || (outer_code == MULT
34206 && satisfies_constraint_I (x))
34207 || ((outer_code == DIV || outer_code == UDIV
34208 || outer_code == MOD || outer_code == UMOD)
34209 && exact_log2 (INTVAL (x)) >= 0)
34210 || (outer_code == COMPARE
34211 && (satisfies_constraint_I (x)
34212 || satisfies_constraint_K (x)))
34213 || ((outer_code == EQ || outer_code == NE)
34214 && (satisfies_constraint_I (x)
34215 || satisfies_constraint_K (x)
34216 || (mode == SImode
34217 ? satisfies_constraint_L (x)
34218 : satisfies_constraint_J (x))))
34219 || (outer_code == GTU
34220 && satisfies_constraint_I (x))
34221 || (outer_code == LTU
34222 && satisfies_constraint_P (x)))
34224 *total = 0;
34225 return true;
34227 else if ((outer_code == PLUS
34228 && reg_or_add_cint_operand (x, VOIDmode))
34229 || (outer_code == MINUS
34230 && reg_or_sub_cint_operand (x, VOIDmode))
34231 || ((outer_code == SET
34232 || outer_code == IOR
34233 || outer_code == XOR)
34234 && (INTVAL (x)
34235 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34237 *total = COSTS_N_INSNS (1);
34238 return true;
34240 /* FALLTHRU */
34242 case CONST_DOUBLE:
34243 case CONST_WIDE_INT:
34244 case CONST:
34245 case HIGH:
34246 case SYMBOL_REF:
34247 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34248 return true;
34250 case MEM:
34251 /* When optimizing for size, a MEM should be slightly more expensive
34252 than generating its address, e.g., (plus (reg) (const)).
34253 L1 cache latency is about two instructions. */
34254 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34255 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
34256 *total += COSTS_N_INSNS (100);
34257 return true;
34259 case LABEL_REF:
34260 *total = 0;
34261 return true;
34263 case PLUS:
34264 case MINUS:
34265 if (FLOAT_MODE_P (mode))
34266 *total = rs6000_cost->fp;
34267 else
34268 *total = COSTS_N_INSNS (1);
34269 return false;
34271 case MULT:
34272 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34273 && satisfies_constraint_I (XEXP (x, 1)))
34275 if (INTVAL (XEXP (x, 1)) >= -256
34276 && INTVAL (XEXP (x, 1)) <= 255)
34277 *total = rs6000_cost->mulsi_const9;
34278 else
34279 *total = rs6000_cost->mulsi_const;
34281 else if (mode == SFmode)
34282 *total = rs6000_cost->fp;
34283 else if (FLOAT_MODE_P (mode))
34284 *total = rs6000_cost->dmul;
34285 else if (mode == DImode)
34286 *total = rs6000_cost->muldi;
34287 else
34288 *total = rs6000_cost->mulsi;
34289 return false;
34291 case FMA:
34292 if (mode == SFmode)
34293 *total = rs6000_cost->fp;
34294 else
34295 *total = rs6000_cost->dmul;
34296 break;
34298 case DIV:
34299 case MOD:
34300 if (FLOAT_MODE_P (mode))
34302 *total = mode == DFmode ? rs6000_cost->ddiv
34303 : rs6000_cost->sdiv;
34304 return false;
34306 /* FALLTHRU */
34308 case UDIV:
34309 case UMOD:
34310 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34311 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34313 if (code == DIV || code == MOD)
34314 /* Shift, addze */
34315 *total = COSTS_N_INSNS (2);
34316 else
34317 /* Shift */
34318 *total = COSTS_N_INSNS (1);
34320 else
34322 if (GET_MODE (XEXP (x, 1)) == DImode)
34323 *total = rs6000_cost->divdi;
34324 else
34325 *total = rs6000_cost->divsi;
34327 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34328 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34329 *total += COSTS_N_INSNS (2);
34330 return false;
34332 case CTZ:
34333 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34334 return false;
34336 case FFS:
34337 *total = COSTS_N_INSNS (4);
34338 return false;
34340 case POPCOUNT:
34341 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34342 return false;
34344 case PARITY:
34345 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34346 return false;
34348 case NOT:
34349 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34350 *total = 0;
34351 else
34352 *total = COSTS_N_INSNS (1);
34353 return false;
34355 case AND:
34356 if (CONST_INT_P (XEXP (x, 1)))
34358 rtx left = XEXP (x, 0);
34359 rtx_code left_code = GET_CODE (left);
34361 /* rotate-and-mask: 1 insn. */
34362 if ((left_code == ROTATE
34363 || left_code == ASHIFT
34364 || left_code == LSHIFTRT)
34365 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34367 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34368 if (!CONST_INT_P (XEXP (left, 1)))
34369 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34370 *total += COSTS_N_INSNS (1);
34371 return true;
34374 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34375 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34376 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34377 || (val & 0xffff) == val
34378 || (val & 0xffff0000) == val
34379 || ((val & 0xffff) == 0 && mode == SImode))
34381 *total = rtx_cost (left, mode, AND, 0, speed);
34382 *total += COSTS_N_INSNS (1);
34383 return true;
34386 /* 2 insns. */
34387 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34389 *total = rtx_cost (left, mode, AND, 0, speed);
34390 *total += COSTS_N_INSNS (2);
34391 return true;
34395 *total = COSTS_N_INSNS (1);
34396 return false;
34398 case IOR:
34399 /* FIXME */
34400 *total = COSTS_N_INSNS (1);
34401 return true;
34403 case CLZ:
34404 case XOR:
34405 case ZERO_EXTRACT:
34406 *total = COSTS_N_INSNS (1);
34407 return false;
34409 case ASHIFT:
34410 /* The EXTSWSLI instruction is a combined sign-extend-and-shift.  Don't
34411 count the sign extend and the shift separately within the insn. */
34412 if (TARGET_EXTSWSLI && mode == DImode
34413 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34414 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34416 *total = 0;
34417 return false;
34419 /* fall through */
34421 case ASHIFTRT:
34422 case LSHIFTRT:
34423 case ROTATE:
34424 case ROTATERT:
34425 /* Handle mul_highpart. */
34426 if (outer_code == TRUNCATE
34427 && GET_CODE (XEXP (x, 0)) == MULT)
34429 if (mode == DImode)
34430 *total = rs6000_cost->muldi;
34431 else
34432 *total = rs6000_cost->mulsi;
34433 return true;
34435 else if (outer_code == AND)
34436 *total = 0;
34437 else
34438 *total = COSTS_N_INSNS (1);
34439 return false;
34441 case SIGN_EXTEND:
34442 case ZERO_EXTEND:
34443 if (GET_CODE (XEXP (x, 0)) == MEM)
34444 *total = 0;
34445 else
34446 *total = COSTS_N_INSNS (1);
34447 return false;
34449 case COMPARE:
34450 case NEG:
34451 case ABS:
34452 if (!FLOAT_MODE_P (mode))
34454 *total = COSTS_N_INSNS (1);
34455 return false;
34457 /* FALLTHRU */
34459 case FLOAT:
34460 case UNSIGNED_FLOAT:
34461 case FIX:
34462 case UNSIGNED_FIX:
34463 case FLOAT_TRUNCATE:
34464 *total = rs6000_cost->fp;
34465 return false;
34467 case FLOAT_EXTEND:
34468 if (mode == DFmode)
34469 *total = rs6000_cost->sfdf_convert;
34470 else
34471 *total = rs6000_cost->fp;
34472 return false;
34474 case UNSPEC:
34475 switch (XINT (x, 1))
34477 case UNSPEC_FRSP:
34478 *total = rs6000_cost->fp;
34479 return true;
34481 default:
34482 break;
34484 break;
34486 case CALL:
34487 case IF_THEN_ELSE:
34488 if (!speed)
34490 *total = COSTS_N_INSNS (1);
34491 return true;
34493 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34495 *total = rs6000_cost->fp;
34496 return false;
34498 break;
34500 case NE:
34501 case EQ:
34502 case GTU:
34503 case LTU:
34504 /* Carry bit requires mode == Pmode.
34505 NEG or PLUS already counted so only add one. */
34506 if (mode == Pmode
34507 && (outer_code == NEG || outer_code == PLUS))
34509 *total = COSTS_N_INSNS (1);
34510 return true;
34512 if (outer_code == SET)
34514 if (XEXP (x, 1) == const0_rtx)
34516 if (TARGET_ISEL && !TARGET_MFCRF)
34517 *total = COSTS_N_INSNS (8);
34518 else
34519 *total = COSTS_N_INSNS (2);
34520 return true;
34522 else
34524 *total = COSTS_N_INSNS (3);
34525 return false;
34528 /* FALLTHRU */
34530 case GT:
34531 case LT:
34532 case UNORDERED:
34533 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34535 if (TARGET_ISEL && !TARGET_MFCRF)
34536 *total = COSTS_N_INSNS (8);
34537 else
34538 *total = COSTS_N_INSNS (2);
34539 return true;
34541 /* CC COMPARE. */
34542 if (outer_code == COMPARE)
34544 *total = 0;
34545 return true;
34547 break;
34549 default:
34550 break;
34553 return false;
34556 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34558 static bool
34559 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34560 int opno, int *total, bool speed)
34562 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34564 fprintf (stderr,
34565 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34566 "opno = %d, total = %d, speed = %s, x:\n",
34567 ret ? "complete" : "scan inner",
34568 GET_MODE_NAME (mode),
34569 GET_RTX_NAME (outer_code),
34570 opno,
34571 *total,
34572 speed ? "true" : "false");
34574 debug_rtx (x);
34576 return ret;
34579 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34581 static int
34582 rs6000_debug_address_cost (rtx x, machine_mode mode,
34583 addr_space_t as, bool speed)
34585 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34587 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34588 ret, speed ? "true" : "false");
34589 debug_rtx (x);
34591 return ret;
34595 /* A C expression returning the cost of moving data from a register of
34596 class FROM to one of class TO. */
34598 static int
34599 rs6000_register_move_cost (machine_mode mode,
34600 reg_class_t from, reg_class_t to)
34602 int ret;
34604 if (TARGET_DEBUG_COST)
34605 dbg_cost_ctrl++;
34607 /* Moves from/to GENERAL_REGS. */
34608 if (reg_classes_intersect_p (to, GENERAL_REGS)
34609 || reg_classes_intersect_p (from, GENERAL_REGS))
34611 reg_class_t rclass = from;
34613 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34614 rclass = to;
34616 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34617 ret = (rs6000_memory_move_cost (mode, rclass, false)
34618 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34620 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34621 shift. */
34622 else if (rclass == CR_REGS)
34623 ret = 4;
34625 /* For those processors that have slow LR/CTR moves, make them more
34626 expensive than memory in order to bias spills to memory. */
34627 else if ((rs6000_cpu == PROCESSOR_POWER6
34628 || rs6000_cpu == PROCESSOR_POWER7
34629 || rs6000_cpu == PROCESSOR_POWER8
34630 || rs6000_cpu == PROCESSOR_POWER9)
34631 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34632 ret = 6 * hard_regno_nregs[0][mode];
34634 else
34635 /* A move will cost one instruction per GPR moved. */
34636 ret = 2 * hard_regno_nregs[0][mode];
34639 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34640 else if (VECTOR_MEM_VSX_P (mode)
34641 && reg_classes_intersect_p (to, VSX_REGS)
34642 && reg_classes_intersect_p (from, VSX_REGS))
34643 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
34645 /* Moving between two similar registers is just one instruction. */
34646 else if (reg_classes_intersect_p (to, from))
34647 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34649 /* Everything else has to go through GENERAL_REGS. */
34650 else
34651 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34652 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34654 if (TARGET_DEBUG_COST)
34656 if (dbg_cost_ctrl == 1)
34657 fprintf (stderr,
34658 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34659 ret, GET_MODE_NAME (mode), reg_class_names[from],
34660 reg_class_names[to]);
34661 dbg_cost_ctrl--;
34664 return ret;
34667 /* A C expression returning the cost of moving data of MODE from a register
34668 to or from memory. */
34670 static int
34671 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34672 bool in ATTRIBUTE_UNUSED)
34674 int ret;
34676 if (TARGET_DEBUG_COST)
34677 dbg_cost_ctrl++;
34679 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34680 ret = 4 * hard_regno_nregs[0][mode];
34681 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34682 || reg_classes_intersect_p (rclass, VSX_REGS)))
34683 ret = 4 * hard_regno_nregs[32][mode];
34684 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34685 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34686 else
34687 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34689 if (TARGET_DEBUG_COST)
34691 if (dbg_cost_ctrl == 1)
34692 fprintf (stderr,
34693 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34694 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34695 dbg_cost_ctrl--;
34698 return ret;
34701 /* Returns the decl of a target-specific builtin that implements the
34702 reciprocal of the function, or NULL_TREE if not available. */
34704 static tree
34705 rs6000_builtin_reciprocal (tree fndecl)
34707 switch (DECL_FUNCTION_CODE (fndecl))
34709 case VSX_BUILTIN_XVSQRTDP:
34710 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34711 return NULL_TREE;
34713 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34715 case VSX_BUILTIN_XVSQRTSP:
34716 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34717 return NULL_TREE;
34719 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34721 default:
34722 return NULL_TREE;
34726 /* Load up a constant. If the mode is a vector mode, splat the value across
34727 all of the vector elements. */
34729 static rtx
34730 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34732 rtx reg;
34734 if (mode == SFmode || mode == DFmode)
34736 rtx d = const_double_from_real_value (dconst, mode);
34737 reg = force_reg (mode, d);
34739 else if (mode == V4SFmode)
34741 rtx d = const_double_from_real_value (dconst, SFmode);
34742 rtvec v = gen_rtvec (4, d, d, d, d);
34743 reg = gen_reg_rtx (mode);
34744 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34746 else if (mode == V2DFmode)
34748 rtx d = const_double_from_real_value (dconst, DFmode);
34749 rtvec v = gen_rtvec (2, d, d);
34750 reg = gen_reg_rtx (mode);
34751 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34753 else
34754 gcc_unreachable ();
34756 return reg;
34759 /* Generate an FMA instruction. */
34761 static void
34762 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34764 machine_mode mode = GET_MODE (target);
34765 rtx dst;
34767 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34768 gcc_assert (dst != NULL);
34770 if (dst != target)
34771 emit_move_insn (target, dst);
34774 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34776 static void
34777 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34779 machine_mode mode = GET_MODE (dst);
34780 rtx r;
34782 /* This is a tad more complicated, since the fnma_optab is for
34783 a different expression: fma(-m1, m2, a), which is the same
34784 thing except in the case of signed zeros.
34786 Fortunately we know that if FMA is supported, then FNMSUB is
34787 also supported in the ISA.  Just expand it directly. */
34789 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34791 r = gen_rtx_NEG (mode, a);
34792 r = gen_rtx_FMA (mode, m1, m2, r);
34793 r = gen_rtx_NEG (mode, r);
34794 emit_insn (gen_rtx_SET (dst, r));
34797 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34798 add a reg_note saying that this was a division. Support both scalar and
34799 vector divide. Assumes no trapping math and finite arguments. */
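/* A sketch of the math below: Newton-Raphson on f(x) = 1/x - d gives
   the refinement x' = x * (2 - d * x), equivalently x' = x + e * x
   with e = 1 - d * x.  The last step folds in the numerator:
   u = n * x, v = n - d * u, dst = u + v * x, which equals
   n * x * (2 - d * x) without forming the 2 - d * x term explicitly.  */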
34801 void
34802 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34804 machine_mode mode = GET_MODE (dst);
34805 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34806 int i;
34808 /* Low precision estimates guarantee 5 bits of accuracy. High
34809 precision estimates guarantee 14 bits of accuracy. SFmode
34810 requires 23 bits of accuracy. DFmode requires 52 bits of
34811 accuracy. Each pass at least doubles the accuracy, leading
34812 to the following. */
34813 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34814 if (mode == DFmode || mode == V2DFmode)
34815 passes++;
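/* Worked through: a 5-bit estimate doubles to 10, 20, then 40 bits
   after the three passes, covering SFmode's 23 bits; DFmode takes the
   extra pass for 80 >= 52.  With TARGET_RECIP_PRECISION, the 14-bit
   estimate needs one pass for SFmode (28 bits) and two for DFmode
   (56 bits).  */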
34817 enum insn_code code = optab_handler (smul_optab, mode);
34818 insn_gen_fn gen_mul = GEN_FCN (code);
34820 gcc_assert (code != CODE_FOR_nothing);
34822 one = rs6000_load_constant_and_splat (mode, dconst1);
34824 /* x0 = 1./d estimate */
34825 x0 = gen_reg_rtx (mode);
34826 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34827 UNSPEC_FRES)));
34829 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34830 if (passes > 1) {
34832 /* e0 = 1. - d * x0 */
34833 e0 = gen_reg_rtx (mode);
34834 rs6000_emit_nmsub (e0, d, x0, one);
34836 /* x1 = x0 + e0 * x0 */
34837 x1 = gen_reg_rtx (mode);
34838 rs6000_emit_madd (x1, e0, x0, x0);
34840 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34841 ++i, xprev = xnext, eprev = enext) {
34843 /* enext = eprev * eprev */
34844 enext = gen_reg_rtx (mode);
34845 emit_insn (gen_mul (enext, eprev, eprev));
34847 /* xnext = xprev + enext * xprev */
34848 xnext = gen_reg_rtx (mode);
34849 rs6000_emit_madd (xnext, enext, xprev, xprev);
34852 } else
34853 xprev = x0;
34855 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34857 /* u = n * xprev */
34858 u = gen_reg_rtx (mode);
34859 emit_insn (gen_mul (u, n, xprev));
34861 /* v = n - (d * u) */
34862 v = gen_reg_rtx (mode);
34863 rs6000_emit_nmsub (v, d, u, n);
34865 /* dst = (v * xprev) + u */
34866 rs6000_emit_madd (dst, v, xprev, u);
34868 if (note_p)
34869 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34872 /* Goldschmidt's Algorithm for single/double-precision floating point
34873 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
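/* A sketch of the iteration below: starting from e ~ 1/sqrt(src), keep
   g ~ sqrt(src) and h ~ 1/(2*sqrt(src)).  Each pass forms the residual
   t = 1/2 - g*h (zero at convergence) and refines g' = g + g*t,
   h' = h + h*t.  The sqrt result is g; the rsqrt result is 2*h,
   produced by the final add.  */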
34875 void
34876 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34878 machine_mode mode = GET_MODE (src);
34879 rtx e = gen_reg_rtx (mode);
34880 rtx g = gen_reg_rtx (mode);
34881 rtx h = gen_reg_rtx (mode);
34883 /* Low precision estimates guarantee 5 bits of accuracy. High
34884 precision estimates guarantee 14 bits of accuracy. SFmode
34885 requires 23 bits of accuracy. DFmode requires 52 bits of
34886 accuracy. Each pass at least doubles the accuracy, leading
34887 to the following. */
34888 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34889 if (mode == DFmode || mode == V2DFmode)
34890 passes++;
34892 int i;
34893 rtx mhalf;
34894 enum insn_code code = optab_handler (smul_optab, mode);
34895 insn_gen_fn gen_mul = GEN_FCN (code);
34897 gcc_assert (code != CODE_FOR_nothing);
34899 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34901 /* e = rsqrt estimate */
34902 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34903 UNSPEC_RSQRT)));
34905 /* If (src == 0.0), filter out the infinite rsqrt estimate so sqrt(0.0) does not produce a NaN. */
34906 if (!recip)
34908 rtx zero = force_reg (mode, CONST0_RTX (mode));
34910 if (mode == SFmode)
34912 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34913 e, zero, mode, 0);
34914 if (target != e)
34915 emit_move_insn (e, target);
34917 else
34919 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34920 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34924 /* g = sqrt estimate. */
34925 emit_insn (gen_mul (g, e, src));
34926 /* h = 1/(2*sqrt) estimate. */
34927 emit_insn (gen_mul (h, e, mhalf));
34929 if (recip)
34931 if (passes == 1)
34933 rtx t = gen_reg_rtx (mode);
34934 rs6000_emit_nmsub (t, g, h, mhalf);
34935 /* Apply the correction directly to the rsqrt estimate E. */
34936 rs6000_emit_madd (dst, e, t, e);
34938 else
34940 for (i = 0; i < passes; i++)
34942 rtx t1 = gen_reg_rtx (mode);
34943 rtx g1 = gen_reg_rtx (mode);
34944 rtx h1 = gen_reg_rtx (mode);
34946 rs6000_emit_nmsub (t1, g, h, mhalf);
34947 rs6000_emit_madd (g1, g, t1, g);
34948 rs6000_emit_madd (h1, h, t1, h);
34950 g = g1;
34951 h = h1;
34953 /* H approximates 1/(2*sqrt); doubling it yields the rsqrt. */
34954 emit_insn (gen_add3_insn (dst, h, h));
34957 else
34959 rtx t = gen_reg_rtx (mode);
34960 rs6000_emit_nmsub (t, g, h, mhalf);
34961 rs6000_emit_madd (dst, g, t, g);
34964 return;
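/* Illustrative sketch, added for exposition and not part of the original
   file: the coupled Goldschmidt recurrence emitted above, as plain scalar
   C. The emitted code specializes the single-pass and sqrt cases, but the
   refinement step is this one; the hypothetical RSQRT_ESTIMATE stands in
   for the frsqrte-style hardware estimate. */
#if 0
static double
swsqrt_sketch (double x, int passes, int recip)
{
  double e = RSQRT_ESTIMATE (x);   /* e ~= 1/sqrt(x) */
  double g = e * x;                /* g ~= sqrt(x) */
  double h = e * 0.5;              /* h ~= 1/(2*sqrt(x)) */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;      /* error term; zero when exact */
      g = g + g * t;               /* g converges to sqrt(x) */
      h = h + h * t;               /* h converges to 1/(2*sqrt(x)) */
    }
  return recip ? h + h : g;        /* 2*h ~= 1/sqrt(x) */
}
#endif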
34967 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34968 (Power7) targets. DST is the target, and SRC is the argument operand. */
34970 void
34971 rs6000_emit_popcount (rtx dst, rtx src)
34973 machine_mode mode = GET_MODE (dst);
34974 rtx tmp1, tmp2;
34976 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34977 if (TARGET_POPCNTD)
34979 if (mode == SImode)
34980 emit_insn (gen_popcntdsi2 (dst, src));
34981 else
34982 emit_insn (gen_popcntddi2 (dst, src));
34983 return;
34986 tmp1 = gen_reg_rtx (mode);
34988 if (mode == SImode)
34990 emit_insn (gen_popcntbsi2 (tmp1, src));
34991 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34992 NULL_RTX, 0);
34993 tmp2 = force_reg (SImode, tmp2);
34994 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34996 else
34998 emit_insn (gen_popcntbdi2 (tmp1, src));
34999 tmp2 = expand_mult (DImode, tmp1,
35000 GEN_INT ((HOST_WIDE_INT)
35001 0x01010101 << 32 | 0x01010101),
35002 NULL_RTX, 0);
35003 tmp2 = force_reg (DImode, tmp2);
35004 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
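/* Illustrative sketch, added for exposition and not part of the original
   file: the SImode fallback above as plain C. The first three statements
   are a portable stand-in for popcntb, which leaves each byte's
   population count in that byte. */
#if 0
static unsigned int
popcount32_sketch (unsigned int x)
{
  x = x - ((x >> 1) & 0x55555555);
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x = (x + (x >> 4)) & 0x0f0f0f0f;   /* per-byte counts, as popcntb */
  return (x * 0x01010101) >> 24;     /* byte sum lands in bits 31:24 */
}
#endif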
35009 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35010 target, and SRC is the argument operand. */
35012 void
35013 rs6000_emit_parity (rtx dst, rtx src)
35015 machine_mode mode = GET_MODE (dst);
35016 rtx tmp;
35018 tmp = gen_reg_rtx (mode);
35020 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35021 if (TARGET_CMPB)
35023 if (mode == SImode)
35025 emit_insn (gen_popcntbsi2 (tmp, src));
35026 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35028 else
35030 emit_insn (gen_popcntbdi2 (tmp, src));
35031 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35033 return;
35036 if (mode == SImode)
35038 /* Is mult+shift >= shift+xor+shift+xor? */
35039 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35041 rtx tmp1, tmp2, tmp3, tmp4;
35043 tmp1 = gen_reg_rtx (SImode);
35044 emit_insn (gen_popcntbsi2 (tmp1, src));
35046 tmp2 = gen_reg_rtx (SImode);
35047 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35048 tmp3 = gen_reg_rtx (SImode);
35049 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35051 tmp4 = gen_reg_rtx (SImode);
35052 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35053 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35055 else
35056 rs6000_emit_popcount (tmp, src);
35057 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35059 else
35061 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35062 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35064 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35066 tmp1 = gen_reg_rtx (DImode);
35067 emit_insn (gen_popcntbdi2 (tmp1, src));
35069 tmp2 = gen_reg_rtx (DImode);
35070 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35071 tmp3 = gen_reg_rtx (DImode);
35072 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35074 tmp4 = gen_reg_rtx (DImode);
35075 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35076 tmp5 = gen_reg_rtx (DImode);
35077 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35079 tmp6 = gen_reg_rtx (DImode);
35080 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35081 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35083 else
35084 rs6000_emit_popcount (tmp, src);
35085 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
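/* Illustrative sketch, added for exposition and not part of the original
   file: the SImode shift/xor path above as plain C, again with a portable
   stand-in for popcntb. Bit 0 of the xor of the per-byte counts is the
   parity of the whole word. */
#if 0
static unsigned int
parity32_sketch (unsigned int x)
{
  x = x - ((x >> 1) & 0x55555555);
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x = (x + (x >> 4)) & 0x0f0f0f0f;   /* per-byte counts, as popcntb */
  x ^= x >> 16;                      /* fold the four byte counts */
  x ^= x >> 8;
  return x & 1;                      /* parity is bit 0 of the xor */
}
#endif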
35089 /* Expand an Altivec constant permutation for little endian mode.
35090 There are two issues: First, the two input operands must be
35091 swapped so that together they form a double-wide array in LE
35092 order. Second, the vperm instruction has surprising behavior
35093 in LE mode: it interprets the elements of the source vectors
35094 in BE mode ("left to right") and interprets the elements of
35095 the destination vector in LE mode ("right to left"). To
35096 correct for this, we must subtract each element of the permute
35097 control vector from 31.
35099 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35100 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35101 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35102 serve as the permute control vector. Then, in BE mode,
35104 vperm 9,10,11,12
35106 places the desired result in vr9. However, in LE mode the
35107 vector contents will be
35109 vr10 = 00000003 00000002 00000001 00000000
35110 vr11 = 00000007 00000006 00000005 00000004
35112 The result of the vperm using the same permute control vector is
35114 vr9 = 05000000 07000000 01000000 03000000
35116 That is, the leftmost 4 bytes of vr10 are interpreted as the
35117 source for the rightmost 4 bytes of vr9, and so on.
35119 If we change the permute control vector to
35121 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35123 and issue
35125 vperm 9,11,10,12
35127 we get the desired
35129 vr9 = 00000006 00000004 00000002 00000000. */
35131 void
35132 altivec_expand_vec_perm_const_le (rtx operands[4])
35134 unsigned int i;
35135 rtx perm[16];
35136 rtx constv, unspec;
35137 rtx target = operands[0];
35138 rtx op0 = operands[1];
35139 rtx op1 = operands[2];
35140 rtx sel = operands[3];
35142 /* Unpack and adjust the constant selector. */
35143 for (i = 0; i < 16; ++i)
35145 rtx e = XVECEXP (sel, 0, i);
35146 unsigned int elt = 31 - (INTVAL (e) & 31);
35147 perm[i] = GEN_INT (elt);
35150 /* Expand to a permute, swapping the inputs and using the
35151 adjusted selector. */
35152 if (!REG_P (op0))
35153 op0 = force_reg (V16QImode, op0);
35154 if (!REG_P (op1))
35155 op1 = force_reg (V16QImode, op1);
35157 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35158 constv = force_reg (V16QImode, constv);
35159 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35160 UNSPEC_VPERM);
35161 if (!REG_P (target))
35163 rtx tmp = gen_reg_rtx (V16QImode);
35164 emit_move_insn (tmp, unspec);
35165 unspec = tmp;
35168 emit_move_insn (target, unspec);
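/* Illustrative sketch, added for exposition and not part of the original
   file: the selector adjustment performed above, as plain C. */
#if 0
static void
adjust_selector_le_sketch (const unsigned char sel[16], unsigned char out[16])
{
  for (int i = 0; i < 16; i++)
    out[i] = 31 - (sel[i] & 31);   /* reflect through the 32-byte array */
}
#endif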
35171 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35172 permute control vector. But here it's not a constant, so we generate a
35173 vector NAND or NOR instead: 31 - e equals ~e in the five bits vperm reads. */
35175 void
35176 altivec_expand_vec_perm_le (rtx operands[4])
35178 rtx notx, iorx, unspec;
35179 rtx target = operands[0];
35180 rtx op0 = operands[1];
35181 rtx op1 = operands[2];
35182 rtx sel = operands[3];
35183 rtx tmp = target;
35184 rtx norreg = gen_reg_rtx (V16QImode);
35185 machine_mode mode = GET_MODE (target);
35187 /* Get everything in regs so the pattern matches. */
35188 if (!REG_P (op0))
35189 op0 = force_reg (mode, op0);
35190 if (!REG_P (op1))
35191 op1 = force_reg (mode, op1);
35192 if (!REG_P (sel))
35193 sel = force_reg (V16QImode, sel);
35194 if (!REG_P (target))
35195 tmp = gen_reg_rtx (mode);
35197 if (TARGET_P9_VECTOR)
35199 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35200 UNSPEC_VPERMR);
35202 else
35204 /* Invert the selector with a VNAND if available, else a VNOR.
35205 The VNAND is preferred for future fusion opportunities. */
35206 notx = gen_rtx_NOT (V16QImode, sel);
35207 iorx = (TARGET_P8_VECTOR
35208 ? gen_rtx_IOR (V16QImode, notx, notx)
35209 : gen_rtx_AND (V16QImode, notx, notx));
35210 emit_insn (gen_rtx_SET (norreg, iorx));
35212 /* Permute with operands reversed and adjusted selector. */
35213 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35214 UNSPEC_VPERM);
35217 /* Copy into target, possibly by way of a register. */
35218 if (!REG_P (target))
35220 emit_move_insn (tmp, unspec);
35221 unspec = tmp;
35224 emit_move_insn (target, unspec);
35227 /* Expand an Altivec constant permutation. Return true if we match
35228 an efficient implementation; false to fall back to VPERM. */
35230 bool
35231 altivec_expand_vec_perm_const (rtx operands[4])
35233 struct altivec_perm_insn {
35234 HOST_WIDE_INT mask;
35235 enum insn_code impl;
35236 unsigned char perm[16];
35238 static const struct altivec_perm_insn patterns[] = {
35239 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35240 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35241 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35242 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35243 { OPTION_MASK_ALTIVEC,
35244 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35245 : CODE_FOR_altivec_vmrglb_direct),
35246 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35247 { OPTION_MASK_ALTIVEC,
35248 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35249 : CODE_FOR_altivec_vmrglh_direct),
35250 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35251 { OPTION_MASK_ALTIVEC,
35252 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35253 : CODE_FOR_altivec_vmrglw_direct),
35254 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35255 { OPTION_MASK_ALTIVEC,
35256 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35257 : CODE_FOR_altivec_vmrghb_direct),
35258 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35259 { OPTION_MASK_ALTIVEC,
35260 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35261 : CODE_FOR_altivec_vmrghh_direct),
35262 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35263 { OPTION_MASK_ALTIVEC,
35264 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35265 : CODE_FOR_altivec_vmrghw_direct),
35266 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35267 { OPTION_MASK_P8_VECTOR,
35268 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35269 : CODE_FOR_p8_vmrgow_v4sf_direct),
35270 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35271 { OPTION_MASK_P8_VECTOR,
35272 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35273 : CODE_FOR_p8_vmrgew_v4sf_direct),
35274 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35277 unsigned int i, j, elt, which;
35278 unsigned char perm[16];
35279 rtx target, op0, op1, sel, x;
35280 bool one_vec;
35282 target = operands[0];
35283 op0 = operands[1];
35284 op1 = operands[2];
35285 sel = operands[3];
35287 /* Unpack the constant selector. */
35288 for (i = which = 0; i < 16; ++i)
35290 rtx e = XVECEXP (sel, 0, i);
35291 elt = INTVAL (e) & 31;
35292 which |= (elt < 16 ? 1 : 2);
35293 perm[i] = elt;
35296 /* Simplify the constant selector based on operands. */
35297 switch (which)
35299 default:
35300 gcc_unreachable ();
35302 case 3:
35303 one_vec = false;
35304 if (!rtx_equal_p (op0, op1))
35305 break;
35306 /* FALLTHRU */
35308 case 2:
35309 for (i = 0; i < 16; ++i)
35310 perm[i] &= 15;
35311 op0 = op1;
35312 one_vec = true;
35313 break;
35315 case 1:
35316 op1 = op0;
35317 one_vec = true;
35318 break;
35321 /* Look for splat patterns. */
35322 if (one_vec)
35324 elt = perm[0];
35326 for (i = 0; i < 16; ++i)
35327 if (perm[i] != elt)
35328 break;
35329 if (i == 16)
35331 if (!BYTES_BIG_ENDIAN)
35332 elt = 15 - elt;
35333 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35334 return true;
35337 if (elt % 2 == 0)
35339 for (i = 0; i < 16; i += 2)
35340 if (perm[i] != elt || perm[i + 1] != elt + 1)
35341 break;
35342 if (i == 16)
35344 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35345 x = gen_reg_rtx (V8HImode);
35346 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35347 GEN_INT (field)));
35348 emit_move_insn (target, gen_lowpart (V16QImode, x));
35349 return true;
35353 if (elt % 4 == 0)
35355 for (i = 0; i < 16; i += 4)
35356 if (perm[i] != elt
35357 || perm[i + 1] != elt + 1
35358 || perm[i + 2] != elt + 2
35359 || perm[i + 3] != elt + 3)
35360 break;
35361 if (i == 16)
35363 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35364 x = gen_reg_rtx (V4SImode);
35365 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35366 GEN_INT (field)));
35367 emit_move_insn (target, gen_lowpart (V16QImode, x));
35368 return true;
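/* Worked example, added for exposition and not part of the original file:
   a selector of sixteen 5s becomes vspltb on byte 5 in BE mode (byte 10 in
   LE mode, via 15 - 5); the selector {4,5,4,5,...} becomes vsplth on
   halfword 2 in BE mode (halfword 5 in LE mode). */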
35373 /* Look for merge and pack patterns. */
35374 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35376 bool swapped;
35378 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35379 continue;
35381 elt = patterns[j].perm[0];
35382 if (perm[0] == elt)
35383 swapped = false;
35384 else if (perm[0] == elt + 16)
35385 swapped = true;
35386 else
35387 continue;
35388 for (i = 1; i < 16; ++i)
35390 elt = patterns[j].perm[i];
35391 if (swapped)
35392 elt = (elt >= 16 ? elt - 16 : elt + 16);
35393 else if (one_vec && elt >= 16)
35394 elt -= 16;
35395 if (perm[i] != elt)
35396 break;
35398 if (i == 16)
35400 enum insn_code icode = patterns[j].impl;
35401 machine_mode omode = insn_data[icode].operand[0].mode;
35402 machine_mode imode = insn_data[icode].operand[1].mode;
35404 /* For little-endian, don't use vpkuwum and vpkuhum if the
35405 underlying vector type is not V4SI and V8HI, respectively.
35406 For example, using vpkuwum with a V8HI picks up the even
35407 halfwords (BE numbering) when the even halfwords (LE
35408 numbering) are what we need. */
35409 if (!BYTES_BIG_ENDIAN
35410 && icode == CODE_FOR_altivec_vpkuwum_direct
35411 && ((GET_CODE (op0) == REG
35412 && GET_MODE (op0) != V4SImode)
35413 || (GET_CODE (op0) == SUBREG
35414 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35415 continue;
35416 if (!BYTES_BIG_ENDIAN
35417 && icode == CODE_FOR_altivec_vpkuhum_direct
35418 && ((GET_CODE (op0) == REG
35419 && GET_MODE (op0) != V8HImode)
35420 || (GET_CODE (op0) == SUBREG
35421 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35422 continue;
35424 /* For little-endian, the two input operands must be swapped
35425 (or swapped back) to ensure proper right-to-left numbering
35426 from 0 to 2N-1. */
35427 if (swapped ^ !BYTES_BIG_ENDIAN)
35428 std::swap (op0, op1);
35429 if (imode != V16QImode)
35431 op0 = gen_lowpart (imode, op0);
35432 op1 = gen_lowpart (imode, op1);
35434 if (omode == V16QImode)
35435 x = target;
35436 else
35437 x = gen_reg_rtx (omode);
35438 emit_insn (GEN_FCN (icode) (x, op0, op1));
35439 if (omode != V16QImode)
35440 emit_move_insn (target, gen_lowpart (V16QImode, x));
35441 return true;
35445 if (!BYTES_BIG_ENDIAN)
35447 altivec_expand_vec_perm_const_le (operands);
35448 return true;
35451 return false;
35454 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35455 Return true if we match an efficient implementation. */
35457 static bool
35458 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35459 unsigned char perm0, unsigned char perm1)
35461 rtx x;
35463 /* If both selectors come from the same operand, fold to single op. */
35464 if ((perm0 & 2) == (perm1 & 2))
35466 if (perm0 & 2)
35467 op0 = op1;
35468 else
35469 op1 = op0;
35471 /* If both operands are equal, fold to simpler permutation. */
35472 if (rtx_equal_p (op0, op1))
35474 perm0 = perm0 & 1;
35475 perm1 = (perm1 & 1) + 2;
35477 /* If the first selector comes from the second operand, swap. */
35478 else if (perm0 & 2)
35480 if (perm1 & 2)
35481 return false;
35482 perm0 -= 2;
35483 perm1 += 2;
35484 std::swap (op0, op1);
35486 /* If the second selector does not come from the second operand, fail. */
35487 else if ((perm1 & 2) == 0)
35488 return false;
35490 /* Success! */
35491 if (target != NULL)
35493 machine_mode vmode, dmode;
35494 rtvec v;
35496 vmode = GET_MODE (target);
35497 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35498 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35499 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35500 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35501 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35502 emit_insn (gen_rtx_SET (target, x));
35504 return true;
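/* Worked example, added for exposition and not part of the original file:
   for V2DF with {perm0,perm1} = {1,2}, the code above emits

     (set target (vec_select:V2DF (vec_concat:V4DF op0 op1)
                                  (parallel [(const_int 1) (const_int 2)])))

   i.e. target = { op0[1], op1[0] }. */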
35507 bool
35508 rs6000_expand_vec_perm_const (rtx operands[4])
35510 rtx target, op0, op1, sel;
35511 unsigned char perm0, perm1;
35513 target = operands[0];
35514 op0 = operands[1];
35515 op1 = operands[2];
35516 sel = operands[3];
35518 /* Unpack the constant selector. */
35519 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35520 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35522 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35525 /* Test whether a constant permutation is supported. */
35527 static bool
35528 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35529 const unsigned char *sel)
35531 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35532 if (TARGET_ALTIVEC)
35533 return true;
35535 /* Check for ps_merge* or evmerge* insns. */
35536 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35538 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35539 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35540 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35543 return false;
35546 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35548 static void
35549 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35550 machine_mode vmode, unsigned nelt, rtx perm[])
35552 machine_mode imode;
35553 rtx x;
35555 imode = vmode;
35556 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35557 imode = mode_for_int_vector (vmode).require ();
35559 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35560 x = expand_vec_perm (vmode, op0, op1, x, target);
35561 if (x != target)
35562 emit_move_insn (target, x);
35565 /* Expand an extract even operation. */
35567 void
35568 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35570 machine_mode vmode = GET_MODE (target);
35571 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35572 rtx perm[16];
35574 for (i = 0; i < nelt; i++)
35575 perm[i] = GEN_INT (i * 2);
35577 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35580 /* Expand a vector interleave operation. */
35582 void
35583 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35585 machine_mode vmode = GET_MODE (target);
35586 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35587 rtx perm[16];
35589 high = (highp ? 0 : nelt / 2);
35590 for (i = 0; i < nelt / 2; i++)
35592 perm[i * 2] = GEN_INT (i + high);
35593 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35596 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
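/* Worked example, added for exposition and not part of the original file:
   for nelt = 4, rs6000_expand_extract_even builds the selector {0, 2, 4, 6}
   over the double-wide {op0,op1}, while rs6000_expand_interleave builds
   {0, 4, 1, 5} when HIGHP and {2, 6, 3, 7} otherwise. */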
35599 /* Scale a V2DF vector SRC by 2**SCALE, placing the result in TGT. */
35600 void
35601 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35603 HOST_WIDE_INT hwi_scale (scale);
35604 REAL_VALUE_TYPE r_pow;
35605 rtvec v = rtvec_alloc (2);
35606 rtx elt;
35607 rtx scale_vec = gen_reg_rtx (V2DFmode);
35608 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35609 elt = const_double_from_real_value (r_pow, DFmode);
35610 RTVEC_ELT (v, 0) = elt;
35611 RTVEC_ELT (v, 1) = elt;
35612 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35613 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35616 /* Return an RTX representing where to find the function value of a
35617 function returning MODE. */
35618 static rtx
35619 rs6000_complex_function_value (machine_mode mode)
35621 unsigned int regno;
35622 rtx r1, r2;
35623 machine_mode inner = GET_MODE_INNER (mode);
35624 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35626 if (TARGET_FLOAT128_TYPE
35627 && (mode == KCmode
35628 || (mode == TCmode && TARGET_IEEEQUAD)))
35629 regno = ALTIVEC_ARG_RETURN;
35631 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35632 regno = FP_ARG_RETURN;
35634 else
35636 regno = GP_ARG_RETURN;
35638 /* 32-bit is OK since it'll go in r3/r4. */
35639 if (TARGET_32BIT && inner_bytes >= 4)
35640 return gen_rtx_REG (mode, regno);
35643 if (inner_bytes >= 8)
35644 return gen_rtx_REG (mode, regno);
35646 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35647 const0_rtx);
35648 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35649 GEN_INT (inner_bytes));
35650 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
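/* Worked example, added for exposition and not part of the original file,
   assuming FP_ARG_RETURN is fp1: a 64-bit SCmode (complex float) return has
   inner_bytes == 4, so the code above builds a PARALLEL placing the real
   part in fp1 at offset 0 and the imaginary part in fp2 at offset 4. */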
35653 /* Return an rtx describing a return value of MODE as a PARALLEL
35654 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35655 stride REG_STRIDE. */
35657 static rtx
35658 rs6000_parallel_return (machine_mode mode,
35659 int n_elts, machine_mode elt_mode,
35660 unsigned int regno, unsigned int reg_stride)
35662 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35664 int i;
35665 for (i = 0; i < n_elts; i++)
35667 rtx r = gen_rtx_REG (elt_mode, regno);
35668 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35669 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35670 regno += reg_stride;
35673 return par;
35676 /* Target hook for TARGET_FUNCTION_VALUE.
35678 An integer value is in r3 and a floating-point value is in fp1,
35679 unless -msoft-float. */
35681 static rtx
35682 rs6000_function_value (const_tree valtype,
35683 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35684 bool outgoing ATTRIBUTE_UNUSED)
35686 machine_mode mode;
35687 unsigned int regno;
35688 machine_mode elt_mode;
35689 int n_elts;
35691 /* Special handling for structs in darwin64. */
35692 if (TARGET_MACHO
35693 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35695 CUMULATIVE_ARGS valcum;
35696 rtx valret;
35698 valcum.words = 0;
35699 valcum.fregno = FP_ARG_MIN_REG;
35700 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35701 /* Do a trial code generation as if this were going to be passed as
35702 an argument; if any part goes in memory, we return NULL. */
35703 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35704 if (valret)
35705 return valret;
35706 /* Otherwise fall through to standard ABI rules. */
35709 mode = TYPE_MODE (valtype);
35711 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35712 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35714 int first_reg, n_regs;
35716 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35718 /* _Decimal128 must use even/odd register pairs. */
35719 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35720 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35722 else
35724 first_reg = ALTIVEC_ARG_RETURN;
35725 n_regs = 1;
35728 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35731 /* Some return value types need to be split for the 32-bit ABI with -mpowerpc64. */
35732 if (TARGET_32BIT && TARGET_POWERPC64)
35733 switch (mode)
35735 default:
35736 break;
35737 case E_DImode:
35738 case E_SCmode:
35739 case E_DCmode:
35740 case E_TCmode:
35741 int count = GET_MODE_SIZE (mode) / 4;
35742 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35745 if ((INTEGRAL_TYPE_P (valtype)
35746 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35747 || POINTER_TYPE_P (valtype))
35748 mode = TARGET_32BIT ? SImode : DImode;
35750 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35751 /* _Decimal128 must use an even/odd register pair. */
35752 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35753 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35754 && !FLOAT128_VECTOR_P (mode)
35755 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35756 regno = FP_ARG_RETURN;
35757 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35758 && targetm.calls.split_complex_arg)
35759 return rs6000_complex_function_value (mode);
35760 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35761 return register is used in both cases, and we won't see V2DImode/V2DFmode
35762 for pure altivec, combine the two cases. */
35763 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35764 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35765 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35766 regno = ALTIVEC_ARG_RETURN;
35767 else
35768 regno = GP_ARG_RETURN;
35770 return gen_rtx_REG (mode, regno);
35773 /* Define how to find the value returned by a library function
35774 assuming the value has mode MODE. */
35775 static rtx
35776 rs6000_libcall_value (machine_mode mode)
35778 unsigned int regno;
35780 /* Long long return values need to be split for the 32-bit ABI with -mpowerpc64. */
35781 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35782 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35784 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35785 /* _Decimal128 must use an even/odd register pair. */
35786 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35787 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35788 && TARGET_HARD_FLOAT
35789 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35790 regno = FP_ARG_RETURN;
35791 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35792 return register is used in both cases, and we won't see V2DImode/V2DFmode
35793 for pure altivec, combine the two cases. */
35794 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35795 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35796 regno = ALTIVEC_ARG_RETURN;
35797 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35798 return rs6000_complex_function_value (mode);
35799 else
35800 regno = GP_ARG_RETURN;
35802 return gen_rtx_REG (mode, regno);
35805 /* Compute register pressure classes. We implement the target hook to avoid
35806 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35807 lead to incorrect estimates of the number of available registers and
35808 therefore to increased register pressure and spilling. */
35809 static int
35810 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35812 int n;
35814 n = 0;
35815 pressure_classes[n++] = GENERAL_REGS;
35816 if (TARGET_VSX)
35817 pressure_classes[n++] = VSX_REGS;
35818 else
35820 if (TARGET_ALTIVEC)
35821 pressure_classes[n++] = ALTIVEC_REGS;
35822 if (TARGET_HARD_FLOAT)
35823 pressure_classes[n++] = FLOAT_REGS;
35825 pressure_classes[n++] = CR_REGS;
35826 pressure_classes[n++] = SPECIAL_REGS;
35828 return n;
35831 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35832 Frame pointer elimination is automatically handled.
35834 For the RS/6000, if frame pointer elimination is being done, we would like
35835 to convert ap into fp, not sp.
35837 We need r30 if -mminimal-toc was specified, and there are constant pool
35838 references. */
35840 static bool
35841 rs6000_can_eliminate (const int from, const int to)
35843 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35844 ? ! frame_pointer_needed
35845 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35846 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35847 || constant_pool_empty_p ()
35848 : true);
35851 /* Define the offset between two registers, FROM to be eliminated and its
35852 replacement TO, at the start of a routine. */
35853 HOST_WIDE_INT
35854 rs6000_initial_elimination_offset (int from, int to)
35856 rs6000_stack_t *info = rs6000_stack_info ();
35857 HOST_WIDE_INT offset;
35859 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35860 offset = info->push_p ? 0 : -info->total_size;
35861 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35863 offset = info->push_p ? 0 : -info->total_size;
35864 if (FRAME_GROWS_DOWNWARD)
35865 offset += info->fixed_size + info->vars_size + info->parm_size;
35867 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35868 offset = FRAME_GROWS_DOWNWARD
35869 ? info->fixed_size + info->vars_size + info->parm_size
35870 : 0;
35871 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35872 offset = info->total_size;
35873 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35874 offset = info->push_p ? info->total_size : 0;
35875 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35876 offset = 0;
35877 else
35878 gcc_unreachable ();
35880 return offset;
35883 /* Fill in sizes of registers used by unwinder. */
35885 static void
35886 rs6000_init_dwarf_reg_sizes_extra (tree address)
35888 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35890 int i;
35891 machine_mode mode = TYPE_MODE (char_type_node);
35892 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35893 rtx mem = gen_rtx_MEM (BLKmode, addr);
35894 rtx value = gen_int_mode (16, mode);
35896 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35897 The unwinder still needs to know the size of Altivec registers. */
35899 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35901 int column = DWARF_REG_TO_UNWIND_COLUMN
35902 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35903 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35905 emit_move_insn (adjust_address (mem, mode, offset), value);
35910 /* Map internal gcc register numbers to debug format register numbers.
35911 FORMAT specifies the type of debug register number to use:
35912 0 -- debug information, except for frame-related sections
35913 1 -- DWARF .debug_frame section
35914 2 -- DWARF .eh_frame section */
35916 unsigned int
35917 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35919 /* Except for the above, we use the internal number for non-DWARF
35920 debug information, and also for .eh_frame. */
35921 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35922 return regno;
35924 /* On some platforms, we use the standard DWARF register
35925 numbering for .debug_info and .debug_frame. */
35926 #ifdef RS6000_USE_DWARF_NUMBERING
35927 if (regno <= 63)
35928 return regno;
35929 if (regno == LR_REGNO)
35930 return 108;
35931 if (regno == CTR_REGNO)
35932 return 109;
35933 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35934 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35935 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35936 to the DWARF reg for CR. */
35937 if (format == 1 && regno == CR2_REGNO)
35938 return 64;
35939 if (CR_REGNO_P (regno))
35940 return regno - CR0_REGNO + 86;
35941 if (regno == CA_REGNO)
35942 return 101; /* XER */
35943 if (ALTIVEC_REGNO_P (regno))
35944 return regno - FIRST_ALTIVEC_REGNO + 1124;
35945 if (regno == VRSAVE_REGNO)
35946 return 356;
35947 if (regno == VSCR_REGNO)
35948 return 67;
35949 #endif
35950 return regno;
35953 /* target hook eh_return_filter_mode */
35954 static scalar_int_mode
35955 rs6000_eh_return_filter_mode (void)
35957 return TARGET_32BIT ? SImode : word_mode;
35960 /* Target hook for scalar_mode_supported_p. */
35961 static bool
35962 rs6000_scalar_mode_supported_p (scalar_mode mode)
35964 /* -m32 does not support TImode. This is the default, from
35965 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35966 same ABI as for -m32. But default_scalar_mode_supported_p allows
35967 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35968 for -mpowerpc64. */
35969 if (TARGET_32BIT && mode == TImode)
35970 return false;
35972 if (DECIMAL_FLOAT_MODE_P (mode))
35973 return default_decimal_float_supported_p ();
35974 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35975 return true;
35976 else
35977 return default_scalar_mode_supported_p (mode);
35980 /* Target hook for vector_mode_supported_p. */
35981 static bool
35982 rs6000_vector_mode_supported_p (machine_mode mode)
35985 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35986 return true;
35988 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35989 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35990 double-double. */
35991 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35992 return true;
35994 else
35995 return false;
35998 /* Target hook for floatn_mode. */
35999 static opt_scalar_float_mode
36000 rs6000_floatn_mode (int n, bool extended)
36002 if (extended)
36004 switch (n)
36006 case 32:
36007 return DFmode;
36009 case 64:
36010 if (TARGET_FLOAT128_TYPE)
36011 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36012 else
36013 return opt_scalar_float_mode ();
36015 case 128:
36016 return opt_scalar_float_mode ();
36018 default:
36019 /* Those are the only valid _FloatNx types. */
36020 gcc_unreachable ();
36023 else
36025 switch (n)
36027 case 32:
36028 return SFmode;
36030 case 64:
36031 return DFmode;
36033 case 128:
36034 if (TARGET_FLOAT128_TYPE)
36035 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36036 else
36037 return opt_scalar_float_mode ();
36039 default:
36040 return opt_scalar_float_mode ();
36046 /* Target hook for c_mode_for_suffix. */
36047 static machine_mode
36048 rs6000_c_mode_for_suffix (char suffix)
36050 if (TARGET_FLOAT128_TYPE)
36052 if (suffix == 'q' || suffix == 'Q')
36053 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36055 /* At the moment, we are not defining a suffix for IBM extended double.
36056 If/when the default for -mabi=ieeelongdouble is changed, and we want
36057 to support __ibm128 constants in legacy library code, we may need to
36058 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36059 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36060 __float80 constants. */
36063 return VOIDmode;
36066 /* Target hook for invalid_arg_for_unprototyped_fn. */
36067 static const char *
36068 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36070 return (!rs6000_darwin64_abi
36071 && typelist == 0
36072 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36073 && (funcdecl == NULL_TREE
36074 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36075 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36076 ? N_("AltiVec argument passed to unprototyped function")
36077 : NULL;
36080 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36081 setup by using __stack_chk_fail_local hidden function instead of
36082 calling __stack_chk_fail directly. Otherwise it is better to call
36083 __stack_chk_fail directly. */
36085 static tree ATTRIBUTE_UNUSED
36086 rs6000_stack_protect_fail (void)
36088 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36089 ? default_hidden_stack_protect_fail ()
36090 : default_external_stack_protect_fail ();
36093 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36095 #if TARGET_ELF
36096 static unsigned HOST_WIDE_INT
36097 rs6000_asan_shadow_offset (void)
36099 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36101 #endif
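/* Worked example, added for exposition and not part of the original file:
   with the usual ASan mapping shadow = (addr >> 3) + offset, the value
   above yields shadow addresses of addr/8 + 2^41 on 64-bit targets and
   addr/8 + 2^29 on 32-bit targets. */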
36103 /* Mask options that we want to support inside of attribute((target)) and
36104 #pragma GCC target operations. Note, we do not include things like
36105 64/32-bit, endianness, hard/soft floating point, etc. that would have
36106 different calling sequences. */
36108 struct rs6000_opt_mask {
36109 const char *name; /* option name */
36110 HOST_WIDE_INT mask; /* mask to set */
36111 bool invert; /* invert sense of mask */
36112 bool valid_target; /* option is a target option */
36115 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36117 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36118 { "cmpb", OPTION_MASK_CMPB, false, true },
36119 { "crypto", OPTION_MASK_CRYPTO, false, true },
36120 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36121 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36122 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36123 false, true },
36124 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36125 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36126 { "fprnd", OPTION_MASK_FPRND, false, true },
36127 { "hard-dfp", OPTION_MASK_DFP, false, true },
36128 { "htm", OPTION_MASK_HTM, false, true },
36129 { "isel", OPTION_MASK_ISEL, false, true },
36130 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36131 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36132 { "modulo", OPTION_MASK_MODULO, false, true },
36133 { "mulhw", OPTION_MASK_MULHW, false, true },
36134 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36135 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36136 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36137 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36138 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36139 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36140 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36141 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36142 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36143 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36144 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36145 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36146 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36147 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36148 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36149 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36150 { "string", OPTION_MASK_STRING, false, true },
36151 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36152 { "update", OPTION_MASK_NO_UPDATE, true , true },
36153 { "vsx", OPTION_MASK_VSX, false, true },
36154 #ifdef OPTION_MASK_64BIT
36155 #if TARGET_AIX_OS
36156 { "aix64", OPTION_MASK_64BIT, false, false },
36157 { "aix32", OPTION_MASK_64BIT, true, false },
36158 #else
36159 { "64", OPTION_MASK_64BIT, false, false },
36160 { "32", OPTION_MASK_64BIT, true, false },
36161 #endif
36162 #endif
36163 #ifdef OPTION_MASK_EABI
36164 { "eabi", OPTION_MASK_EABI, false, false },
36165 #endif
36166 #ifdef OPTION_MASK_LITTLE_ENDIAN
36167 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36168 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36169 #endif
36170 #ifdef OPTION_MASK_RELOCATABLE
36171 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36172 #endif
36173 #ifdef OPTION_MASK_STRICT_ALIGN
36174 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36175 #endif
36176 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36177 { "string", OPTION_MASK_STRING, false, false },
36180 /* Builtin mask mapping for printing the flags. */
36181 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36183 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36184 { "vsx", RS6000_BTM_VSX, false, false },
36185 { "paired", RS6000_BTM_PAIRED, false, false },
36186 { "fre", RS6000_BTM_FRE, false, false },
36187 { "fres", RS6000_BTM_FRES, false, false },
36188 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36189 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36190 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36191 { "cell", RS6000_BTM_CELL, false, false },
36192 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36193 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36194 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36195 { "crypto", RS6000_BTM_CRYPTO, false, false },
36196 { "htm", RS6000_BTM_HTM, false, false },
36197 { "hard-dfp", RS6000_BTM_DFP, false, false },
36198 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36199 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36200 { "float128", RS6000_BTM_FLOAT128, false, false },
36203 /* Option variables that we want to support inside attribute((target)) and
36204 #pragma GCC target operations. */
36206 struct rs6000_opt_var {
36207 const char *name; /* option name */
36208 size_t global_offset; /* offset of the option in global_options. */
36209 size_t target_offset; /* offset of the option in target options. */
36212 static struct rs6000_opt_var const rs6000_opt_vars[] =
36214 { "friz",
36215 offsetof (struct gcc_options, x_TARGET_FRIZ),
36216 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36217 { "avoid-indexed-addresses",
36218 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36219 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36220 { "paired",
36221 offsetof (struct gcc_options, x_rs6000_paired_float),
36222 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36223 { "longcall",
36224 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36225 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36226 { "optimize-swaps",
36227 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36228 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36229 { "allow-movmisalign",
36230 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36231 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36232 { "sched-groups",
36233 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36234 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36235 { "always-hint",
36236 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36237 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36238 { "align-branch-targets",
36239 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36240 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36241 { "tls-markers",
36242 offsetof (struct gcc_options, x_tls_markers),
36243 offsetof (struct cl_target_option, x_tls_markers), },
36244 { "sched-prolog",
36245 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36246 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36247 { "sched-epilog",
36248 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36249 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36252 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36253 parsing. Return true if there were no errors. */
36255 static bool
36256 rs6000_inner_target_options (tree args, bool attr_p)
36258 bool ret = true;
36260 if (args == NULL_TREE)
36263 else if (TREE_CODE (args) == STRING_CST)
36265 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36266 char *q;
36268 while ((q = strtok (p, ",")) != NULL)
36270 bool error_p = false;
36271 bool not_valid_p = false;
36272 const char *cpu_opt = NULL;
36274 p = NULL;
36275 if (strncmp (q, "cpu=", 4) == 0)
36277 int cpu_index = rs6000_cpu_name_lookup (q+4);
36278 if (cpu_index >= 0)
36279 rs6000_cpu_index = cpu_index;
36280 else
36282 error_p = true;
36283 cpu_opt = q+4;
36286 else if (strncmp (q, "tune=", 5) == 0)
36288 int tune_index = rs6000_cpu_name_lookup (q+5);
36289 if (tune_index >= 0)
36290 rs6000_tune_index = tune_index;
36291 else
36293 error_p = true;
36294 cpu_opt = q+5;
36297 else
36299 size_t i;
36300 bool invert = false;
36301 char *r = q;
36303 error_p = true;
36304 if (strncmp (r, "no-", 3) == 0)
36306 invert = true;
36307 r += 3;
36310 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36311 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36313 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36315 if (!rs6000_opt_masks[i].valid_target)
36316 not_valid_p = true;
36317 else
36319 error_p = false;
36320 rs6000_isa_flags_explicit |= mask;
36322 /* VSX needs altivec, so -mvsx automagically sets
36323 altivec and disables -mavoid-indexed-addresses. */
36324 if (!invert)
36326 if (mask == OPTION_MASK_VSX)
36328 mask |= OPTION_MASK_ALTIVEC;
36329 TARGET_AVOID_XFORM = 0;
36333 if (rs6000_opt_masks[i].invert)
36334 invert = !invert;
36336 if (invert)
36337 rs6000_isa_flags &= ~mask;
36338 else
36339 rs6000_isa_flags |= mask;
36341 break;
36344 if (error_p && !not_valid_p)
36346 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36347 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36349 size_t j = rs6000_opt_vars[i].global_offset;
36350 *((int *) ((char *)&global_options + j)) = !invert;
36351 error_p = false;
36352 not_valid_p = false;
36353 break;
36358 if (error_p)
36360 const char *eprefix, *esuffix;
36362 ret = false;
36363 if (attr_p)
36365 eprefix = "__attribute__((__target__(";
36366 esuffix = ")))";
36368 else
36370 eprefix = "#pragma GCC target ";
36371 esuffix = "";
36374 if (cpu_opt)
36375 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36376 q, esuffix);
36377 else if (not_valid_p)
36378 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36379 else
36380 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36385 else if (TREE_CODE (args) == TREE_LIST)
36389 tree value = TREE_VALUE (args);
36390 if (value)
36392 bool ret2 = rs6000_inner_target_options (value, attr_p);
36393 if (!ret2)
36394 ret = false;
36396 args = TREE_CHAIN (args);
36398 while (args != NULL_TREE);
36401 else
36403 error ("attribute %<target%> argument not a string");
36404 return false;
36407 return ret;
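/* Usage example, added for exposition and not part of the original file:
   the strings parsed above are comma-separated names drawn from
   rs6000_opt_masks[] and rs6000_opt_vars[], optionally prefixed with "no-",
   plus the cpu= and tune= forms, e.g.

     __attribute__((__target__("no-vsx,popcntd"))) void f (void);
     #pragma GCC target ("cpu=power8,htm")  */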
36410 /* Print out the target options as a list for -mdebug=target. */
36412 static void
36413 rs6000_debug_target_options (tree args, const char *prefix)
36415 if (args == NULL_TREE)
36416 fprintf (stderr, "%s<NULL>", prefix);
36418 else if (TREE_CODE (args) == STRING_CST)
36420 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36421 char *q;
36423 while ((q = strtok (p, ",")) != NULL)
36425 p = NULL;
36426 fprintf (stderr, "%s\"%s\"", prefix, q);
36427 prefix = ", ";
36431 else if (TREE_CODE (args) == TREE_LIST)
36435 tree value = TREE_VALUE (args);
36436 if (value)
36438 rs6000_debug_target_options (value, prefix);
36439 prefix = ", ";
36441 args = TREE_CHAIN (args);
36443 while (args != NULL_TREE);
36446 else
36447 gcc_unreachable ();
36449 return;
36453 /* Hook to validate attribute((target("..."))). */
36455 static bool
36456 rs6000_valid_attribute_p (tree fndecl,
36457 tree ARG_UNUSED (name),
36458 tree args,
36459 int flags)
36461 struct cl_target_option cur_target;
36462 bool ret;
36463 tree old_optimize = build_optimization_node (&global_options);
36464 tree new_target, new_optimize;
36465 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36467 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36469 if (TARGET_DEBUG_TARGET)
36471 tree tname = DECL_NAME (fndecl);
36472 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36473 if (tname)
36474 fprintf (stderr, "function: %.*s\n",
36475 (int) IDENTIFIER_LENGTH (tname),
36476 IDENTIFIER_POINTER (tname));
36477 else
36478 fprintf (stderr, "function: unknown\n");
36480 fprintf (stderr, "args:");
36481 rs6000_debug_target_options (args, " ");
36482 fprintf (stderr, "\n");
36484 if (flags)
36485 fprintf (stderr, "flags: 0x%x\n", flags);
36487 fprintf (stderr, "--------------------\n");
36490 /* attribute((target("default"))) does nothing, beyond
36491 affecting multi-versioning. */
36492 if (TREE_VALUE (args)
36493 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36494 && TREE_CHAIN (args) == NULL_TREE
36495 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36496 return true;
36498 old_optimize = build_optimization_node (&global_options);
36499 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36501 /* If the function changed the optimization levels as well as setting target
36502 options, start with the optimizations specified. */
36503 if (func_optimize && func_optimize != old_optimize)
36504 cl_optimization_restore (&global_options,
36505 TREE_OPTIMIZATION (func_optimize));
36507 /* The target attributes may also change some optimization flags, so update
36508 the optimization options if necessary. */
36509 cl_target_option_save (&cur_target, &global_options);
36510 rs6000_cpu_index = rs6000_tune_index = -1;
36511 ret = rs6000_inner_target_options (args, true);
36513 /* Set up any additional state. */
36514 if (ret)
36516 ret = rs6000_option_override_internal (false);
36517 new_target = build_target_option_node (&global_options);
36519 else
36520 new_target = NULL;
36522 new_optimize = build_optimization_node (&global_options);
36524 if (!new_target)
36525 ret = false;
36527 else if (fndecl)
36529 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36531 if (old_optimize != new_optimize)
36532 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36535 cl_target_option_restore (&global_options, &cur_target);
36537 if (old_optimize != new_optimize)
36538 cl_optimization_restore (&global_options,
36539 TREE_OPTIMIZATION (old_optimize));
36541 return ret;
36545 /* Hook to validate the current #pragma GCC target and set the state, and
36546 update the macros based on what was changed. If ARGS is NULL, then
36547 POP_TARGET is used to reset the options. */
36549 bool
36550 rs6000_pragma_target_parse (tree args, tree pop_target)
36552 tree prev_tree = build_target_option_node (&global_options);
36553 tree cur_tree;
36554 struct cl_target_option *prev_opt, *cur_opt;
36555 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36556 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36558 if (TARGET_DEBUG_TARGET)
36560 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36561 fprintf (stderr, "args:");
36562 rs6000_debug_target_options (args, " ");
36563 fprintf (stderr, "\n");
36565 if (pop_target)
36567 fprintf (stderr, "pop_target:\n");
36568 debug_tree (pop_target);
36570 else
36571 fprintf (stderr, "pop_target: <NULL>\n");
36573 fprintf (stderr, "--------------------\n");
36576 if (! args)
36578 cur_tree = ((pop_target)
36579 ? pop_target
36580 : target_option_default_node);
36581 cl_target_option_restore (&global_options,
36582 TREE_TARGET_OPTION (cur_tree));
36584 else
36586 rs6000_cpu_index = rs6000_tune_index = -1;
36587 if (!rs6000_inner_target_options (args, false)
36588 || !rs6000_option_override_internal (false)
36589 || (cur_tree = build_target_option_node (&global_options))
36590 == NULL_TREE)
36592 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36593 fprintf (stderr, "invalid pragma\n");
36595 return false;
36599 target_option_current_node = cur_tree;
36601 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36602 change the macros that are defined. */
36603 if (rs6000_target_modify_macros_ptr)
36605 prev_opt = TREE_TARGET_OPTION (prev_tree);
36606 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36607 prev_flags = prev_opt->x_rs6000_isa_flags;
36609 cur_opt = TREE_TARGET_OPTION (cur_tree);
36610 cur_flags = cur_opt->x_rs6000_isa_flags;
36611 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36613 diff_bumask = (prev_bumask ^ cur_bumask);
36614 diff_flags = (prev_flags ^ cur_flags);
36616 if ((diff_flags != 0) || (diff_bumask != 0))
36618 /* Delete old macros. */
36619 rs6000_target_modify_macros_ptr (false,
36620 prev_flags & diff_flags,
36621 prev_bumask & diff_bumask);
36623 /* Define new macros. */
36624 rs6000_target_modify_macros_ptr (true,
36625 cur_flags & diff_flags,
36626 cur_bumask & diff_bumask);
36630 return true;
36634 /* Remember the last target of rs6000_set_current_function. */
36635 static GTY(()) tree rs6000_previous_fndecl;
36637 /* Restore target's globals from NEW_TREE and invalidate the
36638 rs6000_previous_fndecl cache. */
36640 static void
36641 rs6000_activate_target_options (tree new_tree)
36643 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36644 if (TREE_TARGET_GLOBALS (new_tree))
36645 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36646 else if (new_tree == target_option_default_node)
36647 restore_target_globals (&default_target_globals);
36648 else
36649 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36650 rs6000_previous_fndecl = NULL_TREE;
36653 /* Establish appropriate back-end context for processing the function
36654 FNDECL. The argument might be NULL to indicate processing at top
36655 level, outside of any function scope. */
36656 static void
36657 rs6000_set_current_function (tree fndecl)
36659 if (TARGET_DEBUG_TARGET)
36661 fprintf (stderr, "\n==================== rs6000_set_current_function");
36663 if (fndecl)
36664 fprintf (stderr, ", fndecl %s (%p)",
36665 (DECL_NAME (fndecl)
36666 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36667 : "<unknown>"), (void *)fndecl);
36669 if (rs6000_previous_fndecl)
36670 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36672 fprintf (stderr, "\n");
36675 /* Only change the context if the function changes. This hook is called
36676 several times in the course of compiling a function, and we don't want to
36677 slow things down too much or call target_reinit when it isn't safe. */
36678 if (fndecl == rs6000_previous_fndecl)
36679 return;
36681 tree old_tree;
36682 if (rs6000_previous_fndecl == NULL_TREE)
36683 old_tree = target_option_current_node;
36684 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36685 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36686 else
36687 old_tree = target_option_default_node;
36689 tree new_tree;
36690 if (fndecl == NULL_TREE)
36692 if (old_tree != target_option_current_node)
36693 new_tree = target_option_current_node;
36694 else
36695 new_tree = NULL_TREE;
36697 else
36699 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36700 if (new_tree == NULL_TREE)
36701 new_tree = target_option_default_node;
36704 if (TARGET_DEBUG_TARGET)
36706 if (new_tree)
36708 fprintf (stderr, "\nnew fndecl target specific options:\n");
36709 debug_tree (new_tree);
36712 if (old_tree)
36714 fprintf (stderr, "\nold fndecl target specific options:\n");
36715 debug_tree (old_tree);
36718 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36719 fprintf (stderr, "--------------------\n");
36722 if (new_tree && old_tree != new_tree)
36723 rs6000_activate_target_options (new_tree);
36725 if (fndecl)
36726 rs6000_previous_fndecl = fndecl;
36730 /* Save the current options */
36732 static void
36733 rs6000_function_specific_save (struct cl_target_option *ptr,
36734 struct gcc_options *opts)
36736 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36737 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36740 /* Restore the current options */
36742 static void
36743 rs6000_function_specific_restore (struct gcc_options *opts,
36744 struct cl_target_option *ptr)
36747 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36748 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36749 (void) rs6000_option_override_internal (false);
36752 /* Print the current options */
36754 static void
36755 rs6000_function_specific_print (FILE *file, int indent,
36756 struct cl_target_option *ptr)
36758 rs6000_print_isa_options (file, indent, "Isa options set",
36759 ptr->x_rs6000_isa_flags);
36761 rs6000_print_isa_options (file, indent, "Isa options explicit",
36762 ptr->x_rs6000_isa_flags_explicit);
36765 /* Helper function to print the current isa or misc options on a line. */
36767 static void
36768 rs6000_print_options_internal (FILE *file,
36769 int indent,
36770 const char *string,
36771 HOST_WIDE_INT flags,
36772 const char *prefix,
36773 const struct rs6000_opt_mask *opts,
36774 size_t num_elements)
36776 size_t i;
36777 size_t start_column = 0;
36778 size_t cur_column;
36779 size_t max_column = 120;
36780 size_t prefix_len = strlen (prefix);
36781 size_t comma_len = 0;
36782 const char *comma = "";
36784 if (indent)
36785 start_column += fprintf (file, "%*s", indent, "");
36787 if (!flags)
36789 fprintf (file, DEBUG_FMT_S, string, "<none>");
36790 return;
36793 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36795 /* Print the various mask options. */
36796 cur_column = start_column;
36797 for (i = 0; i < num_elements; i++)
36799 bool invert = opts[i].invert;
36800 const char *name = opts[i].name;
36801 const char *no_str = "";
36802 HOST_WIDE_INT mask = opts[i].mask;
36803 size_t len = comma_len + prefix_len + strlen (name);
36805 if (!invert)
36807 if ((flags & mask) == 0)
36809 no_str = "no-";
36810 len += sizeof ("no-") - 1;
36813 flags &= ~mask;
36816 else
36818 if ((flags & mask) != 0)
36820 no_str = "no-";
36821 len += sizeof ("no-") - 1;
36824 flags |= mask;
36827 cur_column += len;
36828 if (cur_column > max_column)
36830 fprintf (file, ", \\\n%*s", (int)start_column, "");
36831 cur_column = start_column + len;
36832 comma = "";
36835 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36836 comma = ", ";
36837 comma_len = sizeof (", ") - 1;
36840 fputs ("\n", file);
36843 /* Helper function to print the current isa options on a line. */
36845 static void
36846 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36847 HOST_WIDE_INT flags)
36849 rs6000_print_options_internal (file, indent, string, flags, "-m",
36850 &rs6000_opt_masks[0],
36851 ARRAY_SIZE (rs6000_opt_masks));
36854 static void
36855 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36856 HOST_WIDE_INT flags)
36858 rs6000_print_options_internal (file, indent, string, flags, "",
36859 &rs6000_builtin_mask_names[0],
36860 ARRAY_SIZE (rs6000_builtin_mask_names));
36863 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36864 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36865 -mupper-regs-df, etc.).
36867 If the user used -mno-power8-vector, we need to turn off all of the implicit
36868 ISA 2.07 and 3.0 options that relate to the vector unit.
36870 If the user used -mno-power9-vector, we need to turn off all of the implicit
36871 ISA 3.0 options that relate to the vector unit.
36873 This function does not handle explicit options such as the user specifying
36874 -mdirect-move. These are handled in rs6000_option_override_internal, and
36875 the appropriate error is given if needed.
36877 We return a mask of all of the implicit options that should not be enabled
36878 by default. */
36880 static HOST_WIDE_INT
36881 rs6000_disable_incompatible_switches (void)
36883 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36884 size_t i, j;
36886 static const struct {
36887 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36888 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36889 const char *const name; /* name of the switch. */
36890 } flags[] = {
36891 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36892 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36893 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36896 for (i = 0; i < ARRAY_SIZE (flags); i++)
36898 HOST_WIDE_INT no_flag = flags[i].no_flag;
36900 if ((rs6000_isa_flags & no_flag) == 0
36901 && (rs6000_isa_flags_explicit & no_flag) != 0)
36903 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36904 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36905 & rs6000_isa_flags
36906 & dep_flags);
36908 if (set_flags)
36910 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36911 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36913 set_flags &= ~rs6000_opt_masks[j].mask;
36914 error ("%<-mno-%s%> turns off %<-m%s%>",
36915 flags[i].name,
36916 rs6000_opt_masks[j].name);
36919 gcc_assert (!set_flags);
36922 rs6000_isa_flags &= ~dep_flags;
36923 ignore_masks |= no_flag | dep_flags;
36927 return ignore_masks;
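/* For illustration, with the dependency table above a command line such as

     gcc -mno-vsx -mpower8-vector foo.c

   is rejected with "-mno-vsx turns off -mpower8-vector", because the
   dependent flag was explicitly enabled while its prerequisite was
   explicitly disabled.  Dependent flags that were only implicitly enabled
   are cleared silently and returned in the ignore mask.  */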
36931 /* Helper function for printing the function name when debugging. */
36933 static const char *
36934 get_decl_name (tree fn)
36936 tree name;
36938 if (!fn)
36939 return "<null>";
36941 name = DECL_NAME (fn);
36942 if (!name)
36943 return "<no-name>";
36945 return IDENTIFIER_POINTER (name);
36948 /* Return the clone id of the target we are compiling code for in a target
36949 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36950 the priority list for the target clones (ordered from lowest to
36951 highest). */
36953 static int
36954 rs6000_clone_priority (tree fndecl)
36956 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36957 HOST_WIDE_INT isa_masks;
36958 int ret = CLONE_DEFAULT;
36959 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36960 const char *attrs_str = NULL;
36962 attrs = TREE_VALUE (TREE_VALUE (attrs));
36963 attrs_str = TREE_STRING_POINTER (attrs);
36965 /* Return priority zero for default function. Return the ISA needed for the
36966 function if it is not the default. */
36967 if (strcmp (attrs_str, "default") != 0)
36969 if (fn_opts == NULL_TREE)
36970 fn_opts = target_option_default_node;
36972 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36973 isa_masks = rs6000_isa_flags;
36974 else
36975 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36977 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36978 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36979 break;
36982 if (TARGET_DEBUG_TARGET)
36983 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
36984 get_decl_name (fndecl), ret);
36986 return ret;
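/* Illustrative example: these priorities back the target_clones attribute,
   e.g. (assuming the clone table includes an ISA 3.0 entry):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod_func (long a, long b) { return (a % b) + 3; }

   The "cpu=power9" clone gets a higher clone id than "default", so the
   dispatcher tests for ISA 3.0 hardware before falling back.  */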
36989 /* This compares the priority of target features in function DECL1 and DECL2.
36990 It returns positive value if DECL1 is higher priority, negative value if
36991 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36992 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36994 static int
36995 rs6000_compare_version_priority (tree decl1, tree decl2)
36997 int priority1 = rs6000_clone_priority (decl1);
36998 int priority2 = rs6000_clone_priority (decl2);
36999 int ret = priority1 - priority2;
37001 if (TARGET_DEBUG_TARGET)
37002 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37003 get_decl_name (decl1), get_decl_name (decl2), ret);
37005 return ret;
37008 /* Make a dispatcher declaration for the multi-versioned function DECL.
37009 Calls to DECL function will be replaced with calls to the dispatcher
37010 by the front-end. Returns the decl of the dispatcher function. */
37012 static tree
37013 rs6000_get_function_versions_dispatcher (void *decl)
37015 tree fn = (tree) decl;
37016 struct cgraph_node *node = NULL;
37017 struct cgraph_node *default_node = NULL;
37018 struct cgraph_function_version_info *node_v = NULL;
37019 struct cgraph_function_version_info *first_v = NULL;
37021 tree dispatch_decl = NULL;
37023 struct cgraph_function_version_info *default_version_info = NULL;
37024 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37026 if (TARGET_DEBUG_TARGET)
37027 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37028 get_decl_name (fn));
37030 node = cgraph_node::get (fn);
37031 gcc_assert (node != NULL);
37033 node_v = node->function_version ();
37034 gcc_assert (node_v != NULL);
37036 if (node_v->dispatcher_resolver != NULL)
37037 return node_v->dispatcher_resolver;
37039 /* Find the default version and make it the first node. */
37040 first_v = node_v;
37041 /* Go to the beginning of the chain. */
37042 while (first_v->prev != NULL)
37043 first_v = first_v->prev;
37045 default_version_info = first_v;
37046 while (default_version_info != NULL)
37048 const tree decl2 = default_version_info->this_node->decl;
37049 if (is_function_default_version (decl2))
37050 break;
37051 default_version_info = default_version_info->next;
37054 /* If there is no default node, just return NULL. */
37055 if (default_version_info == NULL)
37056 return NULL;
37058 /* Make default info the first node. */
37059 if (first_v != default_version_info)
37061 default_version_info->prev->next = default_version_info->next;
37062 if (default_version_info->next)
37063 default_version_info->next->prev = default_version_info->prev;
37064 first_v->prev = default_version_info;
37065 default_version_info->next = first_v;
37066 default_version_info->prev = NULL;
37069 default_node = default_version_info->this_node;
37071 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37072 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37073 "target_clones attribute needs GLIBC (2.23 and newer) that "
37074 "exports hardware capability bits");
37075 #else
37077 if (targetm.has_ifunc_p ())
37079 struct cgraph_function_version_info *it_v = NULL;
37080 struct cgraph_node *dispatcher_node = NULL;
37081 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37083 /* Right now, the dispatching is done via ifunc. */
37084 dispatch_decl = make_dispatcher_decl (default_node->decl);
37086 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37087 gcc_assert (dispatcher_node != NULL);
37088 dispatcher_node->dispatcher_function = 1;
37089 dispatcher_version_info
37090 = dispatcher_node->insert_new_function_version ();
37091 dispatcher_version_info->next = default_version_info;
37092 dispatcher_node->definition = 1;
37094 /* Set the dispatcher for all the versions. */
37095 it_v = default_version_info;
37096 while (it_v != NULL)
37098 it_v->dispatcher_resolver = dispatch_decl;
37099 it_v = it_v->next;
37102 else
37104 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37105 "multiversioning needs ifunc which is not supported "
37106 "on this target");
37108 #endif
37110 return dispatch_decl;
37113 /* Make the resolver function decl to dispatch the versions of a multi-
37114 versioned function, DEFAULT_DECL. Create an empty basic block in the
37115 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37116 function. */
37118 static tree
37119 make_resolver_func (const tree default_decl,
37120 const tree dispatch_decl,
37121 basic_block *empty_bb)
37123 /* Make the resolver function static. The resolver function returns
37124 void *. */
37125 tree decl_name = clone_function_name (default_decl, "resolver");
37126 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37127 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37128 tree decl = build_fn_decl (resolver_name, type);
37129 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37131 DECL_NAME (decl) = decl_name;
37132 TREE_USED (decl) = 1;
37133 DECL_ARTIFICIAL (decl) = 1;
37134 DECL_IGNORED_P (decl) = 0;
37135 TREE_PUBLIC (decl) = 0;
37136 DECL_UNINLINABLE (decl) = 1;
37138 /* Resolver is not external, body is generated. */
37139 DECL_EXTERNAL (decl) = 0;
37140 DECL_EXTERNAL (dispatch_decl) = 0;
37142 DECL_CONTEXT (decl) = NULL_TREE;
37143 DECL_INITIAL (decl) = make_node (BLOCK);
37144 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37146 /* Build result decl and add to function_decl. */
37147 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37148 DECL_ARTIFICIAL (t) = 1;
37149 DECL_IGNORED_P (t) = 1;
37150 DECL_RESULT (decl) = t;
37152 gimplify_function_tree (decl);
37153 push_cfun (DECL_STRUCT_FUNCTION (decl));
37154 *empty_bb = init_lowered_empty_function (decl, false,
37155 profile_count::uninitialized ());
37157 cgraph_node::add_new_function (decl, true);
37158 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37160 pop_cfun ();
37162 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37163 DECL_ATTRIBUTES (dispatch_decl)
37164 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37166 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37168 return decl;
37171 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37172 return a pointer to VERSION_DECL if we are running on a machine that
37173 supports the hardware architecture bits indexed by CLONE_ISA. This function
37174 be called during version dispatch to decide which function version to
37175 execute. It returns the basic block at the end, to which more conditions
37176 can be added. */
37178 static basic_block
37179 add_condition_to_bb (tree function_decl, tree version_decl,
37180 int clone_isa, basic_block new_bb)
37182 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37184 gcc_assert (new_bb != NULL);
37185 gimple_seq gseq = bb_seq (new_bb);
37188 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37189 build_fold_addr_expr (version_decl));
37190 tree result_var = create_tmp_var (ptr_type_node);
37191 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37192 gimple *return_stmt = gimple_build_return (result_var);
37194 if (clone_isa == CLONE_DEFAULT)
37196 gimple_seq_add_stmt (&gseq, convert_stmt);
37197 gimple_seq_add_stmt (&gseq, return_stmt);
37198 set_bb_seq (new_bb, gseq);
37199 gimple_set_bb (convert_stmt, new_bb);
37200 gimple_set_bb (return_stmt, new_bb);
37201 pop_cfun ();
37202 return new_bb;
37205 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37206 tree cond_var = create_tmp_var (bool_int_type_node);
37207 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37208 const char *arg_str = rs6000_clone_map[clone_isa].name;
37209 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37210 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37211 gimple_call_set_lhs (call_cond_stmt, cond_var);
37213 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37214 gimple_set_bb (call_cond_stmt, new_bb);
37215 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37217 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37218 NULL_TREE, NULL_TREE);
37219 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37220 gimple_set_bb (if_else_stmt, new_bb);
37221 gimple_seq_add_stmt (&gseq, if_else_stmt);
37223 gimple_seq_add_stmt (&gseq, convert_stmt);
37224 gimple_seq_add_stmt (&gseq, return_stmt);
37225 set_bb_seq (new_bb, gseq);
37227 basic_block bb1 = new_bb;
37228 edge e12 = split_block (bb1, if_else_stmt);
37229 basic_block bb2 = e12->dest;
37230 e12->flags &= ~EDGE_FALLTHRU;
37231 e12->flags |= EDGE_TRUE_VALUE;
37233 edge e23 = split_block (bb2, return_stmt);
37234 gimple_set_bb (convert_stmt, bb2);
37235 gimple_set_bb (return_stmt, bb2);
37237 basic_block bb3 = e23->dest;
37238 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37240 remove_edge (e23);
37241 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37243 pop_cfun ();
37244 return bb3;
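/* A sketch of the resolver these blocks build, written as C (function
   names hypothetical; the predicate strings come from rs6000_clone_map,
   shown here with the typical ISA 3.0 / ISA 2.07 names):

     void *foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
         return (void *) foo_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
         return (void *) foo_power8;
       return (void *) foo_default;      // CLONE_DEFAULT, no test
     }

   Each add_condition_to_bb call contributes one test-and-return block,
   and the CLONE_DEFAULT case is the unconditional tail.  */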
37247 /* This function generates the dispatch function for multi-versioned functions.
37248 DISPATCH_DECL is the function which will contain the dispatch logic.
37249 FNDECLS are the function choices for dispatch, and is a tree chain.
37250 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37251 code is generated. */
37253 static int
37254 dispatch_function_versions (tree dispatch_decl,
37255 void *fndecls_p,
37256 basic_block *empty_bb)
37258 int ix;
37259 tree ele;
37260 vec<tree> *fndecls;
37261 tree clones[CLONE_MAX];
37263 if (TARGET_DEBUG_TARGET)
37264 fputs ("dispatch_function_versions, top\n", stderr);
37266 gcc_assert (dispatch_decl != NULL
37267 && fndecls_p != NULL
37268 && empty_bb != NULL);
37270 /* fndecls_p is actually a vector. */
37271 fndecls = static_cast<vec<tree> *> (fndecls_p);
37273 /* At least one more version other than the default. */
37274 gcc_assert (fndecls->length () >= 2);
37276 /* The first version in the vector is the default decl. */
37277 memset ((void *) clones, '\0', sizeof (clones));
37278 clones[CLONE_DEFAULT] = (*fndecls)[0];
37280 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37281 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37282 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37283 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37284 to insert the code here to do the call. */
37286 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37288 int priority = rs6000_clone_priority (ele);
37289 if (!clones[priority])
37290 clones[priority] = ele;
37293 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37294 if (clones[ix])
37296 if (TARGET_DEBUG_TARGET)
37297 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37298 ix, get_decl_name (clones[ix]));
37300 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37301 *empty_bb);
37304 return 0;
37307 /* Generate the dispatching code body to dispatch multi-versioned function
37308 DECL. The target hook is called to process the "target" attributes and
37309 provide the code to dispatch the right function at run-time. NODE points
37310 to the dispatcher decl whose body will be created. */
37312 static tree
37313 rs6000_generate_version_dispatcher_body (void *node_p)
37315 tree resolver;
37316 basic_block empty_bb;
37317 struct cgraph_node *node = (cgraph_node *) node_p;
37318 struct cgraph_function_version_info *ninfo = node->function_version ();
37320 if (ninfo->dispatcher_resolver)
37321 return ninfo->dispatcher_resolver;
37323 /* node is going to be an alias, so remove the finalized bit. */
37324 node->definition = false;
37326 /* The first version in the chain corresponds to the default version. */
37327 ninfo->dispatcher_resolver = resolver
37328 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37330 if (TARGET_DEBUG_TARGET)
37331 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37332 get_decl_name (resolver));
37334 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37335 auto_vec<tree, 2> fn_ver_vec;
37337 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37338 vinfo;
37339 vinfo = vinfo->next)
37341 struct cgraph_node *version = vinfo->this_node;
37342 /* Check for virtual functions here again, as by this time it should
37343 have been determined if this function needs a vtable index or
37344 not. This happens for methods in derived classes that override
37345 virtual methods in base classes but are not explicitly marked as
37346 virtual. */
37347 if (DECL_VINDEX (version->decl))
37348 sorry ("Virtual function multiversioning not supported");
37350 fn_ver_vec.safe_push (version->decl);
37353 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37354 cgraph_edge::rebuild_edges ();
37355 pop_cfun ();
37356 return resolver;
37360 /* Hook to determine if one function can safely inline another. */
37362 static bool
37363 rs6000_can_inline_p (tree caller, tree callee)
37365 bool ret = false;
37366 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37367 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37369 /* If callee has no option attributes, then it is ok to inline. */
37370 if (!callee_tree)
37371 ret = true;
37373 /* If caller has no option attributes but callee does, then it is not ok to
37374 inline. */
37375 else if (!caller_tree)
37376 ret = false;
37378 else
37380 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37381 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37383 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37384 can inline an altivec function but a non-vsx function can't inline a
37385 vsx function. */
37386 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37387 == callee_opts->x_rs6000_isa_flags)
37388 ret = true;
37391 if (TARGET_DEBUG_TARGET)
37392 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37393 get_decl_name (caller), get_decl_name (callee),
37394 (ret ? "can" : "cannot"));
37396 return ret;
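/* Illustrative example of the subset rule above (names hypothetical):

     __attribute__ ((target ("altivec"))) static int sum (int *p);
     __attribute__ ((target ("vsx"))) int caller (int *p)
     {
       return sum (p);   // OK: the VSX flags are a superset of Altivec.
     }

   Swapping the two attributes would make the callee's flags a strict
   superset of the caller's, and inlining would be refused.  */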
37399 /* Allocate a stack temp and fixup the address so it meets the particular
37400 memory requirements (either offsettable or REG+REG addressing). */
37402 rtx
37403 rs6000_allocate_stack_temp (machine_mode mode,
37404 bool offsettable_p,
37405 bool reg_reg_p)
37407 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37408 rtx addr = XEXP (stack, 0);
37409 int strict_p = reload_completed;
37411 if (!legitimate_indirect_address_p (addr, strict_p))
37413 if (offsettable_p
37414 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37415 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37417 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37418 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37421 return stack;
37424 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37425 to such a form to deal with memory reference instructions like STFIWX that
37426 only take reg+reg addressing. */
37428 rtx
37429 rs6000_address_for_fpconvert (rtx x)
37431 rtx addr;
37433 gcc_assert (MEM_P (x));
37434 addr = XEXP (x, 0);
37435 if (! legitimate_indirect_address_p (addr, reload_completed)
37436 && ! legitimate_indexed_address_p (addr, reload_completed))
37438 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37440 rtx reg = XEXP (addr, 0);
37441 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37442 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37443 gcc_assert (REG_P (reg));
37444 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37445 addr = reg;
37447 else if (GET_CODE (addr) == PRE_MODIFY)
37449 rtx reg = XEXP (addr, 0);
37450 rtx expr = XEXP (addr, 1);
37451 gcc_assert (REG_P (reg));
37452 gcc_assert (GET_CODE (expr) == PLUS);
37453 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37454 addr = reg;
37457 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37460 return x;
37463 /* Given a memory reference, if it is not in the form for altivec memory
37464 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37465 convert to the altivec format. */
37467 rtx
37468 rs6000_address_for_altivec (rtx x)
37470 gcc_assert (MEM_P (x));
37471 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37473 rtx addr = XEXP (x, 0);
37475 if (!legitimate_indexed_address_p (addr, reload_completed)
37476 && !legitimate_indirect_address_p (addr, reload_completed))
37477 addr = copy_to_mode_reg (Pmode, addr);
37479 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37480 x = change_address (x, GET_MODE (x), addr);
37483 return x;
37486 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37488 On the RS/6000, all integer constants are acceptable, most won't be valid
37489 for particular insns, though. Only easy FP constants are acceptable. */
37491 static bool
37492 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37494 if (TARGET_ELF && tls_referenced_p (x))
37495 return false;
37497 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37498 || GET_MODE (x) == VOIDmode
37499 || (TARGET_POWERPC64 && mode == DImode)
37500 || easy_fp_constant (x, mode)
37501 || easy_vector_constant (x, mode));
37505 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37507 static bool
37508 chain_already_loaded (rtx_insn *last)
37510 for (; last != NULL; last = PREV_INSN (last))
37512 if (NONJUMP_INSN_P (last))
37514 rtx patt = PATTERN (last);
37516 if (GET_CODE (patt) == SET)
37518 rtx lhs = XEXP (patt, 0);
37520 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37521 return true;
37525 return false;
37528 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37530 void
37531 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37533 const bool direct_call_p
37534 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37535 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37536 rtx toc_load = NULL_RTX;
37537 rtx toc_restore = NULL_RTX;
37538 rtx func_addr;
37539 rtx abi_reg = NULL_RTX;
37540 rtx call[4];
37541 int n_call;
37542 rtx insn;
37544 /* Handle longcall attributes. */
37545 if (INTVAL (cookie) & CALL_LONG)
37546 func_desc = rs6000_longcall_ref (func_desc);
37548 /* Handle indirect calls. */
37549 if (GET_CODE (func_desc) != SYMBOL_REF
37550 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37552 /* Save the TOC into its reserved slot before the call,
37553 and prepare to restore it after the call. */
37554 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37555 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37556 rtx stack_toc_mem = gen_frame_mem (Pmode,
37557 gen_rtx_PLUS (Pmode, stack_ptr,
37558 stack_toc_offset));
37559 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37560 gen_rtvec (1, stack_toc_offset),
37561 UNSPEC_TOCSLOT);
37562 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37564 /* Can we optimize saving the TOC in the prologue or
37565 do we need to do it at every call? */
37566 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37567 cfun->machine->save_toc_in_prologue = true;
37568 else
37570 MEM_VOLATILE_P (stack_toc_mem) = 1;
37571 emit_move_insn (stack_toc_mem, toc_reg);
37574 if (DEFAULT_ABI == ABI_ELFv2)
37576 /* A function pointer in the ELFv2 ABI is just a plain address, but
37577 the ABI requires it to be loaded into r12 before the call. */
37578 func_addr = gen_rtx_REG (Pmode, 12);
37579 emit_move_insn (func_addr, func_desc);
37580 abi_reg = func_addr;
37582 else
37584 /* A function pointer under AIX is a pointer to a data area whose
37585 first word contains the actual address of the function, whose
37586 second word contains a pointer to its TOC, and whose third word
37587 contains a value to place in the static chain register (r11).
37588 Note that if we load the static chain, our "trampoline" need
37589 not have any executable code. */
37591 /* Load up address of the actual function. */
37592 func_desc = force_reg (Pmode, func_desc);
37593 func_addr = gen_reg_rtx (Pmode);
37594 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37596 /* Prepare to load the TOC of the called function. Note that the
37597 TOC load must happen immediately before the actual call so
37598 that unwinding the TOC registers works correctly. See the
37599 comment in frob_update_context. */
37600 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37601 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37602 gen_rtx_PLUS (Pmode, func_desc,
37603 func_toc_offset));
37604 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37606 /* If we have a static chain, load it up. But, if the call was
37607 originally direct, the 3rd word has not been written since no
37608 trampoline has been built, so we ought not to load it, lest we
37609 overwrite a static chain value. */
37610 if (!direct_call_p
37611 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37612 && !chain_already_loaded (get_current_sequence ()->next->last))
37614 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37615 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37616 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37617 gen_rtx_PLUS (Pmode, func_desc,
37618 func_sc_offset));
37619 emit_move_insn (sc_reg, func_sc_mem);
37620 abi_reg = sc_reg;
37624 else
37626 /* Direct calls use the TOC: for local calls, the callee will
37627 assume the TOC register is set; for non-local calls, the
37628 PLT stub needs the TOC register. */
37629 abi_reg = toc_reg;
37630 func_addr = func_desc;
37633 /* Create the call. */
37634 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37635 if (value != NULL_RTX)
37636 call[0] = gen_rtx_SET (value, call[0]);
37637 n_call = 1;
37639 if (toc_load)
37640 call[n_call++] = toc_load;
37641 if (toc_restore)
37642 call[n_call++] = toc_restore;
37644 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37646 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37647 insn = emit_call_insn (insn);
37649 /* Mention all registers defined by the ABI to hold information
37650 as uses in CALL_INSN_FUNCTION_USAGE. */
37651 if (abi_reg)
37652 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
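/* A sketch of what the indirect-call RTL above typically assembles to in
   the 64-bit AIX/ELFv1 flavour (register numbers and the exact TOC save
   slot are illustrative; they depend on the ABI and word size):

     std 2,40(1)      # save caller's TOC (unless saved in the prologue)
     ld 0,0(9)        # descriptor word 0: function address
     mtctr 0
     ld 11,16(9)      # descriptor word 2: static chain
     ld 2,8(9)        # descriptor word 1: callee's TOC
     bctrl
     ld 2,40(1)       # restore caller's TOC

   where r9 holds the address of the function descriptor.  */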
37655 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37657 void
37658 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37660 rtx call[2];
37661 rtx insn;
37663 gcc_assert (INTVAL (cookie) == 0);
37665 /* Create the call. */
37666 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37667 if (value != NULL_RTX)
37668 call[0] = gen_rtx_SET (value, call[0]);
37670 call[1] = simple_return_rtx;
37672 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37673 insn = emit_call_insn (insn);
37675 /* Note use of the TOC register. */
37676 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37679 /* Return whether we need to always update the saved TOC pointer when we update
37680 the stack pointer. */
37682 static bool
37683 rs6000_save_toc_in_prologue_p (void)
37685 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37688 #ifdef HAVE_GAS_HIDDEN
37689 # define USE_HIDDEN_LINKONCE 1
37690 #else
37691 # define USE_HIDDEN_LINKONCE 0
37692 #endif
37694 /* Fills in the label name that should be used for a 476 link stack thunk. */
37696 void
37697 get_ppc476_thunk_name (char name[32])
37699 gcc_assert (TARGET_LINK_STACK);
37701 if (USE_HIDDEN_LINKONCE)
37702 sprintf (name, "__ppc476.get_thunk");
37703 else
37704 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37707 /* This function emits the simple thunk routine that is used to preserve
37708 the link stack on the 476 cpu. */
37710 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37711 static void
37712 rs6000_code_end (void)
37714 char name[32];
37715 tree decl;
37717 if (!TARGET_LINK_STACK)
37718 return;
37720 get_ppc476_thunk_name (name);
37722 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37723 build_function_type_list (void_type_node, NULL_TREE));
37724 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37725 NULL_TREE, void_type_node);
37726 TREE_PUBLIC (decl) = 1;
37727 TREE_STATIC (decl) = 1;
37729 #if RS6000_WEAK
37730 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37732 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37733 targetm.asm_out.unique_section (decl, 0);
37734 switch_to_section (get_named_section (decl, NULL, 0));
37735 DECL_WEAK (decl) = 1;
37736 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37737 targetm.asm_out.globalize_label (asm_out_file, name);
37738 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37739 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37741 else
37742 #endif
37744 switch_to_section (text_section);
37745 ASM_OUTPUT_LABEL (asm_out_file, name);
37748 DECL_INITIAL (decl) = make_node (BLOCK);
37749 current_function_decl = decl;
37750 allocate_struct_function (decl, false);
37751 init_function_start (decl);
37752 first_function_block_is_cold = false;
37753 /* Make sure unwind info is emitted for the thunk if needed. */
37754 final_start_function (emit_barrier (), asm_out_file, 1);
37756 fputs ("\tblr\n", asm_out_file);
37758 final_end_function ();
37759 init_insn_lengths ();
37760 free_after_compilation (cfun);
37761 set_cfun (NULL);
37762 current_function_decl = NULL;
37765 /* Add r30 to hard reg set if the prologue sets it up and it is not
37766 pic_offset_table_rtx. */
37768 static void
37769 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37771 if (!TARGET_SINGLE_PIC_BASE
37772 && TARGET_TOC
37773 && TARGET_MINIMAL_TOC
37774 && !constant_pool_empty_p ())
37775 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37776 if (cfun->machine->split_stack_argp_used)
37777 add_to_hard_reg_set (&set->set, Pmode, 12);
37781 /* Helper function for rs6000_split_logical to emit a logical instruction after
37782 splitting the operation into single GPR registers.
37784 DEST is the destination register.
37785 OP1 and OP2 are the input source registers.
37786 CODE is the base operation (AND, IOR, XOR, NOT).
37787 MODE is the machine mode.
37788 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37789 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37790 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37792 static void
37793 rs6000_split_logical_inner (rtx dest,
37794 rtx op1,
37795 rtx op2,
37796 enum rtx_code code,
37797 machine_mode mode,
37798 bool complement_final_p,
37799 bool complement_op1_p,
37800 bool complement_op2_p)
37802 rtx bool_rtx;
37804 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37805 if (op2 && GET_CODE (op2) == CONST_INT
37806 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37807 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37809 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37810 HOST_WIDE_INT value = INTVAL (op2) & mask;
37812 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37813 if (code == AND)
37815 if (value == 0)
37817 emit_insn (gen_rtx_SET (dest, const0_rtx));
37818 return;
37821 else if (value == mask)
37823 if (!rtx_equal_p (dest, op1))
37824 emit_insn (gen_rtx_SET (dest, op1));
37825 return;
37829 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37830 into separate ORI/ORIS or XORI/XORIS instructions. */
37831 else if (code == IOR || code == XOR)
37833 if (value == 0)
37835 if (!rtx_equal_p (dest, op1))
37836 emit_insn (gen_rtx_SET (dest, op1));
37837 return;
37842 if (code == AND && mode == SImode
37843 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37845 emit_insn (gen_andsi3 (dest, op1, op2));
37846 return;
37849 if (complement_op1_p)
37850 op1 = gen_rtx_NOT (mode, op1);
37852 if (complement_op2_p)
37853 op2 = gen_rtx_NOT (mode, op2);
37855 /* For canonical RTL, if only one arm is inverted it is the first. */
37856 if (!complement_op1_p && complement_op2_p)
37857 std::swap (op1, op2);
37859 bool_rtx = ((code == NOT)
37860 ? gen_rtx_NOT (mode, op1)
37861 : gen_rtx_fmt_ee (code, mode, op1, op2));
37863 if (complement_final_p)
37864 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37866 emit_insn (gen_rtx_SET (dest, bool_rtx));
37869 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37870 operations are split immediately during RTL generation to allow for more
37871 optimizations of the AND/IOR/XOR.
37873 OPERANDS is an array containing the destination and two input operands.
37874 CODE is the base operation (AND, IOR, XOR, NOT).
37876 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37877 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37878 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37882 static void
37883 rs6000_split_logical_di (rtx operands[3],
37884 enum rtx_code code,
37885 bool complement_final_p,
37886 bool complement_op1_p,
37887 bool complement_op2_p)
37889 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37890 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37891 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37892 enum hi_lo { hi = 0, lo = 1 };
37893 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37894 size_t i;
37896 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37897 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37898 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37899 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37901 if (code == NOT)
37902 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37903 else
37905 if (GET_CODE (operands[2]) != CONST_INT)
37907 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37908 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37910 else
37912 HOST_WIDE_INT value = INTVAL (operands[2]);
37913 HOST_WIDE_INT value_hi_lo[2];
37915 gcc_assert (!complement_final_p);
37916 gcc_assert (!complement_op1_p);
37917 gcc_assert (!complement_op2_p);
37919 value_hi_lo[hi] = value >> 32;
37920 value_hi_lo[lo] = value & lower_32bits;
37922 for (i = 0; i < 2; i++)
37924 HOST_WIDE_INT sub_value = value_hi_lo[i];
37926 if (sub_value & sign_bit)
37927 sub_value |= upper_32bits;
37929 op2_hi_lo[i] = GEN_INT (sub_value);
37931 /* If this is an AND instruction, check to see if we need to load
37932 the value in a register. */
37933 if (code == AND && sub_value != -1 && sub_value != 0
37934 && !and_operand (op2_hi_lo[i], SImode))
37935 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37940 for (i = 0; i < 2; i++)
37942 /* Split large IOR/XOR operations. */
37943 if ((code == IOR || code == XOR)
37944 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37945 && !complement_final_p
37946 && !complement_op1_p
37947 && !complement_op2_p
37948 && !logical_const_operand (op2_hi_lo[i], SImode))
37950 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37951 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37952 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37953 rtx tmp = gen_reg_rtx (SImode);
37955 /* Make sure the constant is sign extended. */
37956 if ((hi_16bits & sign_bit) != 0)
37957 hi_16bits |= upper_32bits;
37959 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37960 code, SImode, false, false, false);
37962 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37963 code, SImode, false, false, false);
37965 else
37966 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37967 code, SImode, complement_final_p,
37968 complement_op1_p, complement_op2_p);
37971 return;
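/* Worked example: splitting a 32-bit DImode XOR with the constant
   0x1deadbeef (register names below are symbolic: ah/al are the source
   halves, dh/dl the destination halves, t a temporary).  The high-word
   constant 0x1 is a logical_const_operand, but the low-word constant
   0xdeadbeef is not, so the low word goes through a temporary:

     xori  dh,ah,1          # high word in one instruction
     xoris t,al,0xdead      # low word, upper 16 bits
     xori  dl,t,0xbeef      # low word, lower 16 bits

   AND constants that cannot be encoded directly are instead forced into
   a register, as in the force_reg call above.  */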
37974 /* Split the insns that make up boolean operations operating on multiple GPR
37975 registers. The boolean MD patterns ensure that the inputs either are
37976 exactly the same as the output registers, or there is no overlap.
37978 OPERANDS is an array containing the destination and two input operands.
37979 CODE is the base operation (AND, IOR, XOR, NOT).
37980 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37981 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37982 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37984 void
37985 rs6000_split_logical (rtx operands[3],
37986 enum rtx_code code,
37987 bool complement_final_p,
37988 bool complement_op1_p,
37989 bool complement_op2_p)
37991 machine_mode mode = GET_MODE (operands[0]);
37992 machine_mode sub_mode;
37993 rtx op0, op1, op2;
37994 int sub_size, regno0, regno1, nregs, i;
37996 /* If this is DImode, use the specialized version that can run before
37997 register allocation. */
37998 if (mode == DImode && !TARGET_POWERPC64)
38000 rs6000_split_logical_di (operands, code, complement_final_p,
38001 complement_op1_p, complement_op2_p);
38002 return;
38005 op0 = operands[0];
38006 op1 = operands[1];
38007 op2 = (code == NOT) ? NULL_RTX : operands[2];
38008 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38009 sub_size = GET_MODE_SIZE (sub_mode);
38010 regno0 = REGNO (op0);
38011 regno1 = REGNO (op1);
38013 gcc_assert (reload_completed);
38014 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38015 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38017 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38018 gcc_assert (nregs > 1);
38020 if (op2 && REG_P (op2))
38021 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38023 for (i = 0; i < nregs; i++)
38025 int offset = i * sub_size;
38026 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38027 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38028 rtx sub_op2 = ((code == NOT)
38029 ? NULL_RTX
38030 : simplify_subreg (sub_mode, op2, mode, offset));
38032 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38033 complement_final_p, complement_op1_p,
38034 complement_op2_p);
38037 return;
38041 /* Return true if the peephole2 can combine an addis instruction and a load
38042 with an offset into a pair of instructions that can be fused together on
38043 a power8. */
38045 bool
38046 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38047 rtx addis_value, /* addis value. */
38048 rtx target, /* target register that is loaded. */
38049 rtx mem) /* bottom part of the memory addr. */
38051 rtx addr;
38052 rtx base_reg;
38054 /* Validate arguments. */
38055 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38056 return false;
38058 if (!base_reg_operand (target, GET_MODE (target)))
38059 return false;
38061 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38062 return false;
38064 /* Allow sign/zero extension. */
38065 if (GET_CODE (mem) == ZERO_EXTEND
38066 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38067 mem = XEXP (mem, 0);
38069 if (!MEM_P (mem))
38070 return false;
38072 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38073 return false;
38075 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38076 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38077 return false;
38079 /* Validate that the register used to load the high value is either the
38080 register being loaded, or we can safely replace its use.
38082 This function is only called from the peephole2 pass and we assume that
38083 there are 2 instructions in the peephole (addis and load), so we want to
38084 check that the target register is not used in the memory address and that
38085 the register holding the addis result is dead after the peephole. */
38086 if (REGNO (addis_reg) != REGNO (target))
38088 if (reg_mentioned_p (target, mem))
38089 return false;
38091 if (!peep2_reg_dead_p (2, addis_reg))
38092 return false;
38094 /* If the target register being loaded is the stack pointer, we must
38095 avoid loading any other value into it, even temporarily. */
38096 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38097 return false;
38100 base_reg = XEXP (addr, 0);
38101 return REGNO (addis_reg) == REGNO (base_reg);
38104 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38105 sequence. We adjust the addis register to use the target register. If the
38106 load sign extends, we adjust the code to do a zero-extending load followed
38107 by an explicit sign extension afterwards, since the fusion only covers
38108 zero-extending loads.
38110 The operands are:
38111 operands[0] register set with addis (to be replaced with target)
38112 operands[1] value set via addis
38113 operands[2] target register being loaded
38114 operands[3] D-form memory reference using operands[0]. */
38116 void
38117 expand_fusion_gpr_load (rtx *operands)
38119 rtx addis_value = operands[1];
38120 rtx target = operands[2];
38121 rtx orig_mem = operands[3];
38122 rtx new_addr, new_mem, orig_addr, offset;
38123 enum rtx_code plus_or_lo_sum;
38124 machine_mode target_mode = GET_MODE (target);
38125 machine_mode extend_mode = target_mode;
38126 machine_mode ptr_mode = Pmode;
38127 enum rtx_code extend = UNKNOWN;
38129 if (GET_CODE (orig_mem) == ZERO_EXTEND
38130 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38132 extend = GET_CODE (orig_mem);
38133 orig_mem = XEXP (orig_mem, 0);
38134 target_mode = GET_MODE (orig_mem);
38137 gcc_assert (MEM_P (orig_mem));
38139 orig_addr = XEXP (orig_mem, 0);
38140 plus_or_lo_sum = GET_CODE (orig_addr);
38141 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38143 offset = XEXP (orig_addr, 1);
38144 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38145 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38147 if (extend != UNKNOWN)
38148 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38150 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38151 UNSPEC_FUSION_GPR);
38152 emit_insn (gen_rtx_SET (target, new_mem));
38154 if (extend == SIGN_EXTEND)
38156 int sub_off = ((BYTES_BIG_ENDIAN)
38157 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38158 : 0);
38159 rtx sign_reg
38160 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38162 emit_insn (gen_rtx_SET (target,
38163 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38166 return;
38169 /* Emit the addis instruction that will be part of a fused instruction
38170 sequence. */
38172 void
38173 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38174 const char *mode_name)
38176 rtx fuse_ops[10];
38177 char insn_template[80];
38178 const char *addis_str = NULL;
38179 const char *comment_str = ASM_COMMENT_START;
38181 if (*comment_str == ' ')
38182 comment_str++;
38184 /* Emit the addis instruction. */
38185 fuse_ops[0] = target;
38186 if (satisfies_constraint_L (addis_value))
38188 fuse_ops[1] = addis_value;
38189 addis_str = "lis %0,%v1";
38192 else if (GET_CODE (addis_value) == PLUS)
38194 rtx op0 = XEXP (addis_value, 0);
38195 rtx op1 = XEXP (addis_value, 1);
38197 if (REG_P (op0) && CONST_INT_P (op1)
38198 && satisfies_constraint_L (op1))
38200 fuse_ops[1] = op0;
38201 fuse_ops[2] = op1;
38202 addis_str = "addis %0,%1,%v2";
38206 else if (GET_CODE (addis_value) == HIGH)
38208 rtx value = XEXP (addis_value, 0);
38209 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38211 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38212 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38213 if (TARGET_ELF)
38214 addis_str = "addis %0,%2,%1@toc@ha";
38216 else if (TARGET_XCOFF)
38217 addis_str = "addis %0,%1@u(%2)";
38219 else
38220 gcc_unreachable ();
38223 else if (GET_CODE (value) == PLUS)
38225 rtx op0 = XEXP (value, 0);
38226 rtx op1 = XEXP (value, 1);
38228 if (GET_CODE (op0) == UNSPEC
38229 && XINT (op0, 1) == UNSPEC_TOCREL
38230 && CONST_INT_P (op1))
38232 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38233 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38234 fuse_ops[3] = op1;
38235 if (TARGET_ELF)
38236 addis_str = "addis %0,%2,%1+%3@toc@ha";
38238 else if (TARGET_XCOFF)
38239 addis_str = "addis %0,%1+%3@u(%2)";
38241 else
38242 gcc_unreachable ();
38246 else if (satisfies_constraint_L (value))
38248 fuse_ops[1] = value;
38249 addis_str = "lis %0,%v1";
38252 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38254 fuse_ops[1] = value;
38255 addis_str = "lis %0,%1@ha";
38259 if (!addis_str)
38260 fatal_insn ("Could not generate addis value for fusion", addis_value);
38262 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38263 comment, mode_name);
38264 output_asm_insn (insn_template, fuse_ops);
38267 /* Emit a D-form load or store instruction that is the second instruction
38268 of a fusion sequence. */
38270 void
38271 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38272 const char *insn_str)
38274 rtx fuse_ops[10];
38275 char insn_template[80];
38277 fuse_ops[0] = load_store_reg;
38278 fuse_ops[1] = addis_reg;
38280 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38282 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38283 fuse_ops[2] = offset;
38284 output_asm_insn (insn_template, fuse_ops);
38287 else if (GET_CODE (offset) == UNSPEC
38288 && XINT (offset, 1) == UNSPEC_TOCREL)
38290 if (TARGET_ELF)
38291 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38293 else if (TARGET_XCOFF)
38294 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38296 else
38297 gcc_unreachable ();
38299 fuse_ops[2] = XVECEXP (offset, 0, 0);
38300 output_asm_insn (insn_template, fuse_ops);
38303 else if (GET_CODE (offset) == PLUS
38304 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38305 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38306 && CONST_INT_P (XEXP (offset, 1)))
38308 rtx tocrel_unspec = XEXP (offset, 0);
38309 if (TARGET_ELF)
38310 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38312 else if (TARGET_XCOFF)
38313 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38315 else
38316 gcc_unreachable ();
38318 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38319 fuse_ops[3] = XEXP (offset, 1);
38320 output_asm_insn (insn_template, fuse_ops);
38323 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38325 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38327 fuse_ops[2] = offset;
38328 output_asm_insn (insn_template, fuse_ops);
38331 else
38332 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38334 return;
38337 /* Wrap a TOC address that can be fused to indicate that special fusion
38338 processing is needed. */
38340 rtx
38341 fusion_wrap_memory_address (rtx old_mem)
38343 rtx old_addr = XEXP (old_mem, 0);
38344 rtvec v = gen_rtvec (1, old_addr);
38345 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38346 return replace_equiv_address_nv (old_mem, new_addr, false);
38349 /* Given an address, convert it into the addis and load offset parts. Addresses
38350 created during the peephole2 process look like:
38351 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38352 (unspec [(...)] UNSPEC_TOCREL))
38354 Addresses created via toc fusion look like:
38355 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38357 static void
38358 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38360 rtx hi, lo;
38362 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38364 lo = XVECEXP (addr, 0, 0);
38365 hi = gen_rtx_HIGH (Pmode, lo);
38367 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38369 hi = XEXP (addr, 0);
38370 lo = XEXP (addr, 1);
38372 else
38373 gcc_unreachable ();
38375 *p_hi = hi;
38376 *p_lo = lo;
38379 /* Return a string to fuse an addis instruction with a gpr load into the same
38380 register that the addis instruction set. The address that is used
38381 is the logical address that was formed during peephole2:
38382 (lo_sum (high) (low-part))
38384 Or the address is the TOC address that is wrapped before register allocation:
38385 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38387 The code is complicated, so we call output_asm_insn directly, and just
38388 return "". */
38390 const char *
38391 emit_fusion_gpr_load (rtx target, rtx mem)
38393 rtx addis_value;
38394 rtx addr;
38395 rtx load_offset;
38396 const char *load_str = NULL;
38397 const char *mode_name = NULL;
38398 machine_mode mode;
38400 if (GET_CODE (mem) == ZERO_EXTEND)
38401 mem = XEXP (mem, 0);
38403 gcc_assert (REG_P (target) && MEM_P (mem));
38405 addr = XEXP (mem, 0);
38406 fusion_split_address (addr, &addis_value, &load_offset);
38408 /* Now emit the load instruction to the same register. */
38409 mode = GET_MODE (mem);
38410 switch (mode)
38412 case E_QImode:
38413 mode_name = "char";
38414 load_str = "lbz";
38415 break;
38417 case E_HImode:
38418 mode_name = "short";
38419 load_str = "lhz";
38420 break;
38422 case E_SImode:
38423 case E_SFmode:
38424 mode_name = (mode == SFmode) ? "float" : "int";
38425 load_str = "lwz";
38426 break;
38428 case E_DImode:
38429 case E_DFmode:
38430 gcc_assert (TARGET_POWERPC64);
38431 mode_name = (mode == DFmode) ? "double" : "long";
38432 load_str = "ld";
38433 break;
38435 default:
38436 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38439 /* Emit the addis instruction. */
38440 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38442 /* Emit the D-form load instruction. */
38443 emit_fusion_load_store (target, target, load_offset, load_str);
38445 return "";
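/* For example, the fused pair emitted for a TOC-relative SImode load on
   ELF looks like the following (register number hypothetical):

     addis 9,2,sym@toc@ha         # gpr load fusion, type int
     lwz 9,sym@toc@l(9)

   Both instructions target the same GPR, which is the pattern power8
   fuses in hardware.  */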
38449 /* Return true if the peephole2 can combine a load/store involving a
38450 combination of an addis instruction and the memory operation. This was
38451 added to the ISA 3.0 (power9) hardware. */
38453 bool
38454 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38455 rtx addis_value, /* addis value. */
38456 rtx dest, /* destination (memory or register). */
38457 rtx src) /* source (register or memory). */
38459 rtx addr, mem, offset;
38460 machine_mode mode = GET_MODE (src);
38462 /* Validate arguments. */
38463 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38464 return false;
38466 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38467 return false;
38469 /* Ignore extend operations that are part of the load. */
38470 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38471 src = XEXP (src, 0);
38473 /* Test for memory<-register or register<-memory. */
38474 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38476 if (!MEM_P (dest))
38477 return false;
38479 mem = dest;
38482 else if (MEM_P (src))
38484 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38485 return false;
38487 mem = src;
38490 else
38491 return false;
38493 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38494 if (GET_CODE (addr) == PLUS)
38496 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38497 return false;
38499 return satisfies_constraint_I (XEXP (addr, 1));
38502 else if (GET_CODE (addr) == LO_SUM)
38504 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38505 return false;
38507 offset = XEXP (addr, 1);
38508 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38509 return small_toc_ref (offset, GET_MODE (offset));
38511 else if (TARGET_ELF && !TARGET_POWERPC64)
38512 return CONSTANT_P (offset);
38515 return false;
38518 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38519 load sequence.
38521 The operands are:
38522 operands[0] register set with addis
38523 operands[1] value set via addis
38524 operands[2] target register being loaded
38525 operands[3] D-form memory reference using operands[0].
38527 This is similar to the fusion introduced with power8, except it scales to
38528 both loads/stores and does not require the result register to be the same as
38529 the base register. At the moment, we only do this if the register set by
38530 the addis is dead. */
38532 void
38533 expand_fusion_p9_load (rtx *operands)
38535 rtx tmp_reg = operands[0];
38536 rtx addis_value = operands[1];
38537 rtx target = operands[2];
38538 rtx orig_mem = operands[3];
38539 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38540 enum rtx_code plus_or_lo_sum;
38541 machine_mode target_mode = GET_MODE (target);
38542 machine_mode extend_mode = target_mode;
38543 machine_mode ptr_mode = Pmode;
38544 enum rtx_code extend = UNKNOWN;
38546 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38548 extend = GET_CODE (orig_mem);
38549 orig_mem = XEXP (orig_mem, 0);
38550 target_mode = GET_MODE (orig_mem);
38553 gcc_assert (MEM_P (orig_mem));
38555 orig_addr = XEXP (orig_mem, 0);
38556 plus_or_lo_sum = GET_CODE (orig_addr);
38557 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38559 offset = XEXP (orig_addr, 1);
38560 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38561 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38563 if (extend != UNKNOWN)
38564 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38566 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38567 UNSPEC_FUSION_P9);
38569 set = gen_rtx_SET (target, new_mem);
38570 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38571 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38572 emit_insn (insn);
38574 return;
38577 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38578 store sequence.
38580 The operands are:
38581 operands[0] register set with addis
38582 operands[1] value set via addis
38583 operands[2] target D-form memory being stored to
38584 operands[3] register being stored
38586 This is similar to the fusion introduced with power8, except it scales to
38587 both loads/stores and does not require the result register to be the same as
38588 the base register. At the moment, we only do this if the register set by
38589 the addis is dead. */
38591 void
38592 expand_fusion_p9_store (rtx *operands)
38594 rtx tmp_reg = operands[0];
38595 rtx addis_value = operands[1];
38596 rtx orig_mem = operands[2];
38597 rtx src = operands[3];
38598 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38599 enum rtx_code plus_or_lo_sum;
38600 machine_mode target_mode = GET_MODE (orig_mem);
38601 machine_mode ptr_mode = Pmode;
38603 gcc_assert (MEM_P (orig_mem));
38605 orig_addr = XEXP (orig_mem, 0);
38606 plus_or_lo_sum = GET_CODE (orig_addr);
38607 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38609 offset = XEXP (orig_addr, 1);
38610 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38611 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38613 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38614 UNSPEC_FUSION_P9);
38616 set = gen_rtx_SET (new_mem, new_src);
38617 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38618 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38619 emit_insn (insn);
38621 return;
38624 /* Return a string to fuse an addis instruction with a load using extended
38625 fusion. The address that is used is the logical address that was formed
38626 during peephole2: (lo_sum (high) (low-part))
38628 The code is complicated, so we call output_asm_insn directly, and just
38629 return "". */
38631 const char *
38632 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38634 machine_mode mode = GET_MODE (reg);
38635 rtx hi;
38636 rtx lo;
38637 rtx addr;
38638 const char *load_string;
38639 int r;
38641 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38643 mem = XEXP (mem, 0);
38644 mode = GET_MODE (mem);
38647 if (GET_CODE (reg) == SUBREG)
38649 gcc_assert (SUBREG_BYTE (reg) == 0);
38650 reg = SUBREG_REG (reg);
38653 if (!REG_P (reg))
38654 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38656 r = REGNO (reg);
38657 if (FP_REGNO_P (r))
38659 if (mode == SFmode)
38660 load_string = "lfs";
38661 else if (mode == DFmode || mode == DImode)
38662 load_string = "lfd";
38663 else
38664 gcc_unreachable ();
38666 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38668 if (mode == SFmode)
38669 load_string = "lxssp";
38670 else if (mode == DFmode || mode == DImode)
38671 load_string = "lxsd";
38672 else
38673 gcc_unreachable ();
38675 else if (INT_REGNO_P (r))
38677 switch (mode)
38679 case E_QImode:
38680 load_string = "lbz";
38681 break;
38682 case E_HImode:
38683 load_string = "lhz";
38684 break;
38685 case E_SImode:
38686 case E_SFmode:
38687 load_string = "lwz";
38688 break;
38689 case E_DImode:
38690 case E_DFmode:
38691 if (!TARGET_POWERPC64)
38692 gcc_unreachable ();
38693 load_string = "ld";
38694 break;
38695 default:
38696 gcc_unreachable ();
38697 }
38698 }
38699 else
38700 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38702 if (!MEM_P (mem))
38703 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38705 addr = XEXP (mem, 0);
38706 fusion_split_address (addr, &hi, &lo);
38708 /* Emit the addis instruction. */
38709 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38711 /* Emit the D-form load instruction. */
38712 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38714 return "";
38717 /* Return a string to fuse an addis instruction with a store using extended
38718 fusion. The address that is used is the logical address that was formed
38719 during peephole2: (lo_sum (high) (low-part))
38721 The code is complicated, so we call output_asm_insn directly, and just
38722 return "". */
38724 const char *
38725 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38726 {
38727 machine_mode mode = GET_MODE (reg);
38728 rtx hi;
38729 rtx lo;
38730 rtx addr;
38731 const char *store_string;
38732 int r;
38734 if (GET_CODE (reg) == SUBREG)
38735 {
38736 gcc_assert (SUBREG_BYTE (reg) == 0);
38737 reg = SUBREG_REG (reg);
38738 }
38740 if (!REG_P (reg))
38741 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38743 r = REGNO (reg);
38744 if (FP_REGNO_P (r))
38745 {
38746 if (mode == SFmode)
38747 store_string = "stfs";
38748 else if (mode == DFmode)
38749 store_string = "stfd";
38750 else
38751 gcc_unreachable ();
38752 }
38753 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38754 {
38755 if (mode == SFmode)
38756 store_string = "stxssp";
38757 else if (mode == DFmode || mode == DImode)
38758 store_string = "stxsd";
38759 else
38760 gcc_unreachable ();
38761 }
38762 else if (INT_REGNO_P (r))
38763 {
38764 switch (mode)
38765 {
38766 case E_QImode:
38767 store_string = "stb";
38768 break;
38769 case E_HImode:
38770 store_string = "sth";
38771 break;
38772 case E_SImode:
38773 case E_SFmode:
38774 store_string = "stw";
38775 break;
38776 case E_DImode:
38777 case E_DFmode:
38778 if (!TARGET_POWERPC64)
38779 gcc_unreachable ();
38780 store_string = "std";
38781 break;
38782 default:
38783 gcc_unreachable ();
38784 }
38785 }
38786 else
38787 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38789 if (!MEM_P (mem))
38790 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38792 addr = XEXP (mem, 0);
38793 fusion_split_address (addr, &hi, &lo);
38795 /* Emit the addis instruction. */
38796 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
38798 /* Emit the D-form store instruction. */
38799 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38801 return "";
38804 #ifdef RS6000_GLIBC_ATOMIC_FENV
38805 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38806 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38807 #endif
38809 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38811 static void
38812 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38813 {
38814 if (!TARGET_HARD_FLOAT)
38815 {
38816 #ifdef RS6000_GLIBC_ATOMIC_FENV
38817 if (atomic_hold_decl == NULL_TREE)
38818 {
38819 atomic_hold_decl
38820 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38821 get_identifier ("__atomic_feholdexcept"),
38822 build_function_type_list (void_type_node,
38823 double_ptr_type_node,
38824 NULL_TREE));
38825 TREE_PUBLIC (atomic_hold_decl) = 1;
38826 DECL_EXTERNAL (atomic_hold_decl) = 1;
38827 }
38829 if (atomic_clear_decl == NULL_TREE)
38830 {
38831 atomic_clear_decl
38832 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38833 get_identifier ("__atomic_feclearexcept"),
38834 build_function_type_list (void_type_node,
38835 NULL_TREE));
38836 TREE_PUBLIC (atomic_clear_decl) = 1;
38837 DECL_EXTERNAL (atomic_clear_decl) = 1;
38838 }
38840 tree const_double = build_qualified_type (double_type_node,
38841 TYPE_QUAL_CONST);
38842 tree const_double_ptr = build_pointer_type (const_double);
38843 if (atomic_update_decl == NULL_TREE)
38844 {
38845 atomic_update_decl
38846 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38847 get_identifier ("__atomic_feupdateenv"),
38848 build_function_type_list (void_type_node,
38849 const_double_ptr,
38850 NULL_TREE));
38851 TREE_PUBLIC (atomic_update_decl) = 1;
38852 DECL_EXTERNAL (atomic_update_decl) = 1;
38853 }
38855 tree fenv_var = create_tmp_var_raw (double_type_node);
38856 TREE_ADDRESSABLE (fenv_var) = 1;
38857 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38859 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38860 *clear = build_call_expr (atomic_clear_decl, 0);
38861 *update = build_call_expr (atomic_update_decl, 1,
38862 fold_convert (const_double_ptr, fenv_addr));
38863 #endif
38864 return;
38865 }
38867 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38868 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38869 tree call_mffs = build_call_expr (mffs, 0);
38871 /* Generates the equivalent of feholdexcept (&fenv_var)
38873 *fenv_var = __builtin_mffs ();
38874 double fenv_hold;
38875 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38876 __builtin_mtfsf (0xff, fenv_hold); */
38878 /* Mask to clear everything except for the rounding modes and non-IEEE
38879 arithmetic flag. */
38880 const unsigned HOST_WIDE_INT hold_exception_mask =
38881 HOST_WIDE_INT_C (0xffffffff00000007);
38883 tree fenv_var = create_tmp_var_raw (double_type_node);
38885 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38887 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38888 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38889 build_int_cst (uint64_type_node,
38890 hold_exception_mask));
38892 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38893 fenv_llu_and);
38895 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38896 build_int_cst (unsigned_type_node, 0xff),
38897 fenv_hold_mtfsf);
38899 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38901 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38903 double fenv_clear = __builtin_mffs ();
38904 *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
38905 __builtin_mtfsf (0xff, fenv_clear); */
38907 /* Mask to clear the entire lower word of the FPSCR image, i.e. all
38908 exception flags, enable bits, and rounding modes. */
38909 const unsigned HOST_WIDE_INT clear_exception_mask =
38910 HOST_WIDE_INT_C (0xffffffff00000000);
38912 tree fenv_clear = create_tmp_var_raw (double_type_node);
38914 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38916 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38917 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38918 fenv_clean_llu,
38919 build_int_cst (uint64_type_node,
38920 clear_exception_mask));
38922 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38923 fenv_clear_llu_and);
38925 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38926 build_int_cst (unsigned_type_node, 0xff),
38927 fenv_clear_mtfsf);
38929 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38931 /* Generates the equivalent of feupdateenv (&fenv_var)
38933 double old_fenv = __builtin_mffs ();
38934 double fenv_update;
38935 *(uint64_t*)&fenv_update = (*(uint64_t*)&old & 0xffffffff1fffff00LL) |
38936 (*(uint64_t*)fenv_var 0x1ff80fff);
38937 __builtin_mtfsf (0xff, fenv_update); */
38939 const unsigned HOST_WIDE_INT update_exception_mask =
38940 HOST_WIDE_INT_C (0xffffffff1fffff00);
38941 const unsigned HOST_WIDE_INT new_exception_mask =
38942 HOST_WIDE_INT_C (0x1ff80fff);
38944 tree old_fenv = create_tmp_var_raw (double_type_node);
38945 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38947 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38948 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38949 build_int_cst (uint64_type_node,
38950 update_exception_mask));
38952 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38953 build_int_cst (uint64_type_node,
38954 new_exception_mask));
38956 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38957 old_llu_and, new_llu_and);
38959 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38960 new_llu_mask);
38962 tree update_mtfsf = build_call_expr (mtfsf, 2,
38963 build_int_cst (unsigned_type_node, 0xff),
38964 fenv_update_mtfsf);
38966 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38967 }
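/* A rough sketch of how the HOLD/CLEAR/UPDATE sequences built above are used
   when the front end expands an atomic compound assignment such as "x *= y"
   on a floating-point type (simplified pseudo-C, not the actual expansion):

	hold;				   -- save env, clear flags, mask traps
	do
	  {
	    old = atomic load of x;
	    new = old * y;		   -- may raise FP exception flags
	    if (compare_exchange (&x, &old, new))
	      break;
	    clear;			   -- discard flags of the failed try
	  }
	while (1);
	update;				   -- merge final flags into saved env

   Flags raised by discarded iterations are thrown away by CLEAR, so only the
   successful iteration's exceptions become visible through UPDATE.  */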
38969 void
38970 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38971 {
38972 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38974 rtx_tmp0 = gen_reg_rtx (V2DImode);
38975 rtx_tmp1 = gen_reg_rtx (V2DImode);
38977 /* The destination layout of the vmrgew instruction is:
38978 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38979 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38980 vmrgew instruction will be correct. */
38981 if (VECTOR_ELT_ORDER_BIG)
38982 {
38983 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38984 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38985 }
38986 else
38987 {
38988 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38989 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38990 }
38992 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38993 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38995 if (signed_convert)
38996 {
38997 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38998 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38999 }
39000 else
39001 {
39002 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39003 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39004 }
39006 if (VECTOR_ELT_ORDER_BIG)
39007 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39008 else
39009 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39010 }
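/* Illustrative element flow for the big-endian, signed case, assuming
   src1 = {a0, a1} and src2 = {b0, b1} (V2DI); "." marks words whose contents
   are unspecified after the doubleword-to-single conversion:

	rtx_tmp0 = xxpermdi (src1, src2, 0)    = {a0, b0}
	rtx_tmp1 = xxpermdi (src1, src2, 3)    = {a1, b1}
	rtx_tmp2 = xvcvsxdsp (rtx_tmp0)	       = {f(a0), ., f(b0), .}
	rtx_tmp3 = xvcvsxdsp (rtx_tmp1)	       = {f(a1), ., f(b1), .}
	dst	 = vmrgew (rtx_tmp2, rtx_tmp3) = {f(a0), f(a1), f(b0), f(b1)}  */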
39012 void
39013 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39014 rtx src2)
39015 {
39016 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39018 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39019 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39021 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39022 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39024 rtx_tmp2 = gen_reg_rtx (V4SImode);
39025 rtx_tmp3 = gen_reg_rtx (V4SImode);
39027 if (signed_convert)
39028 {
39029 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39030 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39031 }
39032 else
39033 {
39034 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39035 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39036 }
39038 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39039 }
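/* Likewise, for src1 = {a0, a1} and src2 = {b0, b1} (V2DF, big-endian
   element numbering), the merge above yields
   dst = {(int) a0, (int) a1, (int) b0, (int) b1},
   using the unsigned conversions instead when SIGNED_CONVERT is false.  */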
39041 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39043 static bool
39044 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39045 optimization_type opt_type)
39046 {
39047 switch (op)
39048 {
39049 case rsqrt_optab:
39050 return (opt_type == OPTIMIZE_FOR_SPEED
39051 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39053 default:
39054 return true;
39055 }
39056 }
39058 struct gcc_target targetm = TARGET_INITIALIZER;
39060 #include "gt-rs6000.h"