/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
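/* Note that these macros evaluate their arguments more than once, so
   A and B must be free of side effects.  */
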
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call it so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

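/* State shared by the rtx-cost debugging hooks below (rs6000_debug_rtx_costs
   and rs6000_debug_address_cost), used to limit nested cost dumps.  */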
static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name.  */
  unsigned int mask;		/* mask bits to set.  */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask.  */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
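/* Thus a clone compiled for CLONE_ISA_3_00 is selected at run time when
   __builtin_cpu_supports ("arch_3_00") is true on the executing machine.  */
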
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or in legitimate address
   handling.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the three register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
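/* For example, a mode that is valid in GPRs with both reg+reg and reg+offset
   addresses would have (RELOAD_REG_VALID | RELOAD_REG_INDEXED
   | RELOAD_REG_OFFSET) set in addr_mask[RELOAD_REG_GPR] below.  */
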
/* Per register type, the reload insns to use and the masks of valid
   addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
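/* On PowerPC these map onto the load/store-with-update instructions: e.g. a
   PRE_DEC store can use stwu (update with a constant displacement), while
   PRE_MODIFY also covers update forms whose new address is reg+reg, such as
   stwux.  */
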
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

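/* Cost table in effect for the current compilation; points at one of the
   per-processor tables below, selected when the target options are
   processed.  */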
const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

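/* With the RS6000_BUILTIN_* macros defined as above, including
   rs6000-builtin.def below expands each builtin description in that file
   into one initializer for the rs6000_builtin_info table (the usual
   X-macro technique).  */
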
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9",  "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
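/* So ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */
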
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1701 #undef TARGET_VECTORIZE_FINISH_COST
1702 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1703 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1704 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1706 #undef TARGET_INIT_BUILTINS
1707 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1708 #undef TARGET_BUILTIN_DECL
1709 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1711 #undef TARGET_FOLD_BUILTIN
1712 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1713 #undef TARGET_GIMPLE_FOLD_BUILTIN
1714 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1716 #undef TARGET_EXPAND_BUILTIN
1717 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1719 #undef TARGET_MANGLE_TYPE
1720 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1722 #undef TARGET_INIT_LIBFUNCS
1723 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1725 #if TARGET_MACHO
1726 #undef TARGET_BINDS_LOCAL_P
1727 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1728 #endif
1730 #undef TARGET_MS_BITFIELD_LAYOUT_P
1731 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1733 #undef TARGET_ASM_OUTPUT_MI_THUNK
1734 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1736 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1737 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1739 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1740 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1742 #undef TARGET_REGISTER_MOVE_COST
1743 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1744 #undef TARGET_MEMORY_MOVE_COST
1745 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1746 #undef TARGET_CANNOT_COPY_INSN_P
1747 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1748 #undef TARGET_RTX_COSTS
1749 #define TARGET_RTX_COSTS rs6000_rtx_costs
1750 #undef TARGET_ADDRESS_COST
1751 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1753 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1754 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1756 #undef TARGET_PROMOTE_FUNCTION_MODE
1757 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1759 #undef TARGET_RETURN_IN_MEMORY
1760 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1762 #undef TARGET_RETURN_IN_MSB
1763 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1765 #undef TARGET_SETUP_INCOMING_VARARGS
1766 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1768 /* Always strict argument naming on rs6000. */
1769 #undef TARGET_STRICT_ARGUMENT_NAMING
1770 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1771 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1772 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1773 #undef TARGET_SPLIT_COMPLEX_ARG
1774 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1775 #undef TARGET_MUST_PASS_IN_STACK
1776 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1777 #undef TARGET_PASS_BY_REFERENCE
1778 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1779 #undef TARGET_ARG_PARTIAL_BYTES
1780 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1781 #undef TARGET_FUNCTION_ARG_ADVANCE
1782 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1783 #undef TARGET_FUNCTION_ARG
1784 #define TARGET_FUNCTION_ARG rs6000_function_arg
1785 #undef TARGET_FUNCTION_ARG_PADDING
1786 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1787 #undef TARGET_FUNCTION_ARG_BOUNDARY
1788 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1790 #undef TARGET_BUILD_BUILTIN_VA_LIST
1791 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1793 #undef TARGET_EXPAND_BUILTIN_VA_START
1794 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1796 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1797 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1799 #undef TARGET_EH_RETURN_FILTER_MODE
1800 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1802 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1803 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1805 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1806 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1808 #undef TARGET_FLOATN_MODE
1809 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1811 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1812 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1814 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1815 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1817 #undef TARGET_MD_ASM_ADJUST
1818 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1820 #undef TARGET_OPTION_OVERRIDE
1821 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1823 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1825 rs6000_builtin_vectorized_function
1827 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1828 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1829 rs6000_builtin_md_vectorized_function
1831 #undef TARGET_STACK_PROTECT_GUARD
1832 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1834 #if !TARGET_MACHO
1835 #undef TARGET_STACK_PROTECT_FAIL
1836 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1837 #endif
1839 #ifdef HAVE_AS_TLS
1840 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1841 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1842 #endif
1844 /* Use a 32-bit anchor range. This leads to sequences like:
1846 addis tmp,anchor,high
1847 add dest,tmp,low
1849 where tmp itself acts as an anchor, and can be shared between
1850 accesses to the same 64k page. */
1851 #undef TARGET_MIN_ANCHOR_OFFSET
1852 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1853 #undef TARGET_MAX_ANCHOR_OFFSET
1854 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
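/* Sketch of the offset split implied by the addis/add sequence above: the
   high part is carried by addis (shifted right by 16 there) and the
   remainder is a signed 16-bit displacement.  Variable names and the
   sample offset are hypothetical.  */
#if 0
  HOST_WIDE_INT offset = 0x12345;	/* example anchor-relative offset */
  HOST_WIDE_INT high = (offset + 0x8000) & ~(HOST_WIDE_INT) 0xffff;
  HOST_WIDE_INT low = offset - high;	/* always in [-0x8000, 0x7fff] */
#endif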
1855 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1856 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1857 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1858 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1860 #undef TARGET_BUILTIN_RECIPROCAL
1861 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1863 #undef TARGET_SECONDARY_RELOAD
1864 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1865 #undef TARGET_SECONDARY_MEMORY_NEEDED
1866 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1867 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1868 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1870 #undef TARGET_LEGITIMATE_ADDRESS_P
1871 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1873 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1874 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1876 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1877 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1879 #undef TARGET_CAN_ELIMINATE
1880 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1882 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1883 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1885 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1886 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1888 #undef TARGET_TRAMPOLINE_INIT
1889 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1891 #undef TARGET_FUNCTION_VALUE
1892 #define TARGET_FUNCTION_VALUE rs6000_function_value
1894 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1895 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1897 #undef TARGET_OPTION_SAVE
1898 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1900 #undef TARGET_OPTION_RESTORE
1901 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1903 #undef TARGET_OPTION_PRINT
1904 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1906 #undef TARGET_CAN_INLINE_P
1907 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1909 #undef TARGET_SET_CURRENT_FUNCTION
1910 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1912 #undef TARGET_LEGITIMATE_CONSTANT_P
1913 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1915 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1916 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1918 #undef TARGET_CAN_USE_DOLOOP_P
1919 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1921 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1922 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1924 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1925 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1926 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1927 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1928 #undef TARGET_UNWIND_WORD_MODE
1929 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1931 #undef TARGET_OFFLOAD_OPTIONS
1932 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1934 #undef TARGET_C_MODE_FOR_SUFFIX
1935 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1937 #undef TARGET_INVALID_BINARY_OP
1938 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1940 #undef TARGET_OPTAB_SUPPORTED_P
1941 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1943 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1944 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1946 #undef TARGET_COMPARE_VERSION_PRIORITY
1947 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1949 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1950 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1951 rs6000_generate_version_dispatcher_body
1953 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1954 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1955 rs6000_get_function_versions_dispatcher
1957 #undef TARGET_OPTION_FUNCTION_VERSIONS
1958 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1960 #undef TARGET_HARD_REGNO_NREGS
1961 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1962 #undef TARGET_HARD_REGNO_MODE_OK
1963 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1965 #undef TARGET_MODES_TIEABLE_P
1966 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1968 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1969 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1970 rs6000_hard_regno_call_part_clobbered
1972 #undef TARGET_SLOW_UNALIGNED_ACCESS
1973 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1975 #undef TARGET_CAN_CHANGE_MODE_CLASS
1976 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1979 /* Processor table. */
1980 struct rs6000_ptt
1982 const char *const name; /* Canonical processor name. */
1983 const enum processor_type processor; /* Processor type enum value. */
1984 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1987 static struct rs6000_ptt const processor_target_table[] =
1989 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1990 #include "rs6000-cpus.def"
1991 #undef RS6000_CPU
1994 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1995 name is invalid. */
1997 static int
1998 rs6000_cpu_name_lookup (const char *name)
2000 size_t i;
2002 if (name != NULL)
2004 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2005 if (! strcmp (name, processor_target_table[i].name))
2006 return (int)i;
2009 return -1;
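/* Usage sketch: how the lookup feeds option handling.  "power9" is just an
   example string (valid names come from rs6000-cpus.def), and the handling
   is simplified from what the option-override code actually does.  */
#if 0
  int cpu_index = rs6000_cpu_name_lookup ("power9");
  if (cpu_index >= 0)
    rs6000_isa_flags |= processor_target_table[cpu_index].target_enable;
  else
    error ("unknown cpu name");	/* simplified diagnostic */
#endif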
2013 /* Return number of consecutive hard regs needed starting at reg REGNO
2014 to hold something of mode MODE.
2015 This is ordinarily the length in words of a value of mode MODE
2016 but can be less for certain modes in special long registers.
2018 POWER and PowerPC GPRs hold 32 bits worth;
2019 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2021 static int
2022 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2024 unsigned HOST_WIDE_INT reg_size;
2026 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2027 128-bit floating point that can go in vector registers, which has VSX
2028 memory addressing. */
2029 if (FP_REGNO_P (regno))
2030 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2031 ? UNITS_PER_VSX_WORD
2032 : UNITS_PER_FP_WORD);
2034 else if (ALTIVEC_REGNO_P (regno))
2035 reg_size = UNITS_PER_ALTIVEC_WORD;
2037 else
2038 reg_size = UNITS_PER_WORD;
2040 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
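/* Worked example of the ceiling division above, with the usual sizes: a
   16-byte value held in 8-byte FPRs needs (16 + 8 - 1) / 8 == 2 consecutive
   registers, while a 4-byte value needs (4 + 8 - 1) / 8 == 1.  */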
2043 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2044 MODE. */
2045 static int
2046 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2048 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2050 if (COMPLEX_MODE_P (mode))
2051 mode = GET_MODE_INNER (mode);
2053 /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2054 register combinations, and PTImode is what we use for those quad word
2055 memory operations.  Don't allow quad words in the argument or frame
2056 pointer registers, just registers 0..31. */
2057 if (mode == PTImode)
2058 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2059 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2060 && ((regno & 1) == 0));
2062 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2063 implementations. Don't allow an item to be split between a FP register
2064 and an Altivec register. Allow TImode in all VSX registers if the user
2065 asked for it. */
2066 if (TARGET_VSX && VSX_REGNO_P (regno)
2067 && (VECTOR_MEM_VSX_P (mode)
2068 || FLOAT128_VECTOR_P (mode)
2069 || reg_addr[mode].scalar_in_vmx_p
2070 || mode == TImode
2071 || (TARGET_VADDUQM && mode == V1TImode)))
2073 if (FP_REGNO_P (regno))
2074 return FP_REGNO_P (last_regno);
2076 if (ALTIVEC_REGNO_P (regno))
2078 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2079 return 0;
2081 return ALTIVEC_REGNO_P (last_regno);
2085 /* The GPRs can hold any mode, but values bigger than one register
2086 cannot go past R31. */
2087 if (INT_REGNO_P (regno))
2088 return INT_REGNO_P (last_regno);
2090 /* The float registers (except for VSX vector modes) can only hold floating
2091 modes and DImode. */
2092 if (FP_REGNO_P (regno))
2094 if (FLOAT128_VECTOR_P (mode))
2095 return false;
2097 if (SCALAR_FLOAT_MODE_P (mode)
2098 && (mode != TDmode || (regno % 2) == 0)
2099 && FP_REGNO_P (last_regno))
2100 return 1;
2102 if (GET_MODE_CLASS (mode) == MODE_INT)
2104 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD
2105 return 1;
2107 if (TARGET_P8_VECTOR && (mode == SImode))
2108 return 1;
2110 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2111 return 1;
2114 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2115 && PAIRED_VECTOR_MODE (mode))
2116 return 1;
2118 return 0;
2121 /* The CR register can only hold CC modes. */
2122 if (CR_REGNO_P (regno))
2123 return GET_MODE_CLASS (mode) == MODE_CC;
2125 if (CA_REGNO_P (regno))
2126 return mode == Pmode || mode == SImode;
2128 /* AltiVec modes can go only in AltiVec registers. */
2129 if (ALTIVEC_REGNO_P (regno))
2130 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2131 || mode == V1TImode);
2133 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2134 registers, and the value must fit within the register set. */
2136 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
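/* Sketch of what the checks above imply, assuming the usual register
   numbering (FIRST_GPR_REGNO == 0) and initialized nregs tables; the
   assertions are illustrative, not real unit tests.  */
#if 0
  gcc_assert (rs6000_hard_regno_mode_ok_uncached (4, PTImode));  /* even GPR */
  gcc_assert (!rs6000_hard_regno_mode_ok_uncached (5, PTImode)); /* odd GPR */
  gcc_assert (!rs6000_hard_regno_mode_ok_uncached (CR0_REGNO, SImode));
#endif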
2139 /* Implement TARGET_HARD_REGNO_NREGS. */
2141 static unsigned int
2142 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2144 return rs6000_hard_regno_nregs[mode][regno];
2147 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2149 static bool
2150 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2152 return rs6000_hard_regno_mode_ok_p[mode][regno];
2155 /* Implement TARGET_MODES_TIEABLE_P.
2157 PTImode cannot tie with other modes because PTImode is restricted to even
2158 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2159 57744).
2161 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2162 128-bit floating point on VSX systems ties with other vectors. */
2164 static bool
2165 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2167 if (mode1 == PTImode)
2168 return mode2 == PTImode;
2169 if (mode2 == PTImode)
2170 return false;
2172 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2173 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2174 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2175 return false;
2177 if (SCALAR_FLOAT_MODE_P (mode1))
2178 return SCALAR_FLOAT_MODE_P (mode2);
2179 if (SCALAR_FLOAT_MODE_P (mode2))
2180 return false;
2182 if (GET_MODE_CLASS (mode1) == MODE_CC)
2183 return GET_MODE_CLASS (mode2) == MODE_CC;
2184 if (GET_MODE_CLASS (mode2) == MODE_CC)
2185 return false;
2187 if (PAIRED_VECTOR_MODE (mode1))
2188 return PAIRED_VECTOR_MODE (mode2);
2189 if (PAIRED_VECTOR_MODE (mode2))
2190 return false;
2192 return true;
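/* Sketch of the resulting relation: two AltiVec/VSX vector modes tie with
   each other, scalar float modes tie among themselves, and PTImode ties
   only with itself.  Illustrative asserts under the usual mode
   definitions.  */
#if 0
  gcc_assert (rs6000_modes_tieable_p (V4SImode, V2DFmode));
  gcc_assert (rs6000_modes_tieable_p (SFmode, DFmode));
  gcc_assert (!rs6000_modes_tieable_p (TImode, PTImode));
#endif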
2195 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2197 static bool
2198 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2200 if (TARGET_32BIT
2201 && TARGET_POWERPC64
2202 && GET_MODE_SIZE (mode) > 4
2203 && INT_REGNO_P (regno))
2204 return true;
2206 if (TARGET_VSX
2207 && FP_REGNO_P (regno)
2208 && GET_MODE_SIZE (mode) > 8
2209 && !FLOAT128_2REG_P (mode))
2210 return true;
2212 return false;
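/* Sketch: with -m32 -mpowerpc64, a DImode value occupies a single 64-bit
   GPR, but only the low 32 bits of callee-saved GPRs are preserved across
   calls, so the register is part-clobbered.  GPR 14 is just an example
   callee-saved register.  */
#if 0
  gcc_assert (rs6000_hard_regno_call_part_clobbered (14, DImode));
#endif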
2215 /* Print interesting facts about registers. */
2216 static void
2217 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2219 int r, m;
2221 for (r = first_regno; r <= last_regno; ++r)
2223 const char *comma = "";
2224 int len;
2226 if (first_regno == last_regno)
2227 fprintf (stderr, "%s:\t", reg_name);
2228 else
2229 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2231 len = 8;
2232 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2233 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2235 if (len > 70)
2237 fprintf (stderr, ",\n\t");
2238 len = 8;
2239 comma = "";
2242 if (rs6000_hard_regno_nregs[m][r] > 1)
2243 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2244 rs6000_hard_regno_nregs[m][r]);
2245 else
2246 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2248 comma = ", ";
2251 if (call_used_regs[r])
2253 if (len > 70)
2255 fprintf (stderr, ",\n\t");
2256 len = 8;
2257 comma = "";
2260 len += fprintf (stderr, "%s%s", comma, "call-used");
2261 comma = ", ";
2264 if (fixed_regs[r])
2266 if (len > 70)
2268 fprintf (stderr, ",\n\t");
2269 len = 8;
2270 comma = "";
2273 len += fprintf (stderr, "%s%s", comma, "fixed");
2274 comma = ", ";
2277 if (len > 70)
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2283 len += fprintf (stderr, "%sreg-class = %s", comma,
2284 reg_class_names[(int)rs6000_regno_regclass[r]]);
2285 comma = ", ";
2287 if (len > 70)
2289 fprintf (stderr, ",\n\t");
2290 comma = "";
2293 fprintf (stderr, "%sregno = %d\n", comma, r);
2297 static const char *
2298 rs6000_debug_vector_unit (enum rs6000_vector v)
2300 const char *ret;
2302 switch (v)
2304 case VECTOR_NONE: ret = "none"; break;
2305 case VECTOR_ALTIVEC: ret = "altivec"; break;
2306 case VECTOR_VSX: ret = "vsx"; break;
2307 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2308 case VECTOR_PAIRED: ret = "paired"; break;
2309 case VECTOR_OTHER: ret = "other"; break;
2310 default: ret = "unknown"; break;
2313 return ret;
2316 /* Inner function printing just the address mask for a particular reload
2317 register class. */
2318 DEBUG_FUNCTION char *
2319 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2321 static char ret[8];
2322 char *p = ret;
2324 if ((mask & RELOAD_REG_VALID) != 0)
2325 *p++ = 'v';
2326 else if (keep_spaces)
2327 *p++ = ' ';
2329 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2330 *p++ = 'm';
2331 else if (keep_spaces)
2332 *p++ = ' ';
2334 if ((mask & RELOAD_REG_INDEXED) != 0)
2335 *p++ = 'i';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2339 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2340 *p++ = 'O';
2341 else if ((mask & RELOAD_REG_OFFSET) != 0)
2342 *p++ = 'o';
2343 else if (keep_spaces)
2344 *p++ = ' ';
2346 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2347 *p++ = '+';
2348 else if (keep_spaces)
2349 *p++ = ' ';
2351 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2352 *p++ = '+';
2353 else if (keep_spaces)
2354 *p++ = ' ';
2356 if ((mask & RELOAD_REG_AND_M16) != 0)
2357 *p++ = '&';
2358 else if (keep_spaces)
2359 *p++ = ' ';
2361 *p = '\0';
2363 return ret;
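/* Decoding sketch: a mask with VALID, INDEXED and OFFSET set prints as
   "v io" followed by padding when KEEP_SPACES, matching the column order
   v/m/i/o/+/+/& used above.  The variable is hypothetical.  */
#if 0
  const char *s = rs6000_debug_addr_mask (RELOAD_REG_VALID
					  | RELOAD_REG_INDEXED
					  | RELOAD_REG_OFFSET, true);
  /* s == "v io   " */
#endif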
2366 /* Print the address masks in a human readable fashion. */
2367 DEBUG_FUNCTION void
2368 rs6000_debug_print_mode (ssize_t m)
2370 ssize_t rc;
2371 int spaces = 0;
2372 bool fuse_extra_p;
2374 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2375 for (rc = 0; rc < N_RELOAD_REG; rc++)
2376 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2377 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2379 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2380 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2381 fprintf (stderr, " Reload=%c%c",
2382 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2383 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2384 else
2385 spaces += sizeof (" Reload=sl") - 1;
2387 if (reg_addr[m].scalar_in_vmx_p)
2389 fprintf (stderr, "%*s Upper=y", spaces, "");
2390 spaces = 0;
2392 else
2393 spaces += sizeof (" Upper=y") - 1;
2395 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2396 || reg_addr[m].fused_toc);
2397 if (!fuse_extra_p)
2399 for (rc = 0; rc < N_RELOAD_REG; rc++)
2401 if (rc != RELOAD_REG_ANY)
2403 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2405 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2406 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2407 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2409 fuse_extra_p = true;
2410 break;
2416 if (fuse_extra_p)
2418 fprintf (stderr, "%*s Fuse:", spaces, "");
2419 spaces = 0;
2421 for (rc = 0; rc < N_RELOAD_REG; rc++)
2423 if (rc != RELOAD_REG_ANY)
2425 char load, store;
2427 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2428 load = 'l';
2429 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2430 load = 'L';
2431 else
2432 load = '-';
2434 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2435 store = 's';
2436 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2437 store = 'S';
2438 else
2439 store = '-';
2441 if (load == '-' && store == '-')
2442 spaces += 5;
2443 else
2445 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2446 reload_reg_map[rc].name[0], load, store);
2447 spaces = 0;
2452 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2454 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2455 spaces = 0;
2457 else
2458 spaces += sizeof (" P8gpr") - 1;
2460 if (reg_addr[m].fused_toc)
2462 fprintf (stderr, "%*sToc", (spaces + 1), "");
2463 spaces = 0;
2465 else
2466 spaces += sizeof (" Toc") - 1;
2468 else
2469 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2471 if (rs6000_vector_unit[m] != VECTOR_NONE
2472 || rs6000_vector_mem[m] != VECTOR_NONE)
2474 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2475 spaces, "",
2476 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2477 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2480 fputs ("\n", stderr);
2483 #define DEBUG_FMT_ID "%-32s= "
2484 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2485 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2486 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2488 /* Print various interesting information with -mdebug=reg. */
2489 static void
2490 rs6000_debug_reg_global (void)
2492 static const char *const tf[2] = { "false", "true" };
2493 const char *nl = (const char *)0;
2494 int m;
2495 size_t m1, m2, v;
2496 char costly_num[20];
2497 char nop_num[20];
2498 char flags_buffer[40];
2499 const char *costly_str;
2500 const char *nop_str;
2501 const char *trace_str;
2502 const char *abi_str;
2503 const char *cmodel_str;
2504 struct cl_target_option cl_opts;
2506 /* Modes we want tieable information on. */
2507 static const machine_mode print_tieable_modes[] = {
2508 QImode,
2509 HImode,
2510 SImode,
2511 DImode,
2512 TImode,
2513 PTImode,
2514 SFmode,
2515 DFmode,
2516 TFmode,
2517 IFmode,
2518 KFmode,
2519 SDmode,
2520 DDmode,
2521 TDmode,
2522 V2SImode,
2523 V16QImode,
2524 V8HImode,
2525 V4SImode,
2526 V2DImode,
2527 V1TImode,
2528 V32QImode,
2529 V16HImode,
2530 V8SImode,
2531 V4DImode,
2532 V2TImode,
2533 V2SFmode,
2534 V4SFmode,
2535 V2DFmode,
2536 V8SFmode,
2537 V4DFmode,
2538 CCmode,
2539 CCUNSmode,
2540 CCEQmode,
2543 /* Virtual regs we are interested in. */
2544 const static struct {
2545 int regno; /* register number. */
2546 const char *name; /* register name. */
2547 } virtual_regs[] = {
2548 { STACK_POINTER_REGNUM, "stack pointer:" },
2549 { TOC_REGNUM, "toc: " },
2550 { STATIC_CHAIN_REGNUM, "static chain: " },
2551 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2552 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2553 { ARG_POINTER_REGNUM, "arg pointer: " },
2554 { FRAME_POINTER_REGNUM, "frame pointer:" },
2555 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2556 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2557 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2558 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2559 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2560 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2561 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2562 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2563 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2566 fputs ("\nHard register information:\n", stderr);
2567 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2568 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2569 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2570 LAST_ALTIVEC_REGNO,
2571 "vs");
2572 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2573 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2574 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2575 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2576 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2577 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2579 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2580 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2581 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2583 fprintf (stderr,
2584 "\n"
2585 "d reg_class = %s\n"
2586 "f reg_class = %s\n"
2587 "v reg_class = %s\n"
2588 "wa reg_class = %s\n"
2589 "wb reg_class = %s\n"
2590 "wd reg_class = %s\n"
2591 "we reg_class = %s\n"
2592 "wf reg_class = %s\n"
2593 "wg reg_class = %s\n"
2594 "wh reg_class = %s\n"
2595 "wi reg_class = %s\n"
2596 "wj reg_class = %s\n"
2597 "wk reg_class = %s\n"
2598 "wl reg_class = %s\n"
2599 "wm reg_class = %s\n"
2600 "wo reg_class = %s\n"
2601 "wp reg_class = %s\n"
2602 "wq reg_class = %s\n"
2603 "wr reg_class = %s\n"
2604 "ws reg_class = %s\n"
2605 "wt reg_class = %s\n"
2606 "wu reg_class = %s\n"
2607 "wv reg_class = %s\n"
2608 "ww reg_class = %s\n"
2609 "wx reg_class = %s\n"
2610 "wy reg_class = %s\n"
2611 "wz reg_class = %s\n"
2612 "wA reg_class = %s\n"
2613 "wH reg_class = %s\n"
2614 "wI reg_class = %s\n"
2615 "wJ reg_class = %s\n"
2616 "wK reg_class = %s\n"
2617 "\n",
2618 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2651 nl = "\n";
2652 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2653 rs6000_debug_print_mode (m);
2655 fputs ("\n", stderr);
2657 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2659 machine_mode mode1 = print_tieable_modes[m1];
2660 bool first_time = true;
2662 nl = (const char *)0;
2663 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2665 machine_mode mode2 = print_tieable_modes[m2];
2666 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2668 if (first_time)
2670 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2671 nl = "\n";
2672 first_time = false;
2675 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2679 if (!first_time)
2680 fputs ("\n", stderr);
2683 if (nl)
2684 fputs (nl, stderr);
2686 if (rs6000_recip_control)
2688 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2690 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2691 if (rs6000_recip_bits[m])
2693 fprintf (stderr,
2694 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2695 GET_MODE_NAME (m),
2696 (RS6000_RECIP_AUTO_RE_P (m)
2697 ? "auto"
2698 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2699 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2700 ? "auto"
2701 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2704 fputs ("\n", stderr);
2707 if (rs6000_cpu_index >= 0)
2709 const char *name = processor_target_table[rs6000_cpu_index].name;
2710 HOST_WIDE_INT flags
2711 = processor_target_table[rs6000_cpu_index].target_enable;
2713 sprintf (flags_buffer, "-mcpu=%s flags", name);
2714 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2716 else
2717 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2719 if (rs6000_tune_index >= 0)
2721 const char *name = processor_target_table[rs6000_tune_index].name;
2722 HOST_WIDE_INT flags
2723 = processor_target_table[rs6000_tune_index].target_enable;
2725 sprintf (flags_buffer, "-mtune=%s flags", name);
2726 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2728 else
2729 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2731 cl_target_option_save (&cl_opts, &global_options);
2732 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2733 rs6000_isa_flags);
2735 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2736 rs6000_isa_flags_explicit);
2738 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2739 rs6000_builtin_mask);
2741 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2743 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2744 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2746 switch (rs6000_sched_costly_dep)
2748 case max_dep_latency:
2749 costly_str = "max_dep_latency";
2750 break;
2752 case no_dep_costly:
2753 costly_str = "no_dep_costly";
2754 break;
2756 case all_deps_costly:
2757 costly_str = "all_deps_costly";
2758 break;
2760 case true_store_to_load_dep_costly:
2761 costly_str = "true_store_to_load_dep_costly";
2762 break;
2764 case store_to_load_dep_costly:
2765 costly_str = "store_to_load_dep_costly";
2766 break;
2768 default:
2769 costly_str = costly_num;
2770 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2771 break;
2774 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2776 switch (rs6000_sched_insert_nops)
2778 case sched_finish_regroup_exact:
2779 nop_str = "sched_finish_regroup_exact";
2780 break;
2782 case sched_finish_pad_groups:
2783 nop_str = "sched_finish_pad_groups";
2784 break;
2786 case sched_finish_none:
2787 nop_str = "sched_finish_none";
2788 break;
2790 default:
2791 nop_str = nop_num;
2792 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2793 break;
2796 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2798 switch (rs6000_sdata)
2800 default:
2801 case SDATA_NONE:
2802 break;
2804 case SDATA_DATA:
2805 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2806 break;
2808 case SDATA_SYSV:
2809 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2810 break;
2812 case SDATA_EABI:
2813 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2814 break;
2818 switch (rs6000_traceback)
2820 case traceback_default: trace_str = "default"; break;
2821 case traceback_none: trace_str = "none"; break;
2822 case traceback_part: trace_str = "part"; break;
2823 case traceback_full: trace_str = "full"; break;
2824 default: trace_str = "unknown"; break;
2827 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2829 switch (rs6000_current_cmodel)
2831 case CMODEL_SMALL: cmodel_str = "small"; break;
2832 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2833 case CMODEL_LARGE: cmodel_str = "large"; break;
2834 default: cmodel_str = "unknown"; break;
2837 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2839 switch (rs6000_current_abi)
2841 case ABI_NONE: abi_str = "none"; break;
2842 case ABI_AIX: abi_str = "aix"; break;
2843 case ABI_ELFv2: abi_str = "ELFv2"; break;
2844 case ABI_V4: abi_str = "V4"; break;
2845 case ABI_DARWIN: abi_str = "darwin"; break;
2846 default: abi_str = "unknown"; break;
2849 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2851 if (rs6000_altivec_abi)
2852 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2854 if (rs6000_darwin64_abi)
2855 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2857 fprintf (stderr, DEBUG_FMT_S, "single_float",
2858 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2860 fprintf (stderr, DEBUG_FMT_S, "double_float",
2861 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2863 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2864 (TARGET_SOFT_FLOAT ? "true" : "false"));
2866 if (TARGET_LINK_STACK)
2867 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2869 if (TARGET_P8_FUSION)
2871 char options[80];
2873 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2874 if (TARGET_TOC_FUSION)
2875 strcat (options, ", toc");
2877 if (TARGET_P8_FUSION_SIGN)
2878 strcat (options, ", sign");
2880 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2883 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2884 TARGET_SECURE_PLT ? "secure" : "bss");
2885 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2886 aix_struct_return ? "aix" : "sysv");
2887 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2888 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2889 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2890 tf[!!rs6000_align_branch_targets]);
2891 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2892 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2893 rs6000_long_double_type_size);
2894 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2895 (int)rs6000_sched_restricted_insns_priority);
2896 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2897 (int)END_BUILTINS);
2898 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2899 (int)RS6000_BUILTIN_COUNT);
2901 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2902 (int)TARGET_FLOAT128_ENABLE_TYPE);
2904 if (TARGET_VSX)
2905 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2906 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2908 if (TARGET_DIRECT_MOVE_128)
2909 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2910 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2914 /* Update the addr mask bits in reg_addr to help secondary reload and the
2915 legitimate address support figure out the appropriate addressing to
2916 use. */
2918 static void
2919 rs6000_setup_reg_addr_masks (void)
2921 ssize_t rc, reg, m, nregs;
2922 addr_mask_type any_addr_mask, addr_mask;
2924 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2926 machine_mode m2 = (machine_mode) m;
2927 bool complex_p = false;
2928 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2929 size_t msize;
2931 if (COMPLEX_MODE_P (m2))
2933 complex_p = true;
2934 m2 = GET_MODE_INNER (m2);
2937 msize = GET_MODE_SIZE (m2);
2939 /* SDmode is special in that we want to access it only via REG+REG
2940 addressing on power7 and above, since we want to use the LFIWZX and
2941 STFIWZX instructions to load it. */
2942 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2944 any_addr_mask = 0;
2945 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2947 addr_mask = 0;
2948 reg = reload_reg_map[rc].reg;
2950 /* Can mode values go in the GPR/FPR/Altivec registers? */
2951 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2953 bool small_int_vsx_p = (small_int_p
2954 && (rc == RELOAD_REG_FPR
2955 || rc == RELOAD_REG_VMX));
2957 nregs = rs6000_hard_regno_nregs[m][reg];
2958 addr_mask |= RELOAD_REG_VALID;
2960 /* Indicate if the mode takes more than 1 physical register. If
2961 it takes a single register, indicate it can do REG+REG
2962 addressing. Small integers in VSX registers can only do
2963 REG+REG addressing. */
2964 if (small_int_vsx_p)
2965 addr_mask |= RELOAD_REG_INDEXED;
2966 else if (nregs > 1 || m == BLKmode || complex_p)
2967 addr_mask |= RELOAD_REG_MULTIPLE;
2968 else
2969 addr_mask |= RELOAD_REG_INDEXED;
2971 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2972 addressing. If we allow scalars into Altivec registers,
2973 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2975 if (TARGET_UPDATE
2976 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2977 && msize <= 8
2978 && !VECTOR_MODE_P (m2)
2979 && !FLOAT128_VECTOR_P (m2)
2980 && !complex_p
2981 && !small_int_vsx_p)
2983 addr_mask |= RELOAD_REG_PRE_INCDEC;
2985 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2986 we don't allow PRE_MODIFY for some multi-register
2987 operations. */
2988 switch (m)
2990 default:
2991 addr_mask |= RELOAD_REG_PRE_MODIFY;
2992 break;
2994 case E_DImode:
2995 if (TARGET_POWERPC64)
2996 addr_mask |= RELOAD_REG_PRE_MODIFY;
2997 break;
2999 case E_DFmode:
3000 case E_DDmode:
3001 if (TARGET_DF_INSN)
3002 addr_mask |= RELOAD_REG_PRE_MODIFY;
3003 break;
3008 /* GPR and FPR registers can do REG+OFFSET addressing, except
3009 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3010 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3011 if ((addr_mask != 0) && !indexed_only_p
3012 && msize <= 8
3013 && (rc == RELOAD_REG_GPR
3014 || ((msize == 8 || m2 == SFmode)
3015 && (rc == RELOAD_REG_FPR
3016 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3017 addr_mask |= RELOAD_REG_OFFSET;
3019 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3020 instructions are enabled. The offset for 128-bit VSX registers is
3021 only 12 bits. While GPRs can handle the full offset range, VSX
3022 registers can only handle the restricted range. */
3023 else if ((addr_mask != 0) && !indexed_only_p
3024 && msize == 16 && TARGET_P9_VECTOR
3025 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3026 || (m2 == TImode && TARGET_VSX)))
3028 addr_mask |= RELOAD_REG_OFFSET;
3029 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3030 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3033 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3034 addressing on 128-bit types. */
3035 if (rc == RELOAD_REG_VMX && msize == 16
3036 && (addr_mask & RELOAD_REG_VALID) != 0)
3037 addr_mask |= RELOAD_REG_AND_M16;
3039 reg_addr[m].addr_mask[rc] = addr_mask;
3040 any_addr_mask |= addr_mask;
3043 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
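/* Consumption sketch: later reload code tests these precomputed bits
   roughly like this (simplified; DImode and RELOAD_REG_GPR are example
   parameters, and the variable name is hypothetical).  */
#if 0
  bool di_gpr_offset_ok
    = (reg_addr[DImode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0;
#endif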
3048 /* Initialize the various global tables that are based on register size. */
3049 static void
3050 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3052 ssize_t r, m, c;
3053 int align64;
3054 int align32;
3056 /* Precalculate REGNO_REG_CLASS. */
3057 rs6000_regno_regclass[0] = GENERAL_REGS;
3058 for (r = 1; r < 32; ++r)
3059 rs6000_regno_regclass[r] = BASE_REGS;
3061 for (r = 32; r < 64; ++r)
3062 rs6000_regno_regclass[r] = FLOAT_REGS;
3064 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3065 rs6000_regno_regclass[r] = NO_REGS;
3067 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3068 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3070 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3071 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3072 rs6000_regno_regclass[r] = CR_REGS;
3074 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3075 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3076 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3077 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3078 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3079 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3080 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3081 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3082 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3083 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3085 /* Precalculate register class to simpler reload register class. We don't
3086 need all of the register classes that are combinations of different
3087 classes, just the simple ones that have constraint letters. */
3088 for (c = 0; c < N_REG_CLASSES; c++)
3089 reg_class_to_reg_type[c] = NO_REG_TYPE;
3091 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3092 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3093 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3094 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3095 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3096 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3097 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3098 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3099 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3100 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3102 if (TARGET_VSX)
3104 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3105 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3107 else
3109 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3110 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3113 /* Precalculate the valid memory formats as well as the vector information;
3114 this must be set up before the rs6000_hard_regno_nregs_internal calls
3115 below. */
3116 gcc_assert ((int)VECTOR_NONE == 0);
3117 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3118 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3120 gcc_assert ((int)CODE_FOR_nothing == 0);
3121 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3123 gcc_assert ((int)NO_REGS == 0);
3124 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3126 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3127 controls whether the compiler uses native alignment or still uses 128-bit alignment. */
3128 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3130 align64 = 64;
3131 align32 = 32;
3133 else
3135 align64 = 128;
3136 align32 = 128;
3139 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3140 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3141 if (TARGET_FLOAT128_TYPE)
3143 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3144 rs6000_vector_align[KFmode] = 128;
3146 if (FLOAT128_IEEE_P (TFmode))
3148 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3149 rs6000_vector_align[TFmode] = 128;
3153 /* V2DF mode, VSX only. */
3154 if (TARGET_VSX)
3156 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3157 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3158 rs6000_vector_align[V2DFmode] = align64;
3161 /* V4SF mode, either VSX or Altivec. */
3162 if (TARGET_VSX)
3164 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3165 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3166 rs6000_vector_align[V4SFmode] = align32;
3168 else if (TARGET_ALTIVEC)
3170 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3171 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3172 rs6000_vector_align[V4SFmode] = align32;
3175 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3176 and stores. */
3177 if (TARGET_ALTIVEC)
3179 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3180 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3181 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3182 rs6000_vector_align[V4SImode] = align32;
3183 rs6000_vector_align[V8HImode] = align32;
3184 rs6000_vector_align[V16QImode] = align32;
3186 if (TARGET_VSX)
3188 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3189 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3190 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3192 else
3194 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3195 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3196 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3200 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit.  Allow
3201 under VSX to do insert/splat/extract.  Altivec lacks 64-bit integer support. */
3202 if (TARGET_VSX)
3204 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3205 rs6000_vector_unit[V2DImode]
3206 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3207 rs6000_vector_align[V2DImode] = align64;
3209 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3210 rs6000_vector_unit[V1TImode]
3211 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3212 rs6000_vector_align[V1TImode] = 128;
3215 /* DFmode, see if we want to use the VSX unit. Memory is handled
3216 differently, so don't set rs6000_vector_mem. */
3217 if (TARGET_VSX)
3219 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3220 rs6000_vector_align[DFmode] = 64;
3223 /* SFmode, see if we want to use the VSX unit. */
3224 if (TARGET_P8_VECTOR)
3226 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3227 rs6000_vector_align[SFmode] = 32;
3230 /* Allow TImode in VSX registers and set the VSX memory macros. */
3231 if (TARGET_VSX)
3233 rs6000_vector_mem[TImode] = VECTOR_VSX;
3234 rs6000_vector_align[TImode] = align64;
3237 /* TODO add paired floating point vector support. */
3239 /* Register class constraints for the constraints that depend on compile
3240 switches. When the VSX code was added, different constraints were added
3241 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3242 of the VSX registers are used. The register classes for scalar floating
3243 point types are set based on whether we allow that type into the upper
3244 (Altivec) registers. GCC has register classes to target the Altivec
3245 registers for load/store operations, to select using a VSX memory
3246 operation instead of the traditional floating point operation. The
3247 constraints are:
3249 d - Register class to use with traditional DFmode instructions.
3250 f - Register class to use with traditional SFmode instructions.
3251 v - Altivec register.
3252 wa - Any VSX register.
3253 wc - Reserved to represent individual CR bits (used in LLVM).
3254 wd - Preferred register class for V2DFmode.
3255 wf - Preferred register class for V4SFmode.
3256 wg - Float register for power6x move insns.
3257 wh - FP register for direct move instructions.
3258 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3259 wj - FP or VSX register to hold 64-bit integers for direct moves.
3260 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3261 wl - Float register if we can do 32-bit signed int loads.
3262 wm - VSX register for ISA 2.07 direct move operations.
3263 wn - always NO_REGS.
3264 wr - GPR if 64-bit mode is permitted.
3265 ws - Register class to do ISA 2.06 DF operations.
3266 wt - VSX register for TImode in VSX registers.
3267 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3268 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3269 ww - Register class to do SF conversions in with VSX operations.
3270 wx - Float register if we can do 32-bit int stores.
3271 wy - Register class to do ISA 2.07 SF operations.
3272 wz - Float register if we can do 32-bit unsigned int loads.
3273 wH - Altivec register if SImode is allowed in VSX registers.
3274 wI - VSX register if SImode is allowed in VSX registers.
3275 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3276 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
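/* Illustrative sketch of how such a constraint letter is consumed by a
   machine-description pattern (simplified, not a real pattern from
   rs6000.md):

     (define_insn "*example_xvadddp"
       [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
	     (plus:V2DF (match_operand:V2DF 1 "vsx_register_operand" "wa")
			(match_operand:V2DF 2 "vsx_register_operand" "wa")))]
       "TARGET_VSX"
       "xvadddp %x0,%x1,%x2")

   Because "wa" is mapped to VSX_REGS only when VSX is enabled (see the
   assignments below), the constraint collapses to NO_REGS and the
   alternative becomes unusable otherwise.  */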
3278 if (TARGET_HARD_FLOAT)
3279 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3281 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3282 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3284 if (TARGET_VSX)
3286 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3287 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3288 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3289 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3290 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3291 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3292 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3295 /* Add conditional constraints based on various options, to allow us to
3296 collapse multiple insn patterns. */
3297 if (TARGET_ALTIVEC)
3298 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3300 if (TARGET_MFPGPR) /* DFmode */
3301 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3303 if (TARGET_LFIWAX)
3304 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3306 if (TARGET_DIRECT_MOVE)
3308 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3309 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3310 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3311 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3312 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3313 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3316 if (TARGET_POWERPC64)
3318 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3319 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3322 if (TARGET_P8_VECTOR) /* SFmode */
3324 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3325 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3326 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3328 else if (TARGET_VSX)
3329 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3331 if (TARGET_STFIWX)
3332 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3334 if (TARGET_LFIWZX)
3335 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3337 if (TARGET_FLOAT128_TYPE)
3339 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3340 if (FLOAT128_IEEE_P (TFmode))
3341 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3344 if (TARGET_P9_VECTOR)
3346 /* Support for new D-form instructions. */
3347 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3349 /* Support for ISA 3.0 (power9) vectors. */
3350 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3353 /* Support for new direct moves (ISA 3.0 + 64bit). */
3354 if (TARGET_DIRECT_MOVE_128)
3355 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3357 /* Support small integers in VSX registers. */
3358 if (TARGET_P8_VECTOR)
3360 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3361 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3362 if (TARGET_P9_VECTOR)
3364 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3365 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3369 /* Set up the reload helper and direct move functions. */
3370 if (TARGET_VSX || TARGET_ALTIVEC)
3372 if (TARGET_64BIT)
3374 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3375 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3376 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3377 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3378 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3379 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3380 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3381 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3382 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3383 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3384 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3385 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3386 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3387 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3388 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3389 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3390 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3391 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3392 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3393 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3395 if (FLOAT128_VECTOR_P (KFmode))
3397 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3398 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3401 if (FLOAT128_VECTOR_P (TFmode))
3403 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3404 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3407 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3408 available. */
3409 if (TARGET_NO_SDMODE_STACK)
3411 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3412 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3415 if (TARGET_VSX)
3417 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3418 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3421 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3423 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3424 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3425 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3426 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3427 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3428 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3429 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3430 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3431 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3433 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3434 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3435 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3436 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3437 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3438 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3439 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3440 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3441 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3443 if (FLOAT128_VECTOR_P (KFmode))
3445 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3446 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3449 if (FLOAT128_VECTOR_P (TFmode))
3451 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3452 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3456 else
3458 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3459 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3460 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3461 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3462 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3463 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3464 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3465 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3466 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3467 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3468 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3469 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3470 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3471 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3472 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3473 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3474 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3475 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3476 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3477 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3479 if (FLOAT128_VECTOR_P (KFmode))
3481 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3482 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3485 if (FLOAT128_IEEE_P (TFmode))
3487 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3488 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3491 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3492 available. */
3493 if (TARGET_NO_SDMODE_STACK)
3495 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3496 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3499 if (TARGET_VSX)
3501 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3502 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3505 if (TARGET_DIRECT_MOVE)
3507 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3508 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3509 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3513 reg_addr[DFmode].scalar_in_vmx_p = true;
3514 reg_addr[DImode].scalar_in_vmx_p = true;
3516 if (TARGET_P8_VECTOR)
3518 reg_addr[SFmode].scalar_in_vmx_p = true;
3519 reg_addr[SImode].scalar_in_vmx_p = true;
3521 if (TARGET_P9_VECTOR)
3523 reg_addr[HImode].scalar_in_vmx_p = true;
3524 reg_addr[QImode].scalar_in_vmx_p = true;
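/* The scalar_in_vmx_p flags record, per scalar mode, that the value can be
kept in the upper (traditional AltiVec) registers as well as in the FPRs:
DFmode/DImode whenever VSX is available, SFmode/SImode with ISA 2.07
(power8), and HImode/QImode with ISA 3.0 (power9), hence the staging by
TARGET_P8_VECTOR and TARGET_P9_VECTOR above. */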
3529 /* Set up the fusion operations. */
3530 if (TARGET_P8_FUSION)
3532 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3533 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3534 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3535 if (TARGET_64BIT)
3536 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3539 if (TARGET_P9_FUSION)
3541 struct fuse_insns {
3542 enum machine_mode mode; /* mode of the fused type. */
3543 enum machine_mode pmode; /* pointer mode. */
3544 enum rs6000_reload_reg_type rtype; /* register type. */
3545 enum insn_code load; /* load insn. */
3546 enum insn_code store; /* store insn. */
3549 static const struct fuse_insns addis_insns[] = {
3550 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3551 CODE_FOR_fusion_vsx_di_sf_load,
3552 CODE_FOR_fusion_vsx_di_sf_store },
3554 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3555 CODE_FOR_fusion_vsx_si_sf_load,
3556 CODE_FOR_fusion_vsx_si_sf_store },
3558 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3559 CODE_FOR_fusion_vsx_di_df_load,
3560 CODE_FOR_fusion_vsx_di_df_store },
3562 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_si_df_load,
3564 CODE_FOR_fusion_vsx_si_df_store },
3566 { E_DImode, E_DImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_di_di_load,
3568 CODE_FOR_fusion_vsx_di_di_store },
3570 { E_DImode, E_SImode, RELOAD_REG_FPR,
3571 CODE_FOR_fusion_vsx_si_di_load,
3572 CODE_FOR_fusion_vsx_si_di_store },
3574 { E_QImode, E_DImode, RELOAD_REG_GPR,
3575 CODE_FOR_fusion_gpr_di_qi_load,
3576 CODE_FOR_fusion_gpr_di_qi_store },
3578 { E_QImode, E_SImode, RELOAD_REG_GPR,
3579 CODE_FOR_fusion_gpr_si_qi_load,
3580 CODE_FOR_fusion_gpr_si_qi_store },
3582 { E_HImode, E_DImode, RELOAD_REG_GPR,
3583 CODE_FOR_fusion_gpr_di_hi_load,
3584 CODE_FOR_fusion_gpr_di_hi_store },
3586 { E_HImode, E_SImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_si_hi_load,
3588 CODE_FOR_fusion_gpr_si_hi_store },
3590 { E_SImode, E_DImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_di_si_load,
3592 CODE_FOR_fusion_gpr_di_si_store },
3594 { E_SImode, E_SImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_si_si_load,
3596 CODE_FOR_fusion_gpr_si_si_store },
3598 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_di_sf_load,
3600 CODE_FOR_fusion_gpr_di_sf_store },
3602 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_si_sf_load,
3604 CODE_FOR_fusion_gpr_si_sf_store },
3606 { E_DImode, E_DImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_di_di_load,
3608 CODE_FOR_fusion_gpr_di_di_store },
3610 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3611 CODE_FOR_fusion_gpr_di_df_load,
3612 CODE_FOR_fusion_gpr_di_df_store },
3615 machine_mode cur_pmode = Pmode;
3616 size_t i;
3618 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3620 machine_mode xmode = addis_insns[i].mode;
3621 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3623 if (addis_insns[i].pmode != cur_pmode)
3624 continue;
3626 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3627 continue;
3629 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3630 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3632 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3634 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3635 = addis_insns[i].load;
3636 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3637 = addis_insns[i].store;
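/* A sketch of what the table above enables: power8/power9 fusion pairs an
addis that builds the high part of an address with the dependent memory
access, so a sequence such as

addis 9,2,sym@toc@ha
lwz 9,sym@toc@l(9)

can issue as one fused operation (illustrative assembly; the exact
relocations depend on the code model). The fusion_addis_ld/_st entries let
the fusion peepholes emit such pairs per mode and register type. */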
3642 /* Note which types we support for fusing a TOC setup with a memory insn. We
3643 only do fused TOCs for the medium/large code models. */
3644 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3645 && (TARGET_CMODEL != CMODEL_SMALL))
3647 reg_addr[QImode].fused_toc = true;
3648 reg_addr[HImode].fused_toc = true;
3649 reg_addr[SImode].fused_toc = true;
3650 reg_addr[DImode].fused_toc = true;
3651 if (TARGET_HARD_FLOAT)
3653 if (TARGET_SINGLE_FLOAT)
3654 reg_addr[SFmode].fused_toc = true;
3655 if (TARGET_DOUBLE_FLOAT)
3656 reg_addr[DFmode].fused_toc = true;
3660 /* Precalculate HARD_REGNO_NREGS. */
3661 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3662 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3663 rs6000_hard_regno_nregs[m][r]
3664 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3666 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3667 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3668 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3669 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3670 rs6000_hard_regno_mode_ok_p[m][r] = true;
3672 /* Precalculate CLASS_MAX_NREGS sizes. */
3673 for (c = 0; c < LIM_REG_CLASSES; ++c)
3675 int reg_size;
3677 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3678 reg_size = UNITS_PER_VSX_WORD;
3680 else if (c == ALTIVEC_REGS)
3681 reg_size = UNITS_PER_ALTIVEC_WORD;
3683 else if (c == FLOAT_REGS)
3684 reg_size = UNITS_PER_FP_WORD;
3686 else
3687 reg_size = UNITS_PER_WORD;
3689 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3691 machine_mode m2 = (machine_mode)m;
3692 int reg_size2 = reg_size;
3694 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3695 in VSX. */
3696 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3697 reg_size2 = UNITS_PER_FP_WORD;
3699 rs6000_class_max_nregs[m][c]
3700 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
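/* A worked example of the computation above, assuming 8-byte FPRs and
16-byte VSX registers: V2DFmode is 16 bytes, so it needs
(16 + 16 - 1) / 16 = 1 register in a VSX class, while IFmode (IBM 128-bit
floating point, FLOAT128_2REG_P) keeps reg_size2 at UNITS_PER_FP_WORD and
needs (16 + 8 - 1) / 8 = 2 registers even there. */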
3704 /* Calculate for which modes to automatically generate code using the
3705 reciprocal divide and square root instructions. In the future, possibly
3706 automatically generate the instructions even if the user did not specify
3707 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3708 not accurate enough. */
3709 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3710 if (TARGET_FRES)
3711 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3712 if (TARGET_FRE)
3713 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3714 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3715 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3716 if (VECTOR_UNIT_VSX_P (V2DFmode))
3717 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3719 if (TARGET_FRSQRTES)
3720 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3721 if (TARGET_FRSQRTE)
3722 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3723 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3724 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3725 if (VECTOR_UNIT_VSX_P (V2DFmode))
3726 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3728 if (rs6000_recip_control)
3730 if (!flag_finite_math_only)
3731 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3732 "-ffast-math");
3733 if (flag_trapping_math)
3734 warning (0, "%qs requires %qs or %qs", "-mrecip",
3735 "-fno-trapping-math", "-ffast-math");
3736 if (!flag_reciprocal_math)
3737 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3738 "-ffast-math");
3739 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3741 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3742 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3743 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3745 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3746 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3747 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3749 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3750 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3751 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3753 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3754 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3755 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3757 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3758 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3759 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3761 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3762 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3763 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3765 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3766 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3767 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3769 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3770 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3771 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
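/* An illustrative command line that exercises these bits (assuming a VSX
target; the sub-option names come from -mrecip=):

gcc -O3 -mcpu=power8 -mrecip=div,rsqrt -ffast-math foo.c

-ffast-math provides the -ffinite-math-only/-fno-trapping-math/
-freciprocal-math prerequisites checked above, so the AUTO_RE/AUTO_RSQRTE
bits are set and divisions/square roots are expanded with fre/frsqrte plus
Newton-Raphson refinement. */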
3775 /* Update the addr mask bits in reg_addr to help secondary reload and the
3776 legitimate address support figure out the appropriate addressing to
3777 use. */
3778 rs6000_setup_reg_addr_masks ();
3780 if (global_init_p || TARGET_DEBUG_TARGET)
3782 if (TARGET_DEBUG_REG)
3783 rs6000_debug_reg_global ();
3785 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3786 fprintf (stderr,
3787 "SImode variable mult cost = %d\n"
3788 "SImode constant mult cost = %d\n"
3789 "SImode short constant mult cost = %d\n"
3790 "DImode multipliciation cost = %d\n"
3791 "SImode division cost = %d\n"
3792 "DImode division cost = %d\n"
3793 "Simple fp operation cost = %d\n"
3794 "DFmode multiplication cost = %d\n"
3795 "SFmode division cost = %d\n"
3796 "DFmode division cost = %d\n"
3797 "cache line size = %d\n"
3798 "l1 cache size = %d\n"
3799 "l2 cache size = %d\n"
3800 "simultaneous prefetches = %d\n"
3801 "\n",
3802 rs6000_cost->mulsi,
3803 rs6000_cost->mulsi_const,
3804 rs6000_cost->mulsi_const9,
3805 rs6000_cost->muldi,
3806 rs6000_cost->divsi,
3807 rs6000_cost->divdi,
3808 rs6000_cost->fp,
3809 rs6000_cost->dmul,
3810 rs6000_cost->sdiv,
3811 rs6000_cost->ddiv,
3812 rs6000_cost->cache_line_size,
3813 rs6000_cost->l1_cache_size,
3814 rs6000_cost->l2_cache_size,
3815 rs6000_cost->simultaneous_prefetches);
3819 #if TARGET_MACHO
3820 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3822 static void
3823 darwin_rs6000_override_options (void)
3825 /* The Darwin ABI always includes AltiVec, can't be (validly) turned
3826 off. */
3827 rs6000_altivec_abi = 1;
3828 TARGET_ALTIVEC_VRSAVE = 1;
3829 rs6000_current_abi = ABI_DARWIN;
3831 if (DEFAULT_ABI == ABI_DARWIN
3832 && TARGET_64BIT)
3833 darwin_one_byte_bool = 1;
3835 if (TARGET_64BIT && ! TARGET_POWERPC64)
3837 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3838 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3840 if (flag_mkernel)
3842 rs6000_default_long_calls = 1;
3843 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3846 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3847 Altivec. */
3848 if (!flag_mkernel && !flag_apple_kext
3849 && TARGET_64BIT
3850 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3851 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3853 /* Unless the user (not the configurer) has explicitly overridden
3854 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3855 G4 unless targeting the kernel. */
3856 if (!flag_mkernel
3857 && !flag_apple_kext
3858 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3859 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3860 && ! global_options_set.x_rs6000_cpu_index)
3862 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3865 #endif
3867 /* If not otherwise specified by a target, make 'long double' equivalent to
3868 'double'. */
3870 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3871 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3872 #endif
3874 /* Return the builtin mask of the various options used that could affect which
3875 builtins are available. In the past we used target_flags, but we've run
3876 out of bits, and some options like PAIRED are no longer in target_flags. */
3878 HOST_WIDE_INT
3879 rs6000_builtin_mask_calculate (void)
3881 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3882 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3883 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3884 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3885 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3886 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3887 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3888 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3889 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3890 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3891 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3892 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3893 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3894 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3895 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3896 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3897 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3898 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3899 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3900 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3901 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3902 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
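/* Each builtin's entry in rs6000-builtin.def carries a mask of the
RS6000_BTM_* bits it requires; the value computed here is compared against
that mask so that, for example, an AltiVec builtin is only usable when
RS6000_BTM_ALTIVEC is present for the current (possibly function-specific)
target options. */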
3905 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3906 to clobber the XER[CA] bit because clobbering that bit without telling
3907 the compiler worked just fine with versions of GCC before GCC 5, and
3908 breaking a lot of older code in ways that are hard to track down is
3909 not such a great idea. */
3911 static rtx_insn *
3912 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3913 vec<const char *> &/*constraints*/,
3914 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3916 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3917 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3918 return NULL;
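/* An example of the pre-GCC 5 style user code that this keeps working
(illustrative, not from this file): a double-word add that carries through
XER[CA] without declaring the clobber:

unsigned long lo, hi, addend;
__asm__ ("addc %0,%0,%2\n\taddze %1,%1"
: "+r" (lo), "+r" (hi)
: "r" (addend));

addc sets XER[CA] and addze consumes it, so every asm is conservatively
treated as clobbering that bit. */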
3921 /* Override command line options.
3923 Combine build-specific configuration information with options
3924 specified on the command line to set various state variables which
3925 influence code generation, optimization, and expansion of built-in
3926 functions. Assure that command-line configuration preferences are
3927 compatible with each other and with the build configuration; issue
3928 warnings while adjusting configuration or error messages while
3929 rejecting configuration.
3931 Upon entry to this function:
3933 This function is called once at the beginning of
3934 compilation, and then again at the start and end of compiling
3935 each section of code that has a different configuration, as
3936 indicated, for example, by adding the
3938 __attribute__((__target__("cpu=power9")))
3940 qualifier to a function definition or, for example, by bracketing
3941 code between
3943 #pragma GCC target("altivec")
3947 #pragma GCC reset_options
3949 directives. Parameter global_init_p is true for the initial
3950 invocation, which initializes global variables, and false for all
3951 subsequent invocations.
3954 Various global state information is assumed to be valid. This
3955 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3956 default CPU specified at build configure time, TARGET_DEFAULT,
3957 representing the default set of option flags for the default
3958 target, and global_options_set.x_rs6000_isa_flags, representing
3959 which options were requested on the command line.
3961 Upon return from this function:
3963 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3964 was set by name on the command line. Additionally, if certain
3965 attributes are automatically enabled or disabled by this function
3966 in order to assure compatibility between options and
3967 configuration, the flags associated with those attributes are
3968 also set. By setting these "explicit bits", we avoid the risk
3969 that other code might accidentally overwrite these particular
3970 attributes with "default values".
3972 The various bits of rs6000_isa_flags are set to indicate the
3973 target options that have been selected for the most current
3974 compilation efforts. This has the effect of also turning on the
3975 associated TARGET_XXX values since these are macros which are
3976 generally defined to test the corresponding bit of the
3977 rs6000_isa_flags variable.
3979 The variable rs6000_builtin_mask is set to represent the target
3980 options for the most current compilation efforts, consistent with
3981 the current contents of rs6000_isa_flags. This variable controls
3982 expansion of built-in functions.
3984 Various other global variables and fields of global structures
3985 (over 50 in all) are initialized to reflect the desired options
3986 for the most current compilation efforts. */
3988 static bool
3989 rs6000_option_override_internal (bool global_init_p)
3991 bool ret = true;
3992 bool have_cpu = false;
3994 /* The default cpu requested at configure time, if any. */
3995 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3997 HOST_WIDE_INT set_masks;
3998 HOST_WIDE_INT ignore_masks;
3999 int cpu_index;
4000 int tune_index;
4001 struct cl_target_option *main_target_opt
4002 = ((global_init_p || target_option_default_node == NULL)
4003 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4005 /* Print defaults. */
4006 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4007 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4009 /* Remember the explicit arguments. */
4010 if (global_init_p)
4011 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4013 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4014 library functions, so warn about it. The flag may be useful for
4015 performance studies from time to time though, so don't disable it
4016 entirely. */
4017 if (global_options_set.x_rs6000_alignment_flags
4018 && rs6000_alignment_flags == MASK_ALIGN_POWER
4019 && DEFAULT_ABI == ABI_DARWIN
4020 && TARGET_64BIT)
4021 warning (0, "%qs is not supported for 64-bit Darwin;"
4022 " it is incompatible with the installed C and C++ libraries",
4023 "-malign-power");
4025 /* Numerous experiments show that IRA-based loop pressure
4026 calculation works better for RTL loop invariant motion on targets
4027 with enough (>= 32) registers. It is an expensive optimization,
4028 so it is on only when optimizing for peak performance. */
4029 if (optimize >= 3 && global_init_p
4030 && !global_options_set.x_flag_ira_loop_pressure)
4031 flag_ira_loop_pressure = 1;
4033 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4034 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
4035 option was already specified. */
4036 if (flag_sanitize & SANITIZE_USER_ADDRESS
4037 && !global_options_set.x_flag_asynchronous_unwind_tables)
4038 flag_asynchronous_unwind_tables = 1;
4040 /* Set the pointer size. */
4041 if (TARGET_64BIT)
4043 rs6000_pmode = DImode;
4044 rs6000_pointer_size = 64;
4046 else
4048 rs6000_pmode = SImode;
4049 rs6000_pointer_size = 32;
4052 /* Some OSs don't support saving the high part of 64-bit registers on context
4053 switch. Other OSs don't support saving Altivec registers. On those OSs,
4054 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4055 if the user wants either, the user must explicitly specify them and we
4056 won't interfere with the user's specification. */
4058 set_masks = POWERPC_MASKS;
4059 #ifdef OS_MISSING_POWERPC64
4060 if (OS_MISSING_POWERPC64)
4061 set_masks &= ~OPTION_MASK_POWERPC64;
4062 #endif
4063 #ifdef OS_MISSING_ALTIVEC
4064 if (OS_MISSING_ALTIVEC)
4065 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4066 | OTHER_VSX_VECTOR_MASKS);
4067 #endif
4069 /* Don't let the processor default override flags given explicitly. */
4070 set_masks &= ~rs6000_isa_flags_explicit;
4072 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4073 the cpu in a target attribute or pragma, but did not specify a tuning
4074 option, use the cpu for the tuning option rather than the option specified
4075 with -mtune on the command line. Process a '--with-cpu' configuration
4076 request as an implicit -mcpu. */
4077 if (rs6000_cpu_index >= 0)
4079 cpu_index = rs6000_cpu_index;
4080 have_cpu = true;
4082 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4084 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4085 have_cpu = true;
4087 else if (implicit_cpu)
4089 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4090 have_cpu = true;
4092 else
4094 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4095 const char *default_cpu = ((!TARGET_POWERPC64)
4096 ? "powerpc"
4097 : ((BYTES_BIG_ENDIAN)
4098 ? "powerpc64"
4099 : "powerpc64le"));
4101 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4102 have_cpu = false;
4105 gcc_assert (cpu_index >= 0);
4107 if (have_cpu)
4109 #ifndef HAVE_AS_POWER9
4110 if (processor_target_table[rs6000_cpu_index].processor
4111 == PROCESSOR_POWER9)
4113 have_cpu = false;
4114 warning (0, "will not generate power9 instructions because "
4115 "assembler lacks power9 support");
4117 #endif
4118 #ifndef HAVE_AS_POWER8
4119 if (processor_target_table[rs6000_cpu_index].processor
4120 == PROCESSOR_POWER8)
4122 have_cpu = false;
4123 warning (0, "will not generate power8 instructions because "
4124 "assembler lacks power8 support");
4126 #endif
4127 #ifndef HAVE_AS_POPCNTD
4128 if (processor_target_table[rs6000_cpu_index].processor
4129 == PROCESSOR_POWER7)
4131 have_cpu = false;
4132 warning (0, "will not generate power7 instructions because "
4133 "assembler lacks power7 support");
4135 #endif
4136 #ifndef HAVE_AS_DFP
4137 if (processor_target_table[rs6000_cpu_index].processor
4138 == PROCESSOR_POWER6)
4140 have_cpu = false;
4141 warning (0, "will not generate power6 instructions because "
4142 "assembler lacks power6 support");
4144 #endif
4145 #ifndef HAVE_AS_POPCNTB
4146 if (processor_target_table[rs6000_cpu_index].processor
4147 == PROCESSOR_POWER5)
4149 have_cpu = false;
4150 warning (0, "will not generate power5 instructions because "
4151 "assembler lacks power5 support");
4153 #endif
4155 if (!have_cpu)
4157 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4158 const char *default_cpu = (!TARGET_POWERPC64
4159 ? "powerpc"
4160 : (BYTES_BIG_ENDIAN
4161 ? "powerpc64"
4162 : "powerpc64le"));
4164 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4168 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4169 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4170 with those from the cpu, except for options that were explicitly set. If
4171 we don't have a cpu, do not override the target bits set in
4172 TARGET_DEFAULT. */
4173 if (have_cpu)
4175 rs6000_isa_flags &= ~set_masks;
4176 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4177 & set_masks);
4179 else
4181 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4182 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4183 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4184 to using rs6000_isa_flags, we need to do the initialization here.
4186 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4187 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4188 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4189 : processor_target_table[cpu_index].target_enable);
4190 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4193 if (rs6000_tune_index >= 0)
4194 tune_index = rs6000_tune_index;
4195 else if (have_cpu)
4196 rs6000_tune_index = tune_index = cpu_index;
4197 else
4199 size_t i;
4200 enum processor_type tune_proc
4201 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4203 tune_index = -1;
4204 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4205 if (processor_target_table[i].processor == tune_proc)
4207 rs6000_tune_index = tune_index = i;
4208 break;
4212 gcc_assert (tune_index >= 0);
4213 rs6000_cpu = processor_target_table[tune_index].processor;
4215 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4216 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4217 || rs6000_cpu == PROCESSOR_PPCE5500)
4219 if (TARGET_ALTIVEC)
4220 error ("AltiVec not supported in this target");
4223 /* If we are optimizing big endian systems for space, use the load/store
4224 multiple and string instructions. */
4225 if (BYTES_BIG_ENDIAN && optimize_size)
4226 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4227 | OPTION_MASK_STRING);
4229 /* Don't allow -mmultiple or -mstring on little endian systems
4230 unless the cpu is a 750, because the hardware doesn't support the
4231 instructions used in little endian mode, and using them causes an
4232 alignment trap. The 750 does not cause an alignment trap (except
4233 when the target is unaligned). */
4235 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4237 if (TARGET_MULTIPLE)
4239 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4240 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4241 warning (0, "%qs is not supported on little endian systems",
4242 "-mmultiple");
4245 if (TARGET_STRING)
4247 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4248 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4249 warning (0, "%qs is not supported on little endian systems",
4250 "-mstring");
4254 /* If little-endian, default to -mstrict-align on older processors.
4255 Testing for htm matches power8 and later. */
4256 if (!BYTES_BIG_ENDIAN
4257 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4258 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4260 /* -maltivec={le,be} implies -maltivec. */
4261 if (rs6000_altivec_element_order != 0)
4262 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4264 /* Disallow -maltivec=le in big endian mode for now. This is not
4265 known to be useful for anyone. */
4266 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4268 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4269 rs6000_altivec_element_order = 0;
4272 if (!rs6000_fold_gimple)
4273 fprintf (stderr,
4274 "gimple folding of rs6000 builtins has been disabled.\n");
4276 /* Add some warnings for VSX. */
4277 if (TARGET_VSX)
4279 const char *msg = NULL;
4280 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4282 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4283 msg = N_("-mvsx requires hardware floating point");
4284 else
4286 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4287 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4290 else if (TARGET_PAIRED_FLOAT)
4291 msg = N_("-mvsx and -mpaired are incompatible");
4292 else if (TARGET_AVOID_XFORM > 0)
4293 msg = N_("-mvsx needs indexed addressing");
4294 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4295 & OPTION_MASK_ALTIVEC))
4297 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4298 msg = N_("-mvsx and -mno-altivec are incompatible");
4299 else
4300 msg = N_("-mno-altivec disables vsx");
4303 if (msg)
4305 warning (0, msg);
4306 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4307 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4311 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4312 the -mcpu setting to enable options that conflict. */
4313 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4314 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4315 | OPTION_MASK_ALTIVEC
4316 | OPTION_MASK_VSX)) != 0)
4317 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4318 | OPTION_MASK_DIRECT_MOVE)
4319 & ~rs6000_isa_flags_explicit);
4321 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4322 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4324 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4325 off all of the options that depend on those flags. */
4326 ignore_masks = rs6000_disable_incompatible_switches ();
4328 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4329 unless the user explicitly used the -mno-<option> to disable the code. */
4330 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4331 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4332 else if (TARGET_P9_MINMAX)
4334 if (have_cpu)
4336 if (cpu_index == PROCESSOR_POWER9)
4338 /* legacy behavior: allow -mcpu=power9 with certain
4339 capabilities explicitly disabled. */
4340 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4342 else
4343 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4344 "for <xxx> less than power9", "-mcpu");
4346 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4347 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4348 & rs6000_isa_flags_explicit))
4349 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4350 were explicitly cleared. */
4351 error ("%qs incompatible with explicitly disabled options",
4352 "-mpower9-minmax");
4353 else
4354 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4356 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4357 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4358 else if (TARGET_VSX)
4359 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4360 else if (TARGET_POPCNTD)
4361 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4362 else if (TARGET_DFP)
4363 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4364 else if (TARGET_CMPB)
4365 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4366 else if (TARGET_FPRND)
4367 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4368 else if (TARGET_POPCNTB)
4369 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4370 else if (TARGET_ALTIVEC)
4371 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4373 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4375 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4376 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4377 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4380 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4382 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4383 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4384 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4387 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4389 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4390 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4391 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4394 if (TARGET_P8_VECTOR && !TARGET_VSX)
4396 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4397 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4398 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4399 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4401 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4402 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4403 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4405 else
4407 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4408 not explicit. */
4409 rs6000_isa_flags |= OPTION_MASK_VSX;
4410 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4414 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4416 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4417 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4418 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4421 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4422 silently turn off quad memory mode. */
4423 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4425 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4426 warning (0, N_("-mquad-memory requires 64-bit mode"));
4428 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4429 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4431 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4432 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4435 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4436 the words are reversed, but atomic operations can still be done by
4437 swapping the words. */
4438 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4440 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4441 warning (0, N_("-mquad-memory is not available in little endian "
4442 "mode"));
4444 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4447 /* Assume that if the user asked for normal quad memory instructions, they
4448 want the atomic versions as well, unless they explicitly told us not to
4449 use quad word atomic instructions. */
4450 if (TARGET_QUAD_MEMORY
4451 && !TARGET_QUAD_MEMORY_ATOMIC
4452 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4453 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4455 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4456 generating power8 instructions. */
4457 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4458 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4459 & OPTION_MASK_P8_FUSION);
4461 /* Setting additional fusion flags turns on base fusion. */
4462 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4464 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4466 if (TARGET_P8_FUSION_SIGN)
4467 error ("%qs requires %qs", "-mpower8-fusion-sign",
4468 "-mpower8-fusion");
4470 if (TARGET_TOC_FUSION)
4471 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4473 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4475 else
4476 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4479 /* Power9 fusion is a superset of power8 fusion. */
4480 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4482 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4484 /* We prefer to not mention undocumented options in
4485 error messages. However, if users have managed to select
4486 power9-fusion without selecting power8-fusion, they
4487 already know about undocumented flags. */
4488 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4489 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4491 else
4492 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4495 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4496 generating power9 instructions. */
4497 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4498 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4499 & OPTION_MASK_P9_FUSION);
4501 /* Power8 does not fuse sign-extended loads with the addis. If we are
4502 optimizing at high levels for speed, convert a sign-extended load into a
4503 zero-extending load and an explicit sign extension. */
4504 if (TARGET_P8_FUSION
4505 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4506 && optimize_function_for_speed_p (cfun)
4507 && optimize >= 3)
4508 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4510 /* TOC fusion requires 64-bit and medium/large code model. */
4511 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4513 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4514 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4515 warning (0, N_("-mtoc-fusion requires 64-bit"));
4518 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4520 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4521 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4522 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4525 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4526 model. */
4527 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4528 && (TARGET_CMODEL != CMODEL_SMALL)
4529 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4530 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4532 /* ISA 3.0 vector instructions include ISA 2.07. */
4533 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4535 /* We prefer to not mention undocumented options in
4536 error messages. However, if users have managed to select
4537 power9-vector without selecting power8-vector, they
4538 already know about undocumented flags. */
4539 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4540 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4541 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4542 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4544 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4545 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4546 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4548 else
4550 /* OPTION_MASK_P9_VECTOR is explicit and
4551 OPTION_MASK_P8_VECTOR is not explicit. */
4552 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4553 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4557 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4558 support. If we only have ISA 2.06 support, and the user did not specify
4559 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4560 but we don't enable the full vectorization support. */
4561 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4562 TARGET_ALLOW_MOVMISALIGN = 1;
4564 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4566 if (TARGET_ALLOW_MOVMISALIGN > 0
4567 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4568 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4570 TARGET_ALLOW_MOVMISALIGN = 0;
4573 /* Determine when unaligned vector accesses are permitted, and when
4574 they are preferred over masked Altivec loads. Note that if
4575 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4576 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4577 not true. */
4578 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4580 if (!TARGET_VSX)
4582 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4583 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4585 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4588 else if (!TARGET_ALLOW_MOVMISALIGN)
4590 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4591 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4592 "-mallow-movmisalign");
4594 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4598 /* Set long double size before the IEEE 128-bit tests. */
4599 if (!global_options_set.x_rs6000_long_double_type_size)
4601 if (main_target_opt != NULL
4602 && (main_target_opt->x_rs6000_long_double_type_size
4603 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4604 error ("target attribute or pragma changes long double size");
4605 else
4606 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4609 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4610 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4611 pick up this default. */
4612 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4613 if (!global_options_set.x_rs6000_ieeequad)
4614 rs6000_ieeequad = 1;
4615 #endif
4617 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4618 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4619 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4620 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4621 the keyword and the type. */
4622 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4624 /* IEEE 128-bit floating point requires VSX support. */
4625 if (TARGET_FLOAT128_KEYWORD)
4627 if (!TARGET_VSX)
4629 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4630 error ("%qs requires VSX support", "-mfloat128");
4632 TARGET_FLOAT128_TYPE = 0;
4633 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4634 | OPTION_MASK_FLOAT128_HW);
4636 else if (!TARGET_FLOAT128_TYPE)
4638 TARGET_FLOAT128_TYPE = 1;
4639 warning (0, "The -mfloat128 option may not be fully supported");
4643 /* Enable the __float128 keyword under Linux by default. */
4644 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4645 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4646 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4648 /* If we are supporting the float128 type and have full ISA 3.0 support,
4649 enable -mfloat128-hardware by default. However, don't enable it if
4650 -mfloat128-hardware was explicitly turned off. 64-bit mode is needed
4651 because sometimes the compiler wants to put things in an integer
4652 container, and if we don't have __int128 support, it is impossible. */
4653 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4654 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4655 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4656 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4658 if (TARGET_FLOAT128_HW
4659 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4661 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4662 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4664 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4667 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4669 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4670 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4672 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
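/* With the defaults chosen above on a 64-bit Linux VSX system, user code
such as

__float128 q = 1.0q;
q = __builtin_sqrtf128 (q);

compiles without extra options (an illustrative example, not from this
file); -mfloat128-hardware additionally maps the arithmetic onto the ISA 3.0
quad-precision instructions (xsaddqp and friends) instead of libgcc software
routines. */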
4675 /* Print the options after updating the defaults. */
4676 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4677 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4679 /* E500mc does "better" if we inline more aggressively. Respect the
4680 user's opinion, though. */
4681 if (rs6000_block_move_inline_limit == 0
4682 && (rs6000_cpu == PROCESSOR_PPCE500MC
4683 || rs6000_cpu == PROCESSOR_PPCE500MC64
4684 || rs6000_cpu == PROCESSOR_PPCE5500
4685 || rs6000_cpu == PROCESSOR_PPCE6500))
4686 rs6000_block_move_inline_limit = 128;
4688 /* store_one_arg depends on expand_block_move to handle at least the
4689 size of reg_parm_stack_space. */
4690 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4691 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
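/* rs6000_block_move_inline_limit is the largest block-move size, in bytes,
that expand_block_move will open-code; for example, with the e500mc value of
128 set above, a memcpy of up to 128 bytes becomes inline loads and stores
rather than a call to memcpy. */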
4693 if (global_init_p)
4695 /* If the appropriate debug option is enabled, replace the target hooks
4696 with debug versions that call the real version and then print
4697 debugging information. */
4698 if (TARGET_DEBUG_COST)
4700 targetm.rtx_costs = rs6000_debug_rtx_costs;
4701 targetm.address_cost = rs6000_debug_address_cost;
4702 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4705 if (TARGET_DEBUG_ADDR)
4707 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4708 targetm.legitimize_address = rs6000_debug_legitimize_address;
4709 rs6000_secondary_reload_class_ptr
4710 = rs6000_debug_secondary_reload_class;
4711 targetm.secondary_memory_needed
4712 = rs6000_debug_secondary_memory_needed;
4713 targetm.can_change_mode_class
4714 = rs6000_debug_can_change_mode_class;
4715 rs6000_preferred_reload_class_ptr
4716 = rs6000_debug_preferred_reload_class;
4717 rs6000_legitimize_reload_address_ptr
4718 = rs6000_debug_legitimize_reload_address;
4719 rs6000_mode_dependent_address_ptr
4720 = rs6000_debug_mode_dependent_address;
4723 if (rs6000_veclibabi_name)
4725 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4726 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4727 else
4729 error ("unknown vectorization library ABI type (%qs) for "
4730 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4731 ret = false;
4736 /* Disable VSX and AltiVec silently if the user switched cpus to power7 in a
4737 target attribute or pragma, which automatically enables both options,
4738 unless the AltiVec ABI was set. The ABI is set by default for 64-bit, but
4739 not for 32-bit. */
4740 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4742 TARGET_FLOAT128_TYPE = 0;
4743 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4744 | OPTION_MASK_FLOAT128_KEYWORD)
4745 & ~rs6000_isa_flags_explicit);
4748 /* Enable Altivec ABI for AIX -maltivec. */
4749 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4751 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4752 error ("target attribute or pragma changes AltiVec ABI");
4753 else
4754 rs6000_altivec_abi = 1;
4757 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4758 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4759 be explicitly overridden in either case. */
4760 if (TARGET_ELF)
4762 if (!global_options_set.x_rs6000_altivec_abi
4763 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4765 if (main_target_opt != NULL
4766 && !main_target_opt->x_rs6000_altivec_abi)
4767 error ("target attribute or pragma changes AltiVec ABI");
4768 else
4769 rs6000_altivec_abi = 1;
4773 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4774 So far, the only darwin64 targets are also Mach-O. */
4775 if (TARGET_MACHO
4776 && DEFAULT_ABI == ABI_DARWIN
4777 && TARGET_64BIT)
4779 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4780 error ("target attribute or pragma changes darwin64 ABI");
4781 else
4783 rs6000_darwin64_abi = 1;
4784 /* Default to natural alignment, for better performance. */
4785 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4789 /* Place FP constants in the constant pool instead of the TOC
4790 if section anchors are enabled. */
4791 if (flag_section_anchors
4792 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4793 TARGET_NO_FP_IN_TOC = 1;
4795 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4796 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4798 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4799 SUBTARGET_OVERRIDE_OPTIONS;
4800 #endif
4801 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4802 SUBSUBTARGET_OVERRIDE_OPTIONS;
4803 #endif
4804 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4805 SUB3TARGET_OVERRIDE_OPTIONS;
4806 #endif
4808 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4809 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4811 /* For the E500 family of cores, reset the single/double FP flags to let us
4812 check that they remain constant across attributes or pragmas. Also,
4813 clear a possible request for string instructions, which are not supported
4814 and which we might have silently enabled above for -Os.
4816 For other families, clear ISEL in case it was set implicitly.
4819 switch (rs6000_cpu)
4821 case PROCESSOR_PPC8540:
4822 case PROCESSOR_PPC8548:
4823 case PROCESSOR_PPCE500MC:
4824 case PROCESSOR_PPCE500MC64:
4825 case PROCESSOR_PPCE5500:
4826 case PROCESSOR_PPCE6500:
4828 rs6000_single_float = 0;
4829 rs6000_double_float = 0;
4831 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4833 break;
4835 default:
4837 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4838 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4840 break;
4843 if (main_target_opt)
4845 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4846 error ("target attribute or pragma changes single precision floating "
4847 "point");
4848 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4849 error ("target attribute or pragma changes double precision floating "
4850 "point");
4853 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4854 && rs6000_cpu != PROCESSOR_POWER5
4855 && rs6000_cpu != PROCESSOR_POWER6
4856 && rs6000_cpu != PROCESSOR_POWER7
4857 && rs6000_cpu != PROCESSOR_POWER8
4858 && rs6000_cpu != PROCESSOR_POWER9
4859 && rs6000_cpu != PROCESSOR_PPCA2
4860 && rs6000_cpu != PROCESSOR_CELL
4861 && rs6000_cpu != PROCESSOR_PPC476);
4862 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4863 || rs6000_cpu == PROCESSOR_POWER5
4864 || rs6000_cpu == PROCESSOR_POWER7
4865 || rs6000_cpu == PROCESSOR_POWER8);
4866 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4867 || rs6000_cpu == PROCESSOR_POWER5
4868 || rs6000_cpu == PROCESSOR_POWER6
4869 || rs6000_cpu == PROCESSOR_POWER7
4870 || rs6000_cpu == PROCESSOR_POWER8
4871 || rs6000_cpu == PROCESSOR_POWER9
4872 || rs6000_cpu == PROCESSOR_PPCE500MC
4873 || rs6000_cpu == PROCESSOR_PPCE500MC64
4874 || rs6000_cpu == PROCESSOR_PPCE5500
4875 || rs6000_cpu == PROCESSOR_PPCE6500);
4877 /* Allow debug switches to override the above settings. These are set to -1
4878 in rs6000.opt to indicate the user hasn't directly set the switch. */
4879 if (TARGET_ALWAYS_HINT >= 0)
4880 rs6000_always_hint = TARGET_ALWAYS_HINT;
4882 if (TARGET_SCHED_GROUPS >= 0)
4883 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4885 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4886 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4888 rs6000_sched_restricted_insns_priority
4889 = (rs6000_sched_groups ? 1 : 0);
4891 /* Handle -msched-costly-dep option. */
4892 rs6000_sched_costly_dep
4893 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4895 if (rs6000_sched_costly_dep_str)
4897 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4898 rs6000_sched_costly_dep = no_dep_costly;
4899 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4900 rs6000_sched_costly_dep = all_deps_costly;
4901 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4902 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4903 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4904 rs6000_sched_costly_dep = store_to_load_dep_costly;
4905 else
4906 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4907 atoi (rs6000_sched_costly_dep_str));
4910 /* Handle -minsert-sched-nops option. */
4911 rs6000_sched_insert_nops
4912 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4914 if (rs6000_sched_insert_nops_str)
4916 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4917 rs6000_sched_insert_nops = sched_finish_none;
4918 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4919 rs6000_sched_insert_nops = sched_finish_pad_groups;
4920 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4921 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4922 else
4923 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4924 atoi (rs6000_sched_insert_nops_str));
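/* An illustrative command line for the two scheduling knobs handled above
(the keyword values come straight from the strcmp chains):

gcc -O2 -mcpu=power5 -msched-costly-dep=true_store_to_load \
-minsert-sched-nops=regroup_exact foo.c

Either option also accepts a bare number, which is passed through atoi as
seen above. */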
4927 /* Handle stack protector. */
4928 if (!global_options_set.x_rs6000_stack_protector_guard)
4929 #ifdef TARGET_THREAD_SSP_OFFSET
4930 rs6000_stack_protector_guard = SSP_TLS;
4931 #else
4932 rs6000_stack_protector_guard = SSP_GLOBAL;
4933 #endif
4935 #ifdef TARGET_THREAD_SSP_OFFSET
4936 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4937 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4938 #endif
4940 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4942 char *endp;
4943 const char *str = rs6000_stack_protector_guard_offset_str;
4945 errno = 0;
4946 long offset = strtol (str, &endp, 0);
4947 if (!*str || *endp || errno)
4948 error ("%qs is not a valid number in %qs", str,
4949 "-mstack-protector-guard-offset=");
4951 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4952 || (TARGET_64BIT && (offset & 3)))
4953 error ("%qs is not a valid offset in %qs", str,
4954 "-mstack-protector-guard-offset=");
4956 rs6000_stack_protector_guard_offset = offset;
4959 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4961 const char *str = rs6000_stack_protector_guard_reg_str;
4962 int reg = decode_reg_name (str);
4964 if (!IN_RANGE (reg, 1, 31))
4965 error ("%qs is not a valid base register in %qs", str,
4966 "-mstack-protector-guard-reg=");
4968 rs6000_stack_protector_guard_reg = reg;
4971 if (rs6000_stack_protector_guard == SSP_TLS
4972 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4973 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
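/* An illustrative combination of the stack-protector options validated
above (the offset here is made up for the example, not the actual glibc TCB
layout):

gcc -fstack-protector-strong -mstack-protector-guard=tls \
-mstack-protector-guard-reg=r13 \
-mstack-protector-guard-offset=0x28 foo.c

The offset must fit in a signed 16-bit displacement and, in 64-bit mode, be
a multiple of 4, as the IN_RANGE and low-bit checks above enforce. */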
4975 if (global_init_p)
4977 #ifdef TARGET_REGNAMES
4978 /* If the user desires alternate register names, copy in the
4979 alternate names now. */
4980 if (TARGET_REGNAMES)
4981 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4982 #endif
4984 /* Set aix_struct_return last, after the ABI is determined.
4985 If -maix-struct-return or -msvr4-struct-return was explicitly
4986 used, don't override with the ABI default. */
4987 if (!global_options_set.x_aix_struct_return)
4988 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4990 #if 0
4991 /* IBM XL compiler defaults to unsigned bitfields. */
4992 if (TARGET_XL_COMPAT)
4993 flag_signed_bitfields = 0;
4994 #endif
4996 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4997 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4999 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5001 /* We can only guarantee the availability of DI pseudo-ops when
5002 assembling for 64-bit targets. */
5003 if (!TARGET_64BIT)
5005 targetm.asm_out.aligned_op.di = NULL;
5006 targetm.asm_out.unaligned_op.di = NULL;
5010 /* Set branch target alignment, if not optimizing for size. */
5011 if (!optimize_size)
5013 /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
5014 aligned 8-byte to avoid misprediction by the branch predictor.  */
5015 if (rs6000_cpu == PROCESSOR_TITAN
5016 || rs6000_cpu == PROCESSOR_CELL)
5018 if (align_functions <= 0)
5019 align_functions = 8;
5020 if (align_jumps <= 0)
5021 align_jumps = 8;
5022 if (align_loops <= 0)
5023 align_loops = 8;
5025 if (rs6000_align_branch_targets)
5027 if (align_functions <= 0)
5028 align_functions = 16;
5029 if (align_jumps <= 0)
5030 align_jumps = 16;
5031 if (align_loops <= 0)
5033 can_override_loop_align = 1;
5034 align_loops = 16;
5037 if (align_jumps_max_skip <= 0)
5038 align_jumps_max_skip = 15;
5039 if (align_loops_max_skip <= 0)
5040 align_loops_max_skip = 15;
5043 /* Arrange to save and restore machine status around nested functions. */
5044 init_machine_status = rs6000_init_machine_status;
5046 /* We should always be splitting complex arguments, but we can't break
5047 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5048 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5049 targetm.calls.split_complex_arg = NULL;
5051 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5052 if (DEFAULT_ABI == ABI_AIX)
5053 targetm.calls.custom_function_descriptors = 0;
5056 /* Initialize rs6000_cost with the appropriate target costs. */
5057 if (optimize_size)
5058 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5059 else
5060 switch (rs6000_cpu)
5062 case PROCESSOR_RS64A:
5063 rs6000_cost = &rs64a_cost;
5064 break;
5066 case PROCESSOR_MPCCORE:
5067 rs6000_cost = &mpccore_cost;
5068 break;
5070 case PROCESSOR_PPC403:
5071 rs6000_cost = &ppc403_cost;
5072 break;
5074 case PROCESSOR_PPC405:
5075 rs6000_cost = &ppc405_cost;
5076 break;
5078 case PROCESSOR_PPC440:
5079 rs6000_cost = &ppc440_cost;
5080 break;
5082 case PROCESSOR_PPC476:
5083 rs6000_cost = &ppc476_cost;
5084 break;
5086 case PROCESSOR_PPC601:
5087 rs6000_cost = &ppc601_cost;
5088 break;
5090 case PROCESSOR_PPC603:
5091 rs6000_cost = &ppc603_cost;
5092 break;
5094 case PROCESSOR_PPC604:
5095 rs6000_cost = &ppc604_cost;
5096 break;
5098 case PROCESSOR_PPC604e:
5099 rs6000_cost = &ppc604e_cost;
5100 break;
5102 case PROCESSOR_PPC620:
5103 rs6000_cost = &ppc620_cost;
5104 break;
5106 case PROCESSOR_PPC630:
5107 rs6000_cost = &ppc630_cost;
5108 break;
5110 case PROCESSOR_CELL:
5111 rs6000_cost = &ppccell_cost;
5112 break;
5114 case PROCESSOR_PPC750:
5115 case PROCESSOR_PPC7400:
5116 rs6000_cost = &ppc750_cost;
5117 break;
5119 case PROCESSOR_PPC7450:
5120 rs6000_cost = &ppc7450_cost;
5121 break;
5123 case PROCESSOR_PPC8540:
5124 case PROCESSOR_PPC8548:
5125 rs6000_cost = &ppc8540_cost;
5126 break;
5128 case PROCESSOR_PPCE300C2:
5129 case PROCESSOR_PPCE300C3:
5130 rs6000_cost = &ppce300c2c3_cost;
5131 break;
5133 case PROCESSOR_PPCE500MC:
5134 rs6000_cost = &ppce500mc_cost;
5135 break;
5137 case PROCESSOR_PPCE500MC64:
5138 rs6000_cost = &ppce500mc64_cost;
5139 break;
5141 case PROCESSOR_PPCE5500:
5142 rs6000_cost = &ppce5500_cost;
5143 break;
5145 case PROCESSOR_PPCE6500:
5146 rs6000_cost = &ppce6500_cost;
5147 break;
5149 case PROCESSOR_TITAN:
5150 rs6000_cost = &titan_cost;
5151 break;
5153 case PROCESSOR_POWER4:
5154 case PROCESSOR_POWER5:
5155 rs6000_cost = &power4_cost;
5156 break;
5158 case PROCESSOR_POWER6:
5159 rs6000_cost = &power6_cost;
5160 break;
5162 case PROCESSOR_POWER7:
5163 rs6000_cost = &power7_cost;
5164 break;
5166 case PROCESSOR_POWER8:
5167 rs6000_cost = &power8_cost;
5168 break;
5170 case PROCESSOR_POWER9:
5171 rs6000_cost = &power9_cost;
5172 break;
5174 case PROCESSOR_PPCA2:
5175 rs6000_cost = &ppca2_cost;
5176 break;
5178 default:
5179 gcc_unreachable ();
5182 if (global_init_p)
5184 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5185 rs6000_cost->simultaneous_prefetches,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5188 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5189 global_options.x_param_values,
5190 global_options_set.x_param_values);
5191 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5192 rs6000_cost->cache_line_size,
5193 global_options.x_param_values,
5194 global_options_set.x_param_values);
5195 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5196 global_options.x_param_values,
5197 global_options_set.x_param_values);
5199 /* Increase loop peeling limits based on performance analysis. */
5200 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5201 global_options.x_param_values,
5202 global_options_set.x_param_values);
5203 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5204 global_options.x_param_values,
5205 global_options_set.x_param_values);
5207 /* Use the 'model' -fsched-pressure algorithm by default. */
5208 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5209 SCHED_PRESSURE_MODEL,
5210 global_options.x_param_values,
5211 global_options_set.x_param_values);
5213 /* If using typedef char *va_list, signal that
5214 __builtin_va_start (&ap, 0) can be optimized to
5215 ap = __builtin_next_arg (0). */
5216 if (DEFAULT_ABI != ABI_V4)
5217 targetm.expand_builtin_va_start = NULL;
5220 /* Set up single/double float flags.
5221 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5222 then set both flags. */
5223 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5224 rs6000_single_float = rs6000_double_float = 1;
5226 /* If not explicitly specified via option, decide whether to generate indexed
5227 load/store instructions. A value of -1 indicates that the
5228 initial value of this variable has not been overwritten. During
5229 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5230 if (TARGET_AVOID_XFORM == -1)
5231 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5232 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5233 need indexed accesses and the type used is the scalar type of the element
5234 being loaded or stored. */
5235 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5236 && !TARGET_ALTIVEC);
5238 /* Set the -mrecip options. */
5239 if (rs6000_recip_name)
5241 char *p = ASTRDUP (rs6000_recip_name);
5242 char *q;
5243 unsigned int mask, i;
5244 bool invert;
5246 while ((q = strtok (p, ",")) != NULL)
5248 p = NULL;
5249 if (*q == '!')
5251 invert = true;
5252 q++;
5254 else
5255 invert = false;
5257 if (!strcmp (q, "default"))
5258 mask = ((TARGET_RECIP_PRECISION)
5259 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5260 else
5262 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5263 if (!strcmp (q, recip_options[i].string))
5265 mask = recip_options[i].mask;
5266 break;
5269 if (i == ARRAY_SIZE (recip_options))
5271 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5272 invert = false;
5273 mask = 0;
5274 ret = false;
5278 if (invert)
5279 rs6000_recip_control &= ~mask;
5280 else
5281 rs6000_recip_control |= mask;
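/* A standalone sketch of the "-mrecip=" parsing pattern above: strtok walks
   a comma-separated list, a leading '!' flips an item from setting bits to
   clearing them, and names are looked up in a small option table.  The
   option names and mask values below are made up for illustration.  */
#if 0
#include <stdio.h>
#include <string.h>

struct recip_opt { const char *string; unsigned mask; };
static const struct recip_opt opts[] =
  { { "div", 1u }, { "sqrt", 2u }, { "all", 3u } };

static unsigned
parse_recip_list (char *p)
{
  unsigned control = 0;
  char *q;
  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = 0;
      unsigned mask = 0;
      size_t i;
      p = NULL;			/* later strtok calls continue the same scan */
      if (*q == '!')
	{
	  invert = 1;
	  q++;
	}
      for (i = 0; i < sizeof (opts) / sizeof (opts[0]); i++)
	if (!strcmp (q, opts[i].string))
	  {
	    mask = opts[i].mask;
	    break;
	  }
      if (invert)
	control &= ~mask;
      else
	control |= mask;
    }
  return control;
}

int
main (void)
{
  char buf[] = "all,!sqrt";
  printf ("control = %u\n", parse_recip_list (buf));	/* 3, then clear 2 -> 1 */
  return 0;
}
#endif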
5285 /* Set the builtin mask of the various options used that could affect which
5286 builtins were used. In the past we used target_flags, but we've run out
5287 of bits, and some options like PAIRED are no longer in target_flags. */
5288 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5289 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5290 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5291 rs6000_builtin_mask);
5293 /* Initialize all of the registers. */
5294 rs6000_init_hard_regno_mode_ok (global_init_p);
5296 /* Save the initial options in case the user uses function-specific options.  */
5297 if (global_init_p)
5298 target_option_default_node = target_option_current_node
5299 = build_target_option_node (&global_options);
5301 /* If not explicitly specified via option, decide whether to generate the
5302 extra blr's required to preserve the link stack on some cpus (e.g., 476).  */
5303 if (TARGET_LINK_STACK == -1)
5304 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5306 return ret;
5309 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5310 define the target cpu type. */
5312 static void
5313 rs6000_option_override (void)
5315 (void) rs6000_option_override_internal (true);
5319 /* Implement targetm.vectorize.builtin_mask_for_load. */
5320 static tree
5321 rs6000_builtin_mask_for_load (void)
5323 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5324 if ((TARGET_ALTIVEC && !TARGET_VSX)
5325 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5326 return altivec_builtin_mask_for_load;
5327 else
5328 return 0;
5331 /* Implement LOOP_ALIGN. */
5332 int
5333 rs6000_loop_align (rtx label)
5335 basic_block bb;
5336 int ninsns;
5338 /* Don't override loop alignment if -falign-loops was specified. */
5339 if (!can_override_loop_align)
5340 return align_loops_log;
5342 bb = BLOCK_FOR_INSN (label);
5343 ninsns = num_loop_insns (bb->loop_father);
5345 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default.  */
5346 if (ninsns > 4 && ninsns <= 8
5347 && (rs6000_cpu == PROCESSOR_POWER4
5348 || rs6000_cpu == PROCESSOR_POWER5
5349 || rs6000_cpu == PROCESSOR_POWER6
5350 || rs6000_cpu == PROCESSOR_POWER7
5351 || rs6000_cpu == PROCESSOR_POWER8
5352 || rs6000_cpu == PROCESSOR_POWER9))
5353 return 5;
5354 else
5355 return align_loops_log;
5358 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5359 static int
5360 rs6000_loop_align_max_skip (rtx_insn *label)
5362 return (1 << rs6000_loop_align (label)) - 1;
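/* The alignment hooks above work in log2 units: rs6000_loop_align returning
   5 requests a 2**5 = 32-byte boundary, and the matching max-skip is one
   byte less than the boundary.  A tiny arithmetic check (standalone demo,
   compiled out):  */
#if 0
#include <stdio.h>

int
main (void)
{
  int align_log = 5;
  printf ("boundary = %d bytes, max skip = %d bytes\n",
	  1 << align_log, (1 << align_log) - 1);	/* 32 and 31 */
  return 0;
}
#endif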
5365 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5366 after applying N iterations.  This routine does not determine how many
5367 iterations are required to reach the desired alignment.  */
5369 static bool
5370 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5372 if (is_packed)
5373 return false;
5375 if (TARGET_32BIT)
5377 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5378 return true;
5380 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5381 return true;
5383 return false;
5385 else
5387 if (TARGET_MACHO)
5388 return false;
5390 /* Assume that all other types are naturally aligned.  CHECKME!  */
5391 return true;
5395 /* Return true if the vector misalignment factor is supported by the
5396 target. */
5397 static bool
5398 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5399 const_tree type,
5400 int misalignment,
5401 bool is_packed)
5403 if (TARGET_VSX)
5405 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5406 return true;
5408 /* Return false if the movmisalign pattern is not supported for this mode.  */
5409 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5410 return false;
5412 if (misalignment == -1)
5414 /* Misalignment factor is unknown at compile time but we know
5415 it's word aligned. */
5416 if (rs6000_vector_alignment_reachable (type, is_packed))
5418 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5420 if (element_size == 64 || element_size == 32)
5421 return true;
5424 return false;
5427 /* VSX supports word-aligned vectors.  */
5428 if (misalignment % 4 == 0)
5429 return true;
5431 return false;
5434 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5435 static int
5436 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5437 tree vectype, int misalign)
5439 unsigned elements;
5440 tree elem_type;
5442 switch (type_of_cost)
5444 case scalar_stmt:
5445 case scalar_load:
5446 case scalar_store:
5447 case vector_stmt:
5448 case vector_load:
5449 case vector_store:
5450 case vec_to_scalar:
5451 case scalar_to_vec:
5452 case cond_branch_not_taken:
5453 return 1;
5455 case vec_perm:
5456 if (TARGET_VSX)
5457 return 3;
5458 else
5459 return 1;
5461 case vec_promote_demote:
5462 if (TARGET_VSX)
5463 return 4;
5464 else
5465 return 1;
5467 case cond_branch_taken:
5468 return 3;
5470 case unaligned_load:
5471 if (TARGET_P9_VECTOR)
5472 return 3;
5474 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5475 return 1;
5477 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5479 elements = TYPE_VECTOR_SUBPARTS (vectype);
5480 if (elements == 2)
5481 /* Double word aligned. */
5482 return 2;
5484 if (elements == 4)
5486 switch (misalign)
5488 case 8:
5489 /* Double word aligned. */
5490 return 2;
5492 case -1:
5493 /* Unknown misalignment. */
5494 case 4:
5495 case 12:
5496 /* Word aligned. */
5497 return 22;
5499 default:
5500 gcc_unreachable ();
5505 if (TARGET_ALTIVEC)
5506 /* Misaligned loads are not supported. */
5507 gcc_unreachable ();
5509 return 2;
5511 case unaligned_store:
5512 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5513 return 1;
5515 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5517 elements = TYPE_VECTOR_SUBPARTS (vectype);
5518 if (elements == 2)
5519 /* Double word aligned. */
5520 return 2;
5522 if (elements == 4)
5524 switch (misalign)
5526 case 8:
5527 /* Double word aligned. */
5528 return 2;
5530 case -1:
5531 /* Unknown misalignment. */
5532 case 4:
5533 case 12:
5534 /* Word aligned. */
5535 return 23;
5537 default:
5538 gcc_unreachable ();
5543 if (TARGET_ALTIVEC)
5544 /* Misaligned stores are not supported. */
5545 gcc_unreachable ();
5547 return 2;
5549 case vec_construct:
5550 /* This is a rough approximation assuming non-constant elements
5551 constructed into a vector via element insertion. FIXME:
5552 vec_construct is not granular enough for uniformly good
5553 decisions. If the initialization is a splat, this is
5554 cheaper than we estimate. Improve this someday. */
5555 elem_type = TREE_TYPE (vectype);
5556 /* 32-bit vectors loaded into registers are stored as double
5557 precision, so we need 2 permutes, 2 converts, and 1 merge
5558 to construct a vector of short floats from them. */
5559 if (SCALAR_FLOAT_TYPE_P (elem_type)
5560 && TYPE_PRECISION (elem_type) == 32)
5561 return 5;
5562 /* On POWER9, integer vector types are built up in GPRs and then
5563 use a direct move (2 cycles). For POWER8 this is even worse,
5564 as we need two direct moves and a merge, and the direct moves
5565 are five cycles. */
5566 else if (INTEGRAL_TYPE_P (elem_type))
5568 if (TARGET_P9_VECTOR)
5569 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5570 else
5571 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5573 else
5574 /* V2DFmode doesn't need a direct move. */
5575 return 2;
5577 default:
5578 gcc_unreachable ();
5582 /* Implement targetm.vectorize.preferred_simd_mode. */
5584 static machine_mode
5585 rs6000_preferred_simd_mode (scalar_mode mode)
5587 if (TARGET_VSX)
5588 switch (mode)
5590 case E_DFmode:
5591 return V2DFmode;
5592 default:;
5594 if (TARGET_ALTIVEC || TARGET_VSX)
5595 switch (mode)
5597 case E_SFmode:
5598 return V4SFmode;
5599 case E_TImode:
5600 return V1TImode;
5601 case E_DImode:
5602 return V2DImode;
5603 case E_SImode:
5604 return V4SImode;
5605 case E_HImode:
5606 return V8HImode;
5607 case E_QImode:
5608 return V16QImode;
5609 default:;
5611 if (TARGET_PAIRED_FLOAT
5612 && mode == SFmode)
5613 return V2SFmode;
5614 return word_mode;
5617 typedef struct _rs6000_cost_data
5619 struct loop *loop_info;
5620 unsigned cost[3];
5621 } rs6000_cost_data;
5623 /* Test for likely overcommitment of vector hardware resources. If a
5624 loop iteration is relatively large, and too large a percentage of
5625 instructions in the loop are vectorized, the cost model may not
5626 adequately reflect delays from unavailable vector resources.
5627 Penalize the loop body cost for this case. */
5629 static void
5630 rs6000_density_test (rs6000_cost_data *data)
5632 const int DENSITY_PCT_THRESHOLD = 85;
5633 const int DENSITY_SIZE_THRESHOLD = 70;
5634 const int DENSITY_PENALTY = 10;
5635 struct loop *loop = data->loop_info;
5636 basic_block *bbs = get_loop_body (loop);
5637 int nbbs = loop->num_nodes;
5638 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5639 int i, density_pct;
5641 for (i = 0; i < nbbs; i++)
5643 basic_block bb = bbs[i];
5644 gimple_stmt_iterator gsi;
5646 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5648 gimple *stmt = gsi_stmt (gsi);
5649 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5651 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5652 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5653 not_vec_cost++;
5657 free (bbs);
5658 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5660 if (density_pct > DENSITY_PCT_THRESHOLD
5661 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5663 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5664 if (dump_enabled_p ())
5665 dump_printf_loc (MSG_NOTE, vect_location,
5666 "density %d%%, cost %d exceeds threshold, penalizing "
5667 "loop body cost by %d%%", density_pct,
5668 vec_cost + not_vec_cost, DENSITY_PENALTY);
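/* A standalone model of the density heuristic above: once more than
   DENSITY_PCT_THRESHOLD percent of a sufficiently large loop body is
   vectorized, the vector body cost is inflated by DENSITY_PENALTY percent.
   The costs below are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int vec_cost = 90, not_vec_cost = 10;	/* a made-up, almost fully vector loop */
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    vec_cost = vec_cost * (100 + 10) / 100;
  printf ("density %d%%, penalized body cost %d\n", density_pct, vec_cost);
  return 0;
}
#endif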
5672 /* Implement targetm.vectorize.init_cost. */
5674 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5675 instruction is needed by the vectorization. */
5676 static bool rs6000_vect_nonmem;
5678 static void *
5679 rs6000_init_cost (struct loop *loop_info)
5681 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5682 data->loop_info = loop_info;
5683 data->cost[vect_prologue] = 0;
5684 data->cost[vect_body] = 0;
5685 data->cost[vect_epilogue] = 0;
5686 rs6000_vect_nonmem = false;
5687 return data;
5690 /* Implement targetm.vectorize.add_stmt_cost. */
5692 static unsigned
5693 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5694 struct _stmt_vec_info *stmt_info, int misalign,
5695 enum vect_cost_model_location where)
5697 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5698 unsigned retval = 0;
5700 if (flag_vect_cost_model)
5702 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5703 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5704 misalign);
5705 /* Statements in an inner loop relative to the loop being
5706 vectorized are weighted more heavily. The value here is
5707 arbitrary and could potentially be improved with analysis. */
5708 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5709 count *= 50; /* FIXME. */
5711 retval = (unsigned) (count * stmt_cost);
5712 cost_data->cost[where] += retval;
5714 /* Check whether we're doing something other than just a copy loop.
5715 Not all such loops may be profitably vectorized; see
5716 rs6000_finish_cost. */
5717 if ((kind == vec_to_scalar || kind == vec_perm
5718 || kind == vec_promote_demote || kind == vec_construct
5719 || kind == scalar_to_vec)
5720 || (where == vect_body && kind == vector_stmt))
5721 rs6000_vect_nonmem = true;
5724 return retval;
5727 /* Implement targetm.vectorize.finish_cost. */
5729 static void
5730 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5731 unsigned *body_cost, unsigned *epilogue_cost)
5733 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5735 if (cost_data->loop_info)
5736 rs6000_density_test (cost_data);
5738 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5739 that require versioning for any reason. The vectorization is at
5740 best a wash inside the loop, and the versioning checks make
5741 profitability highly unlikely and potentially quite harmful. */
5742 if (cost_data->loop_info)
5744 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5745 if (!rs6000_vect_nonmem
5746 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5747 && LOOP_REQUIRES_VERSIONING (vec_info))
5748 cost_data->cost[vect_body] += 10000;
5751 *prologue_cost = cost_data->cost[vect_prologue];
5752 *body_cost = cost_data->cost[vect_body];
5753 *epilogue_cost = cost_data->cost[vect_epilogue];
5756 /* Implement targetm.vectorize.destroy_cost_data. */
5758 static void
5759 rs6000_destroy_cost_data (void *data)
5761 free (data);
5764 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5765 library with vectorized intrinsics. */
5767 static tree
5768 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5769 tree type_in)
5771 char name[32];
5772 const char *suffix = NULL;
5773 tree fntype, new_fndecl, bdecl = NULL_TREE;
5774 int n_args = 1;
5775 const char *bname;
5776 machine_mode el_mode, in_mode;
5777 int n, in_n;
5779 /* Libmass is suitable for unsafe math only, as it does not correctly support
5780 parts of IEEE (such as denormals) with the required precision.  Only support
5781 it if we have VSX to use the simd d2 or f4 functions.
5782 XXX: Add variable length support. */
5783 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5784 return NULL_TREE;
5786 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5787 n = TYPE_VECTOR_SUBPARTS (type_out);
5788 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5789 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5790 if (el_mode != in_mode
5791 || n != in_n)
5792 return NULL_TREE;
5794 switch (fn)
5796 CASE_CFN_ATAN2:
5797 CASE_CFN_HYPOT:
5798 CASE_CFN_POW:
5799 n_args = 2;
5800 gcc_fallthrough ();
5802 CASE_CFN_ACOS:
5803 CASE_CFN_ACOSH:
5804 CASE_CFN_ASIN:
5805 CASE_CFN_ASINH:
5806 CASE_CFN_ATAN:
5807 CASE_CFN_ATANH:
5808 CASE_CFN_CBRT:
5809 CASE_CFN_COS:
5810 CASE_CFN_COSH:
5811 CASE_CFN_ERF:
5812 CASE_CFN_ERFC:
5813 CASE_CFN_EXP2:
5814 CASE_CFN_EXP:
5815 CASE_CFN_EXPM1:
5816 CASE_CFN_LGAMMA:
5817 CASE_CFN_LOG10:
5818 CASE_CFN_LOG1P:
5819 CASE_CFN_LOG2:
5820 CASE_CFN_LOG:
5821 CASE_CFN_SIN:
5822 CASE_CFN_SINH:
5823 CASE_CFN_SQRT:
5824 CASE_CFN_TAN:
5825 CASE_CFN_TANH:
5826 if (el_mode == DFmode && n == 2)
5828 bdecl = mathfn_built_in (double_type_node, fn);
5829 suffix = "d2"; /* pow -> powd2 */
5831 else if (el_mode == SFmode && n == 4)
5833 bdecl = mathfn_built_in (float_type_node, fn);
5834 suffix = "4"; /* powf -> powf4 */
5836 else
5837 return NULL_TREE;
5838 if (!bdecl)
5839 return NULL_TREE;
5840 break;
5842 default:
5843 return NULL_TREE;
5846 gcc_assert (suffix != NULL);
5847 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5848 if (!bname)
5849 return NULL_TREE;
5851 strcpy (name, bname + sizeof ("__builtin_") - 1);
5852 strcat (name, suffix);
5854 if (n_args == 1)
5855 fntype = build_function_type_list (type_out, type_in, NULL);
5856 else if (n_args == 2)
5857 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5858 else
5859 gcc_unreachable ();
5861 /* Build a function declaration for the vectorized function. */
5862 new_fndecl = build_decl (BUILTINS_LOCATION,
5863 FUNCTION_DECL, get_identifier (name), fntype);
5864 TREE_PUBLIC (new_fndecl) = 1;
5865 DECL_EXTERNAL (new_fndecl) = 1;
5866 DECL_IS_NOVOPS (new_fndecl) = 1;
5867 TREE_READONLY (new_fndecl) = 1;
5869 return new_fndecl;
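/* A standalone sketch of the MASS name mangling above: drop the
   "__builtin_" prefix from the scalar builtin's name and append "d2" for
   the 2 x double variant or "4" for the 4 x float variant, so that
   __builtin_pow becomes powd2 and __builtin_powf becomes powf4.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char name[32];
  const char *bname = "__builtin_powf";
  const char *suffix = "4";		/* SFmode with four lanes */
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);
  printf ("%s -> %s\n", bname, name);	/* prints __builtin_powf -> powf4 */
  return 0;
}
#endif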
5872 /* Returns a function decl for a vectorized version of the builtin function
5873 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5874 if it is not available. */
5876 static tree
5877 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5878 tree type_in)
5880 machine_mode in_mode, out_mode;
5881 int in_n, out_n;
5883 if (TARGET_DEBUG_BUILTIN)
5884 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5885 combined_fn_name (combined_fn (fn)),
5886 GET_MODE_NAME (TYPE_MODE (type_out)),
5887 GET_MODE_NAME (TYPE_MODE (type_in)));
5889 if (TREE_CODE (type_out) != VECTOR_TYPE
5890 || TREE_CODE (type_in) != VECTOR_TYPE)
5891 return NULL_TREE;
5893 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5894 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5895 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5896 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5898 switch (fn)
5900 CASE_CFN_COPYSIGN:
5901 if (VECTOR_UNIT_VSX_P (V2DFmode)
5902 && out_mode == DFmode && out_n == 2
5903 && in_mode == DFmode && in_n == 2)
5904 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5905 if (VECTOR_UNIT_VSX_P (V4SFmode)
5906 && out_mode == SFmode && out_n == 4
5907 && in_mode == SFmode && in_n == 4)
5908 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5909 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5910 && out_mode == SFmode && out_n == 4
5911 && in_mode == SFmode && in_n == 4)
5912 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5913 break;
5914 CASE_CFN_CEIL:
5915 if (VECTOR_UNIT_VSX_P (V2DFmode)
5916 && out_mode == DFmode && out_n == 2
5917 && in_mode == DFmode && in_n == 2)
5918 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5919 if (VECTOR_UNIT_VSX_P (V4SFmode)
5920 && out_mode == SFmode && out_n == 4
5921 && in_mode == SFmode && in_n == 4)
5922 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5923 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5924 && out_mode == SFmode && out_n == 4
5925 && in_mode == SFmode && in_n == 4)
5926 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5927 break;
5928 CASE_CFN_FLOOR:
5929 if (VECTOR_UNIT_VSX_P (V2DFmode)
5930 && out_mode == DFmode && out_n == 2
5931 && in_mode == DFmode && in_n == 2)
5932 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5933 if (VECTOR_UNIT_VSX_P (V4SFmode)
5934 && out_mode == SFmode && out_n == 4
5935 && in_mode == SFmode && in_n == 4)
5936 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5937 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5938 && out_mode == SFmode && out_n == 4
5939 && in_mode == SFmode && in_n == 4)
5940 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5941 break;
5942 CASE_CFN_FMA:
5943 if (VECTOR_UNIT_VSX_P (V2DFmode)
5944 && out_mode == DFmode && out_n == 2
5945 && in_mode == DFmode && in_n == 2)
5946 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5947 if (VECTOR_UNIT_VSX_P (V4SFmode)
5948 && out_mode == SFmode && out_n == 4
5949 && in_mode == SFmode && in_n == 4)
5950 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5951 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5952 && out_mode == SFmode && out_n == 4
5953 && in_mode == SFmode && in_n == 4)
5954 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5955 break;
5956 CASE_CFN_TRUNC:
5957 if (VECTOR_UNIT_VSX_P (V2DFmode)
5958 && out_mode == DFmode && out_n == 2
5959 && in_mode == DFmode && in_n == 2)
5960 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5961 if (VECTOR_UNIT_VSX_P (V4SFmode)
5962 && out_mode == SFmode && out_n == 4
5963 && in_mode == SFmode && in_n == 4)
5964 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5965 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5966 && out_mode == SFmode && out_n == 4
5967 && in_mode == SFmode && in_n == 4)
5968 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5969 break;
5970 CASE_CFN_NEARBYINT:
5971 if (VECTOR_UNIT_VSX_P (V2DFmode)
5972 && flag_unsafe_math_optimizations
5973 && out_mode == DFmode && out_n == 2
5974 && in_mode == DFmode && in_n == 2)
5975 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5976 if (VECTOR_UNIT_VSX_P (V4SFmode)
5977 && flag_unsafe_math_optimizations
5978 && out_mode == SFmode && out_n == 4
5979 && in_mode == SFmode && in_n == 4)
5980 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5981 break;
5982 CASE_CFN_RINT:
5983 if (VECTOR_UNIT_VSX_P (V2DFmode)
5984 && !flag_trapping_math
5985 && out_mode == DFmode && out_n == 2
5986 && in_mode == DFmode && in_n == 2)
5987 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5988 if (VECTOR_UNIT_VSX_P (V4SFmode)
5989 && !flag_trapping_math
5990 && out_mode == SFmode && out_n == 4
5991 && in_mode == SFmode && in_n == 4)
5992 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5993 break;
5994 default:
5995 break;
5998 /* Generate calls to libmass if appropriate. */
5999 if (rs6000_veclib_handler)
6000 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6002 return NULL_TREE;
6005 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6007 static tree
6008 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6009 tree type_in)
6011 machine_mode in_mode, out_mode;
6012 int in_n, out_n;
6014 if (TARGET_DEBUG_BUILTIN)
6015 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6016 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6017 GET_MODE_NAME (TYPE_MODE (type_out)),
6018 GET_MODE_NAME (TYPE_MODE (type_in)));
6020 if (TREE_CODE (type_out) != VECTOR_TYPE
6021 || TREE_CODE (type_in) != VECTOR_TYPE)
6022 return NULL_TREE;
6024 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6025 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6026 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6027 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6029 enum rs6000_builtins fn
6030 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6031 switch (fn)
6033 case RS6000_BUILTIN_RSQRTF:
6034 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6035 && out_mode == SFmode && out_n == 4
6036 && in_mode == SFmode && in_n == 4)
6037 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6038 break;
6039 case RS6000_BUILTIN_RSQRT:
6040 if (VECTOR_UNIT_VSX_P (V2DFmode)
6041 && out_mode == DFmode && out_n == 2
6042 && in_mode == DFmode && in_n == 2)
6043 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6044 break;
6045 case RS6000_BUILTIN_RECIPF:
6046 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6047 && out_mode == SFmode && out_n == 4
6048 && in_mode == SFmode && in_n == 4)
6049 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6050 break;
6051 case RS6000_BUILTIN_RECIP:
6052 if (VECTOR_UNIT_VSX_P (V2DFmode)
6053 && out_mode == DFmode && out_n == 2
6054 && in_mode == DFmode && in_n == 2)
6055 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6056 break;
6057 default:
6058 break;
6060 return NULL_TREE;
6063 /* Default CPU string for rs6000*_file_start functions. */
6064 static const char *rs6000_default_cpu;
6066 /* Do anything needed at the start of the asm file. */
6068 static void
6069 rs6000_file_start (void)
6071 char buffer[80];
6072 const char *start = buffer;
6073 FILE *file = asm_out_file;
6075 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6077 default_file_start ();
6079 if (flag_verbose_asm)
6081 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6083 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6085 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6086 start = "";
6089 if (global_options_set.x_rs6000_cpu_index)
6091 fprintf (file, "%s -mcpu=%s", start,
6092 processor_target_table[rs6000_cpu_index].name);
6093 start = "";
6096 if (global_options_set.x_rs6000_tune_index)
6098 fprintf (file, "%s -mtune=%s", start,
6099 processor_target_table[rs6000_tune_index].name);
6100 start = "";
6103 if (PPC405_ERRATUM77)
6105 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6106 start = "";
6109 #ifdef USING_ELFOS_H
6110 switch (rs6000_sdata)
6112 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6113 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6114 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6115 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6118 if (rs6000_sdata && g_switch_value)
6120 fprintf (file, "%s -G %d", start,
6121 g_switch_value);
6122 start = "";
6124 #endif
6126 if (*start == '\0')
6127 putc ('\n', file);
6130 #ifdef USING_ELFOS_H
6131 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6132 && !global_options_set.x_rs6000_cpu_index)
6134 fputs ("\t.machine ", asm_out_file);
6135 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6136 fputs ("power9\n", asm_out_file);
6137 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6138 fputs ("power8\n", asm_out_file);
6139 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6140 fputs ("power7\n", asm_out_file);
6141 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6142 fputs ("power6\n", asm_out_file);
6143 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6144 fputs ("power5\n", asm_out_file);
6145 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6146 fputs ("power4\n", asm_out_file);
6147 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6148 fputs ("ppc64\n", asm_out_file);
6149 else
6150 fputs ("ppc\n", asm_out_file);
6152 #endif
6154 if (DEFAULT_ABI == ABI_ELFv2)
6155 fprintf (file, "\t.abiversion 2\n");
6159 /* Return nonzero if this function is known to have a null epilogue. */
6161 int
6162 direct_return (void)
6164 if (reload_completed)
6166 rs6000_stack_t *info = rs6000_stack_info ();
6168 if (info->first_gp_reg_save == 32
6169 && info->first_fp_reg_save == 64
6170 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6171 && ! info->lr_save_p
6172 && ! info->cr_save_p
6173 && info->vrsave_size == 0
6174 && ! info->push_p)
6175 return 1;
6178 return 0;
6181 /* Return the number of instructions it takes to form a constant in an
6182 integer register. */
6184 static int
6185 num_insns_constant_wide (HOST_WIDE_INT value)
6187 /* signed constant loadable with addi */
6188 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6189 return 1;
6191 /* constant loadable with addis */
6192 else if ((value & 0xffff) == 0
6193 && (value >> 31 == -1 || value >> 31 == 0))
6194 return 1;
6196 else if (TARGET_POWERPC64)
6198 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6199 HOST_WIDE_INT high = value >> 31;
6201 if (high == 0 || high == -1)
6202 return 2;
6204 high >>= 1;
6206 if (low == 0)
6207 return num_insns_constant_wide (high) + 1;
6208 else if (high == 0)
6209 return num_insns_constant_wide (low) + 1;
6210 else
6211 return (num_insns_constant_wide (high)
6212 + num_insns_constant_wide (low) + 1);
6215 else
6216 return 2;
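/* A simplified standalone model of the counting logic above, restricted to
   32-bit values: li covers signed 16-bit immediates, lis covers values
   whose low 16 bits are zero, and everything else needs an lis/ori pair.
   The 64-bit path additionally splits the value into two 32-bit halves and
   combines them with a shift, which is where the "+ 1" terms come from.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static int
insns_for_si (int32_t value)
{
  if (value >= -0x8000 && value < 0x8000)
    return 1;				/* li rD,value */
  if ((value & 0xffff) == 0)
    return 1;				/* lis rD,value@h */
  return 2;				/* lis + ori */
}

int
main (void)
{
  printf ("0x7fff     -> %d insn(s)\n", insns_for_si (0x7fff));		/* 1 */
  printf ("0x12340000 -> %d insn(s)\n", insns_for_si (0x12340000));	/* 1 */
  printf ("0x12345678 -> %d insn(s)\n", insns_for_si (0x12345678));	/* 2 */
  return 0;
}
#endif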
6219 int
6220 num_insns_constant (rtx op, machine_mode mode)
6222 HOST_WIDE_INT low, high;
6224 switch (GET_CODE (op))
6226 case CONST_INT:
6227 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6228 && rs6000_is_valid_and_mask (op, mode))
6229 return 2;
6230 else
6231 return num_insns_constant_wide (INTVAL (op));
6233 case CONST_WIDE_INT:
6235 int i;
6236 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6237 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6238 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6239 return ins;
6242 case CONST_DOUBLE:
6243 if (mode == SFmode || mode == SDmode)
6245 long l;
6247 if (DECIMAL_FLOAT_MODE_P (mode))
6248 REAL_VALUE_TO_TARGET_DECIMAL32
6249 (*CONST_DOUBLE_REAL_VALUE (op), l);
6250 else
6251 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6252 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6255 long l[2];
6256 if (DECIMAL_FLOAT_MODE_P (mode))
6257 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6258 else
6259 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6260 high = l[WORDS_BIG_ENDIAN == 0];
6261 low = l[WORDS_BIG_ENDIAN != 0];
6263 if (TARGET_32BIT)
6264 return (num_insns_constant_wide (low)
6265 + num_insns_constant_wide (high));
6266 else
6268 if ((high == 0 && low >= 0)
6269 || (high == -1 && low < 0))
6270 return num_insns_constant_wide (low);
6272 else if (rs6000_is_valid_and_mask (op, mode))
6273 return 2;
6275 else if (low == 0)
6276 return num_insns_constant_wide (high) + 1;
6278 else
6279 return (num_insns_constant_wide (high)
6280 + num_insns_constant_wide (low) + 1);
6283 default:
6284 gcc_unreachable ();
6288 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6289 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6290 corresponding element of the vector, but for V4SFmode and V2SFmode,
6291 the corresponding "float" is interpreted as an SImode integer. */
6293 HOST_WIDE_INT
6294 const_vector_elt_as_int (rtx op, unsigned int elt)
6296 rtx tmp;
6298 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6299 gcc_assert (GET_MODE (op) != V2DImode
6300 && GET_MODE (op) != V2DFmode);
6302 tmp = CONST_VECTOR_ELT (op, elt);
6303 if (GET_MODE (op) == V4SFmode
6304 || GET_MODE (op) == V2SFmode)
6305 tmp = gen_lowpart (SImode, tmp);
6306 return INTVAL (tmp);
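/* What gen_lowpart (SImode, ...) accomplishes for a float element above is
   conceptually a reinterpretation of the value's bit pattern as a 32-bit
   integer.  A portable standalone equivalent (demo only) uses memcpy-based
   type punning:  */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int32_t
float_bits_as_int (float f)
{
  int32_t i;
  memcpy (&i, &f, sizeof (i));		/* well-defined, unlike a pointer cast */
  return i;
}

int
main (void)
{
  printf ("1.0f -> 0x%08x\n", (unsigned) float_bits_as_int (1.0f));	/* 0x3f800000 */
  return 0;
}
#endif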
6309 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6310 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6311 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6312 all items are set to the same value and contain COPIES replicas of the
6313 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6314 operand and the others are set to the value of the operand's msb. */
6316 static bool
6317 vspltis_constant (rtx op, unsigned step, unsigned copies)
6319 machine_mode mode = GET_MODE (op);
6320 machine_mode inner = GET_MODE_INNER (mode);
6322 unsigned i;
6323 unsigned nunits;
6324 unsigned bitsize;
6325 unsigned mask;
6327 HOST_WIDE_INT val;
6328 HOST_WIDE_INT splat_val;
6329 HOST_WIDE_INT msb_val;
6331 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6332 return false;
6334 nunits = GET_MODE_NUNITS (mode);
6335 bitsize = GET_MODE_BITSIZE (inner);
6336 mask = GET_MODE_MASK (inner);
6338 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6339 splat_val = val;
6340 msb_val = val >= 0 ? 0 : -1;
6342 /* Construct the value to be splatted, if possible. If not, return 0. */
6343 for (i = 2; i <= copies; i *= 2)
6345 HOST_WIDE_INT small_val;
6346 bitsize /= 2;
6347 small_val = splat_val >> bitsize;
6348 mask >>= bitsize;
6349 if (splat_val != ((HOST_WIDE_INT)
6350 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6351 | (small_val & mask)))
6352 return false;
6353 splat_val = small_val;
6356 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6357 if (EASY_VECTOR_15 (splat_val))
6360 /* Also check if we can splat, and then add the result to itself. Do so if
6361 the value is positive, or if the splat instruction is using OP's mode;
6362 for splat_val < 0, the splat and the add should use the same mode. */
6363 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6364 && (splat_val >= 0 || (step == 1 && copies == 1)))
6367 /* Also check if we are loading up the most significant bit, which can be done
6368 by loading up -1 and shifting the value left by -1.  */
6369 else if (EASY_VECTOR_MSB (splat_val, inner))
6372 else
6373 return false;
6375 /* Check if VAL is present in every STEP-th element, and the
6376 other elements are filled with its most significant bit. */
6377 for (i = 1; i < nunits; ++i)
6379 HOST_WIDE_INT desired_val;
6380 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6381 if ((i & (step - 1)) == 0)
6382 desired_val = val;
6383 else
6384 desired_val = msb_val;
6386 if (desired_val != const_vector_elt_as_int (op, elt))
6387 return false;
6390 return true;
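/* A simplified standalone model of the splat test above for the common
   step == 1, copies == 1 case: every element must equal the candidate
   value, and the value must fit the 5-bit signed vspltis immediate
   (EASY_VECTOR_15 corresponds to the range -16..15).  */
#if 0
#include <stdio.h>

static int
is_easy_splat (const int *elts, int n)
{
  int i, val = elts[0];
  if (val < -16 || val > 15)
    return 0;
  for (i = 1; i < n; i++)
    if (elts[i] != val)
      return 0;
  return 1;
}

int
main (void)
{
  int a[4] = { 5, 5, 5, 5 };
  int b[4] = { 5, 5, 0, 0 };
  printf ("{5,5,5,5} -> %d\n", is_easy_splat (a, 4));	/* 1 */
  printf ("{5,5,0,0} -> %d\n", is_easy_splat (b, 4));	/* 0: a vspltis_shifted case */
  return 0;
}
#endif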
6393 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6394 instruction, filling in the bottom elements with 0 or -1.
6396 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6397 for the number of zeroes to shift in, or negative for the number of 0xff
6398 bytes to shift in.
6400 OP is a CONST_VECTOR. */
6402 static int
6403 vspltis_shifted (rtx op)
6405 machine_mode mode = GET_MODE (op);
6406 machine_mode inner = GET_MODE_INNER (mode);
6408 unsigned i, j;
6409 unsigned nunits;
6410 unsigned mask;
6412 HOST_WIDE_INT val;
6414 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6415 return 0;
6417 /* We need to create pseudo registers to do the shift, so don't recognize
6418 shift vector constants after reload. */
6419 if (!can_create_pseudo_p ())
6420 return 0;
6422 nunits = GET_MODE_NUNITS (mode);
6423 mask = GET_MODE_MASK (inner);
6425 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6427 /* Check if the value can really be the operand of a vspltis[bhw]. */
6428 if (EASY_VECTOR_15 (val))
6431 /* Also check if we are loading up the most significant bit which can be done
6432 by loading up -1 and shifting the value left by -1. */
6433 else if (EASY_VECTOR_MSB (val, inner))
6436 else
6437 return 0;
6439 /* Check if VAL is present in every STEP-th element until we find elements
6440 that are 0 or all 1 bits. */
6441 for (i = 1; i < nunits; ++i)
6443 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6444 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6446 /* If the value isn't the splat value, check for the remaining elements
6447 being 0/-1. */
6448 if (val != elt_val)
6450 if (elt_val == 0)
6452 for (j = i+1; j < nunits; ++j)
6454 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6455 if (const_vector_elt_as_int (op, elt2) != 0)
6456 return 0;
6459 return (nunits - i) * GET_MODE_SIZE (inner);
6462 else if ((elt_val & mask) == mask)
6464 for (j = i+1; j < nunits; ++j)
6466 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6467 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6468 return 0;
6471 return -((nunits - i) * GET_MODE_SIZE (inner));
6474 else
6475 return 0;
6479 /* If all elements are equal, we don't need to do VSLDOI.  */
6480 return 0;
6484 /* Return true if OP is of the given MODE and can be synthesized
6485 with a vspltisb, vspltish or vspltisw. */
6487 bool
6488 easy_altivec_constant (rtx op, machine_mode mode)
6490 unsigned step, copies;
6492 if (mode == VOIDmode)
6493 mode = GET_MODE (op);
6494 else if (mode != GET_MODE (op))
6495 return false;
6497 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6498 constants. */
6499 if (mode == V2DFmode)
6500 return zero_constant (op, mode);
6502 else if (mode == V2DImode)
6504 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6505 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6506 return false;
6508 if (zero_constant (op, mode))
6509 return true;
6511 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6512 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6513 return true;
6515 return false;
6518 /* V1TImode is a special container for TImode. Ignore for now. */
6519 else if (mode == V1TImode)
6520 return false;
6522 /* Start with a vspltisw. */
6523 step = GET_MODE_NUNITS (mode) / 4;
6524 copies = 1;
6526 if (vspltis_constant (op, step, copies))
6527 return true;
6529 /* Then try with a vspltish. */
6530 if (step == 1)
6531 copies <<= 1;
6532 else
6533 step >>= 1;
6535 if (vspltis_constant (op, step, copies))
6536 return true;
6538 /* And finally a vspltisb. */
6539 if (step == 1)
6540 copies <<= 1;
6541 else
6542 step >>= 1;
6544 if (vspltis_constant (op, step, copies))
6545 return true;
6547 if (vspltis_shifted (op) != 0)
6548 return true;
6550 return false;
6553 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6554 result is OP. Abort if it is not possible. */
6556 rtx
6557 gen_easy_altivec_constant (rtx op)
6559 machine_mode mode = GET_MODE (op);
6560 int nunits = GET_MODE_NUNITS (mode);
6561 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6562 unsigned step = nunits / 4;
6563 unsigned copies = 1;
6565 /* Start with a vspltisw. */
6566 if (vspltis_constant (op, step, copies))
6567 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6569 /* Then try with a vspltish. */
6570 if (step == 1)
6571 copies <<= 1;
6572 else
6573 step >>= 1;
6575 if (vspltis_constant (op, step, copies))
6576 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6578 /* And finally a vspltisb. */
6579 if (step == 1)
6580 copies <<= 1;
6581 else
6582 step >>= 1;
6584 if (vspltis_constant (op, step, copies))
6585 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6587 gcc_unreachable ();
6590 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6591 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6593 Return the number of instructions needed (1 or 2) into the address pointed
6594 via NUM_INSNS_PTR.
6596 Return the constant that is being split via CONSTANT_PTR. */
6598 bool
6599 xxspltib_constant_p (rtx op,
6600 machine_mode mode,
6601 int *num_insns_ptr,
6602 int *constant_ptr)
6604 size_t nunits = GET_MODE_NUNITS (mode);
6605 size_t i;
6606 HOST_WIDE_INT value;
6607 rtx element;
6609 /* Set the returned values to out of bound values. */
6610 *num_insns_ptr = -1;
6611 *constant_ptr = 256;
6613 if (!TARGET_P9_VECTOR)
6614 return false;
6616 if (mode == VOIDmode)
6617 mode = GET_MODE (op);
6619 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6620 return false;
6622 /* Handle (vec_duplicate <constant>). */
6623 if (GET_CODE (op) == VEC_DUPLICATE)
6625 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6626 && mode != V2DImode)
6627 return false;
6629 element = XEXP (op, 0);
6630 if (!CONST_INT_P (element))
6631 return false;
6633 value = INTVAL (element);
6634 if (!IN_RANGE (value, -128, 127))
6635 return false;
6638 /* Handle (const_vector [...]). */
6639 else if (GET_CODE (op) == CONST_VECTOR)
6641 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6642 && mode != V2DImode)
6643 return false;
6645 element = CONST_VECTOR_ELT (op, 0);
6646 if (!CONST_INT_P (element))
6647 return false;
6649 value = INTVAL (element);
6650 if (!IN_RANGE (value, -128, 127))
6651 return false;
6653 for (i = 1; i < nunits; i++)
6655 element = CONST_VECTOR_ELT (op, i);
6656 if (!CONST_INT_P (element))
6657 return false;
6659 if (value != INTVAL (element))
6660 return false;
6664 /* Handle integer constants being loaded into the upper part of the VSX
6665 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6666 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6667 else if (CONST_INT_P (op))
6669 if (!SCALAR_INT_MODE_P (mode))
6670 return false;
6672 value = INTVAL (op);
6673 if (!IN_RANGE (value, -128, 127))
6674 return false;
6676 if (!IN_RANGE (value, -1, 0))
6678 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6679 return false;
6681 if (EASY_VECTOR_15 (value))
6682 return false;
6686 else
6687 return false;
6689 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6690 sign extend. Special case 0/-1 to allow getting any VSX register instead
6691 of an Altivec register. */
6692 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6693 && EASY_VECTOR_15 (value))
6694 return false;
6696 /* Return # of instructions and the constant byte for XXSPLTIB. */
6697 if (mode == V16QImode)
6698 *num_insns_ptr = 1;
6700 else if (IN_RANGE (value, -1, 0))
6701 *num_insns_ptr = 1;
6703 else
6704 *num_insns_ptr = 2;
6706 *constant_ptr = (int) value;
6707 return true;
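/* A simplified standalone model of the test above: the constant must be a
   splat of a value in the signed byte range -128..127.  A V16QImode splat
   (or the easy 0/-1 patterns) takes a single xxspltib; wider element types
   need a second instruction to sign-extend the splatted bytes.  */
#if 0
#include <stdio.h>

static int
xxspltib_insns (const int *elts, int n, int bytes_per_elt)
{
  int i, val = elts[0];
  if (val < -128 || val > 127)
    return 0;				/* not representable at all */
  for (i = 1; i < n; i++)
    if (elts[i] != val)
      return 0;
  return (bytes_per_elt == 1 || val == 0 || val == -1) ? 1 : 2;
}

int
main (void)
{
  int v4si[4] = { 42, 42, 42, 42 };
  printf ("V4SI splat of 42 -> %d insn(s)\n", xxspltib_insns (v4si, 4, 4));	/* 2 */
  return 0;
}
#endif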
6710 const char *
6711 output_vec_const_move (rtx *operands)
6713 int shift;
6714 machine_mode mode;
6715 rtx dest, vec;
6717 dest = operands[0];
6718 vec = operands[1];
6719 mode = GET_MODE (dest);
6721 if (TARGET_VSX)
6723 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6724 int xxspltib_value = 256;
6725 int num_insns = -1;
6727 if (zero_constant (vec, mode))
6729 if (TARGET_P9_VECTOR)
6730 return "xxspltib %x0,0";
6732 else if (dest_vmx_p)
6733 return "vspltisw %0,0";
6735 else
6736 return "xxlxor %x0,%x0,%x0";
6739 if (all_ones_constant (vec, mode))
6741 if (TARGET_P9_VECTOR)
6742 return "xxspltib %x0,255";
6744 else if (dest_vmx_p)
6745 return "vspltisw %0,-1";
6747 else if (TARGET_P8_VECTOR)
6748 return "xxlorc %x0,%x0,%x0";
6750 else
6751 gcc_unreachable ();
6754 if (TARGET_P9_VECTOR
6755 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6757 if (num_insns == 1)
6759 operands[2] = GEN_INT (xxspltib_value & 0xff);
6760 return "xxspltib %x0,%2";
6763 return "#";
6767 if (TARGET_ALTIVEC)
6769 rtx splat_vec;
6771 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6772 if (zero_constant (vec, mode))
6773 return "vspltisw %0,0";
6775 if (all_ones_constant (vec, mode))
6776 return "vspltisw %0,-1";
6778 /* Do we need to construct a value using VSLDOI? */
6779 shift = vspltis_shifted (vec);
6780 if (shift != 0)
6781 return "#";
6783 splat_vec = gen_easy_altivec_constant (vec);
6784 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6785 operands[1] = XEXP (splat_vec, 0);
6786 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6787 return "#";
6789 switch (GET_MODE (splat_vec))
6791 case E_V4SImode:
6792 return "vspltisw %0,%1";
6794 case E_V8HImode:
6795 return "vspltish %0,%1";
6797 case E_V16QImode:
6798 return "vspltisb %0,%1";
6800 default:
6801 gcc_unreachable ();
6805 gcc_unreachable ();
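/* A standalone sketch of the zero-vector selection at the top of the
   function: the mnemonic depends on the ISA level and on whether the
   destination is an Altivec register.  Demo only; the flags are plain
   ints rather than the real target macros.  */
#if 0
#include <stdio.h>

static const char *
zero_vec_insn (int p9_vector, int dest_is_vmx)
{
  if (p9_vector)
    return "xxspltib %x0,0";
  if (dest_is_vmx)
    return "vspltisw %0,0";
  return "xxlxor %x0,%x0,%x0";
}

int
main (void)
{
  printf ("%s\n", zero_vec_insn (0, 0));	/* xxlxor %x0,%x0,%x0 */
  return 0;
}
#endif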
6808 /* Initialize TARGET of vector PAIRED to VALS. */
6810 void
6811 paired_expand_vector_init (rtx target, rtx vals)
6813 machine_mode mode = GET_MODE (target);
6814 int n_elts = GET_MODE_NUNITS (mode);
6815 int n_var = 0;
6816 rtx x, new_rtx, tmp, constant_op, op1, op2;
6817 int i;
6819 for (i = 0; i < n_elts; ++i)
6821 x = XVECEXP (vals, 0, i);
6822 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6823 ++n_var;
6825 if (n_var == 0)
6827 /* Load from constant pool. */
6828 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6829 return;
6832 if (n_var == 2)
6834 /* The vector is initialized only with non-constants. */
6835 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6836 XVECEXP (vals, 0, 1));
6838 emit_move_insn (target, new_rtx);
6839 return;
6842 /* One field is non-constant and the other one is a constant. Load the
6843 constant from the constant pool and use the ps_merge instruction to
6844 construct the whole vector. */
6845 op1 = XVECEXP (vals, 0, 0);
6846 op2 = XVECEXP (vals, 0, 1);
6848 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6850 tmp = gen_reg_rtx (GET_MODE (constant_op));
6851 emit_move_insn (tmp, constant_op);
6853 if (CONSTANT_P (op1))
6854 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6855 else
6856 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6858 emit_move_insn (target, new_rtx);
6861 void
6862 paired_expand_vector_move (rtx operands[])
6864 rtx op0 = operands[0], op1 = operands[1];
6866 emit_move_insn (op0, op1);
6869 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6870 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6871 operands for the relation operation COND. This is a recursive
6872 function. */
6874 static void
6875 paired_emit_vector_compare (enum rtx_code rcode,
6876 rtx dest, rtx op0, rtx op1,
6877 rtx cc_op0, rtx cc_op1)
6879 rtx tmp = gen_reg_rtx (V2SFmode);
6880 rtx tmp1, max, min;
6882 gcc_assert (TARGET_PAIRED_FLOAT);
6883 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6885 switch (rcode)
6887 case LT:
6888 case LTU:
6889 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6890 return;
6891 case GE:
6892 case GEU:
6893 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6894 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6895 return;
6896 case LE:
6897 case LEU:
6898 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6899 return;
6900 case GT:
6901 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6902 return;
6903 case EQ:
6904 tmp1 = gen_reg_rtx (V2SFmode);
6905 max = gen_reg_rtx (V2SFmode);
6906 min = gen_reg_rtx (V2SFmode);
6907 gen_reg_rtx (V2SFmode);
6909 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6910 emit_insn (gen_selv2sf4
6911 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6912 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6913 emit_insn (gen_selv2sf4
6914 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6915 emit_insn (gen_subv2sf3 (tmp1, min, max));
6916 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6917 return;
6918 case NE:
6919 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6920 return;
6921 case UNLE:
6922 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6923 return;
6924 case UNLT:
6925 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6926 return;
6927 case UNGE:
6928 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6929 return;
6930 case UNGT:
6931 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6932 return;
6933 default:
6934 gcc_unreachable ();
6937 return;
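/* The recursion above funnels every comparison into the one form the
   paired unit selects on directly (GE, via a subtraction feeding ps_sel),
   by swapping either the comparison operands or the select operands.  A
   standalone summary of that mapping, with made-up enumerators:  */
#if 0
#include <stdio.h>

enum cmp { LT, GE, LE, GT, EQ, NE };

static const char *
reduction (enum cmp code)
{
  switch (code)
    {
    case LT: return "GE with the select operands swapped";
    case GE: return "native: sub, then sel against 0";
    case LE: return "GE with the comparison operands swapped";
    case GT: return "LE with the select operands swapped";
    case EQ: return "min/max trick: two GE selects, a sub, one more sel";
    case NE: return "EQ with the select operands swapped";
    }
  return "?";
}

int
main (void)
{
  printf ("LT -> %s\n", reduction (LT));
  return 0;
}
#endif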
6940 /* Emit vector conditional expression.
6941 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6942 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6944 int
6945 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6946 rtx cond, rtx cc_op0, rtx cc_op1)
6948 enum rtx_code rcode = GET_CODE (cond);
6950 if (!TARGET_PAIRED_FLOAT)
6951 return 0;
6953 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6955 return 1;
6958 /* Initialize vector TARGET to VALS. */
6960 void
6961 rs6000_expand_vector_init (rtx target, rtx vals)
6963 machine_mode mode = GET_MODE (target);
6964 machine_mode inner_mode = GET_MODE_INNER (mode);
6965 int n_elts = GET_MODE_NUNITS (mode);
6966 int n_var = 0, one_var = -1;
6967 bool all_same = true, all_const_zero = true;
6968 rtx x, mem;
6969 int i;
6971 for (i = 0; i < n_elts; ++i)
6973 x = XVECEXP (vals, 0, i);
6974 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6975 ++n_var, one_var = i;
6976 else if (x != CONST0_RTX (inner_mode))
6977 all_const_zero = false;
6979 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6980 all_same = false;
6983 if (n_var == 0)
6985 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6986 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6987 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6989 /* Zero register. */
6990 emit_move_insn (target, CONST0_RTX (mode));
6991 return;
6993 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6995 /* Splat immediate. */
6996 emit_insn (gen_rtx_SET (target, const_vec));
6997 return;
6999 else
7001 /* Load from constant pool. */
7002 emit_move_insn (target, const_vec);
7003 return;
7007 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7008 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7010 rtx op[2];
7011 size_t i;
7012 size_t num_elements = all_same ? 1 : 2;
7013 for (i = 0; i < num_elements; i++)
7015 op[i] = XVECEXP (vals, 0, i);
7016 /* Just in case there is a SUBREG with a smaller mode, do a
7017 conversion. */
7018 if (GET_MODE (op[i]) != inner_mode)
7020 rtx tmp = gen_reg_rtx (inner_mode);
7021 convert_move (tmp, op[i], 0);
7022 op[i] = tmp;
7024 /* Allow load with splat double word. */
7025 else if (MEM_P (op[i]))
7027 if (!all_same)
7028 op[i] = force_reg (inner_mode, op[i]);
7030 else if (!REG_P (op[i]))
7031 op[i] = force_reg (inner_mode, op[i]);
7034 if (all_same)
7036 if (mode == V2DFmode)
7037 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7038 else
7039 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7041 else
7043 if (mode == V2DFmode)
7044 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7045 else
7046 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7048 return;
7051 /* Special case initializing vector int if we are on 64-bit systems with
7052 direct move or we have the ISA 3.0 instructions. */
7053 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7054 && TARGET_DIRECT_MOVE_64BIT)
7056 if (all_same)
7058 rtx element0 = XVECEXP (vals, 0, 0);
7059 if (MEM_P (element0))
7060 element0 = rs6000_address_for_fpconvert (element0);
7061 else
7062 element0 = force_reg (SImode, element0);
7064 if (TARGET_P9_VECTOR)
7065 emit_insn (gen_vsx_splat_v4si (target, element0));
7066 else
7068 rtx tmp = gen_reg_rtx (DImode);
7069 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7070 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7072 return;
7074 else
7076 rtx elements[4];
7077 size_t i;
7079 for (i = 0; i < 4; i++)
7081 elements[i] = XVECEXP (vals, 0, i);
7082 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7083 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7086 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7087 elements[2], elements[3]));
7088 return;
7092 /* With single precision floating point on VSX, we know that internally single
7093 precision is actually represented as a double, and either make 2 V2DF
7094 vectors, and convert these vectors to single precision, or do one
7095 conversion, and splat the result to the other elements. */
7096 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7098 if (all_same)
7100 rtx element0 = XVECEXP (vals, 0, 0);
7102 if (TARGET_P9_VECTOR)
7104 if (MEM_P (element0))
7105 element0 = rs6000_address_for_fpconvert (element0);
7107 emit_insn (gen_vsx_splat_v4sf (target, element0));
7110 else
7112 rtx freg = gen_reg_rtx (V4SFmode);
7113 rtx sreg = force_reg (SFmode, element0);
7114 rtx cvt = (TARGET_XSCVDPSPN
7115 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7116 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7118 emit_insn (cvt);
7119 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7120 const0_rtx));
7123 else
7125 rtx dbl_even = gen_reg_rtx (V2DFmode);
7126 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7127 rtx flt_even = gen_reg_rtx (V4SFmode);
7128 rtx flt_odd = gen_reg_rtx (V4SFmode);
7129 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7130 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7131 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7132 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7134 /* Use VMRGEW if we can instead of doing a permute. */
7135 if (TARGET_P8_VECTOR)
7137 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7138 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7139 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7140 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7141 if (BYTES_BIG_ENDIAN)
7142 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7143 else
7144 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7146 else
7148 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7149 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7150 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7151 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7152 rs6000_expand_extract_even (target, flt_even, flt_odd);
7155 return;
7158 /* Special case splat initialization of vector short/char if we are on
7159 64-bit systems with direct move. */
7160 if (all_same && TARGET_DIRECT_MOVE_64BIT
7161 && (mode == V16QImode || mode == V8HImode))
7163 rtx op0 = XVECEXP (vals, 0, 0);
7164 rtx di_tmp = gen_reg_rtx (DImode);
7166 if (!REG_P (op0))
7167 op0 = force_reg (GET_MODE_INNER (mode), op0);
7169 if (mode == V16QImode)
7171 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7172 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7173 return;
7176 if (mode == V8HImode)
7178 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7179 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7180 return;
7184 /* Store value to stack temp. Load vector element. Splat. However, splat
7185 of 64-bit items is not supported on Altivec. */
7186 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7188 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7189 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7190 XVECEXP (vals, 0, 0));
7191 x = gen_rtx_UNSPEC (VOIDmode,
7192 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7193 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7194 gen_rtvec (2,
7195 gen_rtx_SET (target, mem),
7196 x)));
7197 x = gen_rtx_VEC_SELECT (inner_mode, target,
7198 gen_rtx_PARALLEL (VOIDmode,
7199 gen_rtvec (1, const0_rtx)));
7200 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7201 return;
7204 /* One field is non-constant. Load constant then overwrite
7205 varying field. */
7206 if (n_var == 1)
7208 rtx copy = copy_rtx (vals);
7210 /* Load constant part of vector, substitute neighboring value for
7211 varying element. */
7212 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7213 rs6000_expand_vector_init (target, copy);
7215 /* Insert variable. */
7216 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7217 return;
7220 /* Construct the vector in memory one field at a time
7221 and load the whole vector. */
7222 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7223 for (i = 0; i < n_elts; i++)
7224 emit_move_insn (adjust_address_nv (mem, inner_mode,
7225 i * GET_MODE_SIZE (inner_mode)),
7226 XVECEXP (vals, 0, i));
7227 emit_move_insn (target, mem);
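/* Illustrative sketch, not part of rs6000.c: source-level initializers that
   exercise the paths above, written with GCC's generic vector extension.
   All names here are made up for this example.  A duplicated element takes
   the splat path (e.g. xxspltd/lxvdsx for V2DF), two distinct values take
   the concat path (xxpermdi), and the general case falls back to building
   the vector in a stack temporary, one store per field, then one vector
   load.  */

typedef double example_v2df __attribute__ ((vector_size (16)));
typedef int example_v4si __attribute__ ((vector_size (16)));

example_v2df
example_splat_v2df (double a)
{
  return (example_v2df) { a, a };	/* all_same: single splat insn.  */
}

example_v2df
example_concat_v2df (double a, double b)
{
  return (example_v2df) { a, b };	/* two elements: concat insn.  */
}

example_v4si
example_init_v4si_memory (const int e[4])
{
  /* Semantically what the stack-temporary fallback above produces.  */
  union { int elems[4]; example_v4si vec; } buf;
  for (int i = 0; i < 4; i++)
    buf.elems[i] = e[i];
  return buf.vec;
}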
7230 /* Set field ELT of TARGET to VAL. */
7232 void
7233 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7235 machine_mode mode = GET_MODE (target);
7236 machine_mode inner_mode = GET_MODE_INNER (mode);
7237 rtx reg = gen_reg_rtx (mode);
7238 rtx mask, mem, x;
7239 int width = GET_MODE_SIZE (inner_mode);
7240 int i;
7242 val = force_reg (GET_MODE (val), val);
7244 if (VECTOR_MEM_VSX_P (mode))
7246 rtx insn = NULL_RTX;
7247 rtx elt_rtx = GEN_INT (elt);
7249 if (mode == V2DFmode)
7250 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7252 else if (mode == V2DImode)
7253 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7255 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7257 if (mode == V4SImode)
7258 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7259 else if (mode == V8HImode)
7260 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7261 else if (mode == V16QImode)
7262 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7263 else if (mode == V4SFmode)
7264 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7267 if (insn)
7269 emit_insn (insn);
7270 return;
7274 /* Simplify setting single element vectors like V1TImode. */
7275 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7277 emit_move_insn (target, gen_lowpart (mode, val));
7278 return;
7281 /* Load single variable value. */
7282 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7283 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7284 x = gen_rtx_UNSPEC (VOIDmode,
7285 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7286 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7287 gen_rtvec (2,
7288 gen_rtx_SET (reg, mem),
7289 x)));
7291 /* Linear sequence. */
7292 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7293 for (i = 0; i < 16; ++i)
7294 XVECEXP (mask, 0, i) = GEN_INT (i);
7296 /* Set permute mask to insert element into target. */
7297 for (i = 0; i < width; ++i)
7298 XVECEXP (mask, 0, elt*width + i)
7299 = GEN_INT (i + 0x10);
7300 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7302 if (BYTES_BIG_ENDIAN)
7303 x = gen_rtx_UNSPEC (mode,
7304 gen_rtvec (3, target, reg,
7305 force_reg (V16QImode, x)),
7306 UNSPEC_VPERM);
7307 else
7309 if (TARGET_P9_VECTOR)
7310 x = gen_rtx_UNSPEC (mode,
7311 gen_rtvec (3, target, reg,
7312 force_reg (V16QImode, x)),
7313 UNSPEC_VPERMR);
7314 else
7316 /* Invert selector. We prefer to generate VNAND on P8 so
7317 that future fusion opportunities can kick in, but must
7318 generate VNOR elsewhere. */
7319 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7320 rtx iorx = (TARGET_P8_VECTOR
7321 ? gen_rtx_IOR (V16QImode, notx, notx)
7322 : gen_rtx_AND (V16QImode, notx, notx));
7323 rtx tmp = gen_reg_rtx (V16QImode);
7324 emit_insn (gen_rtx_SET (tmp, iorx));
7326 /* Permute with operands reversed and adjusted selector. */
7327 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7328 UNSPEC_VPERM);
7332 emit_insn (gen_rtx_SET (target, x));
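/* Illustrative sketch, not part of rs6000.c: the 16-byte VPERM selector
   built by the two loops above, computed as plain C.  Selector bytes
   0x00-0x0f pick bytes of the first input (the unchanged target) and
   0x10-0x1f pick bytes of the second input (the register holding the new
   element).  */

#include <stdio.h>

int
main (void)
{
  unsigned char mask[16];
  int elt = 2, width = 4;	/* e.g. insert element 2 of a V4SI.  */

  for (int i = 0; i < 16; i++)
    mask[i] = i;		/* Identity: keep the target bytes.  */
  for (int i = 0; i < width; i++)
    mask[elt * width + i] = i + 0x10;	/* Overlay the new element.  */

  /* Prints: 00 01 02 03 04 05 06 07 10 11 12 13 0c 0d 0e 0f  */
  for (int i = 0; i < 16; i++)
    printf ("%02x ", mask[i]);
  printf ("\n");
  return 0;
}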
7335 /* Extract field ELT from VEC into TARGET. */
7337 void
7338 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7340 machine_mode mode = GET_MODE (vec);
7341 machine_mode inner_mode = GET_MODE_INNER (mode);
7342 rtx mem;
7344 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7346 switch (mode)
7348 default:
7349 break;
7350 case E_V1TImode:
7351 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7352 emit_move_insn (target, gen_lowpart (TImode, vec));
7353 break;
7354 case E_V2DFmode:
7355 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7356 return;
7357 case E_V2DImode:
7358 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7359 return;
7360 case E_V4SFmode:
7361 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7362 return;
7363 case E_V16QImode:
7364 if (TARGET_DIRECT_MOVE_64BIT)
7366 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7367 return;
7369 else
7370 break;
7371 case E_V8HImode:
7372 if (TARGET_DIRECT_MOVE_64BIT)
7374 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7375 return;
7377 else
7378 break;
7379 case E_V4SImode:
7380 if (TARGET_DIRECT_MOVE_64BIT)
7382 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7383 return;
7385 break;
7388 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7389 && TARGET_DIRECT_MOVE_64BIT)
7391 if (GET_MODE (elt) != DImode)
7393 rtx tmp = gen_reg_rtx (DImode);
7394 convert_move (tmp, elt, 0);
7395 elt = tmp;
7397 else if (!REG_P (elt))
7398 elt = force_reg (DImode, elt);
7400 switch (mode)
7402 case E_V2DFmode:
7403 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7404 return;
7406 case E_V2DImode:
7407 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7408 return;
7410 case E_V4SFmode:
7411 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7412 return;
7414 case E_V4SImode:
7415 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7416 return;
7418 case E_V8HImode:
7419 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7420 return;
7422 case E_V16QImode:
7423 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7424 return;
7426 default:
7427 gcc_unreachable ();
7431 gcc_assert (CONST_INT_P (elt));
7433 /* Allocate mode-sized buffer. */
7434 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7436 emit_move_insn (mem, vec);
7438 /* Add offset to field within buffer matching vector element. */
7439 mem = adjust_address_nv (mem, inner_mode,
7440 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7442 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
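/* Illustrative sketch, not part of rs6000.c: the stack fallback above is
   equivalent to spilling the vector and reading one scalar back at byte
   offset elt * sizeof (element).  Names are made up for this example.  */

typedef int example_v4si __attribute__ ((vector_size (16)));

static int
example_extract_via_memory (example_v4si v, int elt)
{
  union { example_v4si vec; int elems[4]; } buf = { v };
  return buf.elems[elt];	/* Load from mem + elt * 4.  */
}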
7445 /* Helper function to return the register number of a RTX. */
7446 static inline int
7447 regno_or_subregno (rtx op)
7449 if (REG_P (op))
7450 return REGNO (op);
7451 else if (SUBREG_P (op))
7452 return subreg_regno (op);
7453 else
7454 gcc_unreachable ();
7457 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7458 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7459 temporary (BASE_TMP) to fix up the address.  Return the new memory address
7460 that is valid for reads or writes to a given register (SCALAR_REG). */
7462 static rtx
7463 rs6000_adjust_vec_address (rtx scalar_reg,
7464 rtx mem,
7465 rtx element,
7466 rtx base_tmp,
7467 machine_mode scalar_mode)
7469 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7470 rtx addr = XEXP (mem, 0);
7471 rtx element_offset;
7472 rtx new_addr;
7473 bool valid_addr_p;
7475 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7476 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7478 /* Calculate what we need to add to the address to get the element
7479 address. */
7480 if (CONST_INT_P (element))
7481 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7482 else
7484 int byte_shift = exact_log2 (scalar_size);
7485 gcc_assert (byte_shift >= 0);
7487 if (byte_shift == 0)
7488 element_offset = element;
7490 else
7492 if (TARGET_POWERPC64)
7493 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7494 else
7495 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7497 element_offset = base_tmp;
7501 /* Create the new address pointing to the element within the vector. If we
7502 are adding 0, we don't have to change the address. */
7503 if (element_offset == const0_rtx)
7504 new_addr = addr;
7506 /* A simple indirect address can be converted into a reg + offset
7507 address. */
7508 else if (REG_P (addr) || SUBREG_P (addr))
7509 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7511 /* Optimize D-FORM addresses with constant offset with a constant element, to
7512 include the element offset in the address directly. */
7513 else if (GET_CODE (addr) == PLUS)
7515 rtx op0 = XEXP (addr, 0);
7516 rtx op1 = XEXP (addr, 1);
7517 rtx insn;
7519 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7520 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7522 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7523 rtx offset_rtx = GEN_INT (offset);
7525 if (IN_RANGE (offset, -32768, 32767)
7526 && (scalar_size < 8 || (offset & 0x3) == 0))
7527 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7528 else
7530 emit_move_insn (base_tmp, offset_rtx);
7531 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7534 else
7536 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7537 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7539 /* Note, ADDI requires the register being added to be a base
7540 register. If the register was R0, load it up into the temporary
7541 and do the add. */
7542 if (op1_reg_p
7543 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7545 insn = gen_add3_insn (base_tmp, op1, element_offset);
7546 gcc_assert (insn != NULL_RTX);
7547 emit_insn (insn);
7550 else if (ele_reg_p
7551 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7553 insn = gen_add3_insn (base_tmp, element_offset, op1);
7554 gcc_assert (insn != NULL_RTX);
7555 emit_insn (insn);
7558 else
7560 emit_move_insn (base_tmp, op1);
7561 emit_insn (gen_add2_insn (base_tmp, element_offset));
7564 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7568 else
7570 emit_move_insn (base_tmp, addr);
7571 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7574 /* If we have a PLUS, we need to see whether the particular register class
7575 allows for D-FORM or X-FORM addressing. */
7576 if (GET_CODE (new_addr) == PLUS)
7578 rtx op1 = XEXP (new_addr, 1);
7579 addr_mask_type addr_mask;
7580 int scalar_regno = regno_or_subregno (scalar_reg);
7582 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7583 if (INT_REGNO_P (scalar_regno))
7584 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7586 else if (FP_REGNO_P (scalar_regno))
7587 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7589 else if (ALTIVEC_REGNO_P (scalar_regno))
7590 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7592 else
7593 gcc_unreachable ();
7595 if (REG_P (op1) || SUBREG_P (op1))
7596 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7597 else
7598 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7601 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7602 valid_addr_p = true;
7604 else
7605 valid_addr_p = false;
7607 if (!valid_addr_p)
7609 emit_move_insn (base_tmp, new_addr);
7610 new_addr = base_tmp;
7613 return change_address (mem, scalar_mode, new_addr);
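/* Illustrative sketch, not part of rs6000.c: the constant-offset folding
   above accepts the combined offset only when it fits a signed 16-bit
   D field and, for 8-byte scalars, stays word aligned (DS-form).  */

#include <stdbool.h>

static bool
example_dform_offset_ok (long offset, unsigned scalar_size)
{
  return offset >= -32768 && offset <= 32767
	 && (scalar_size < 8 || (offset & 0x3) == 0);
}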
7616 /* Split a variable vec_extract operation into the component instructions. */
7618 void
7619 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7620 rtx tmp_altivec)
7622 machine_mode mode = GET_MODE (src);
7623 machine_mode scalar_mode = GET_MODE (dest);
7624 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7625 int byte_shift = exact_log2 (scalar_size);
7627 gcc_assert (byte_shift >= 0);
7629 /* If we are given a memory address, optimize to load just the element. We
7630 don't have to adjust the vector element number on little endian
7631 systems. */
7632 if (MEM_P (src))
7634 gcc_assert (REG_P (tmp_gpr));
7635 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7636 tmp_gpr, scalar_mode));
7637 return;
7640 else if (REG_P (src) || SUBREG_P (src))
7642 int bit_shift = byte_shift + 3;
7643 rtx element2;
7644 int dest_regno = regno_or_subregno (dest);
7645 int src_regno = regno_or_subregno (src);
7646 int element_regno = regno_or_subregno (element);
7648 gcc_assert (REG_P (tmp_gpr));
7650 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7651 a general purpose register. */
7652 if (TARGET_P9_VECTOR
7653 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7654 && INT_REGNO_P (dest_regno)
7655 && ALTIVEC_REGNO_P (src_regno)
7656 && INT_REGNO_P (element_regno))
7658 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7659 rtx element_si = gen_rtx_REG (SImode, element_regno);
7661 if (mode == V16QImode)
7662 emit_insn (VECTOR_ELT_ORDER_BIG
7663 ? gen_vextublx (dest_si, element_si, src)
7664 : gen_vextubrx (dest_si, element_si, src));
7666 else if (mode == V8HImode)
7668 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7669 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7670 emit_insn (VECTOR_ELT_ORDER_BIG
7671 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7672 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7676 else
7678 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7679 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7680 emit_insn (VECTOR_ELT_ORDER_BIG
7681 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7682 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7685 return;
7689 gcc_assert (REG_P (tmp_altivec));
7691 /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
7692 an XOR, otherwise we need to subtract.  The shift amount is chosen so
7693 that VSLO will shift the element into the upper position (adding 3
7694 converts a byte shift into a bit shift). */
7695 if (scalar_size == 8)
7697 if (!VECTOR_ELT_ORDER_BIG)
7699 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7700 element2 = tmp_gpr;
7702 else
7703 element2 = element;
7705 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7706 bit. */
7707 emit_insn (gen_rtx_SET (tmp_gpr,
7708 gen_rtx_AND (DImode,
7709 gen_rtx_ASHIFT (DImode,
7710 element2,
7711 GEN_INT (6)),
7712 GEN_INT (64))));
7714 else
7716 if (!VECTOR_ELT_ORDER_BIG)
7718 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7720 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7721 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7722 element2 = tmp_gpr;
7724 else
7725 element2 = element;
7727 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7730 /* Get the value into the lower byte of the Altivec register where VSLO
7731 expects it. */
7732 if (TARGET_P9_VECTOR)
7733 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7734 else if (can_create_pseudo_p ())
7735 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7736 else
7738 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7739 emit_move_insn (tmp_di, tmp_gpr);
7740 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7743 /* Do the VSLO to get the value into the final location. */
7744 switch (mode)
7746 case E_V2DFmode:
7747 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7748 return;
7750 case E_V2DImode:
7751 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7752 return;
7754 case E_V4SFmode:
7756 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7757 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7758 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7759 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7760 tmp_altivec));
7762 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7763 return;
7766 case E_V4SImode:
7767 case E_V8HImode:
7768 case E_V16QImode:
7770 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7771 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7772 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7773 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7774 tmp_altivec));
7775 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7776 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7777 GEN_INT (64 - (8 * scalar_size))));
7778 return;
7781 default:
7782 gcc_unreachable ();
7785 return;
7787 else
7788 gcc_unreachable ();
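/* Illustrative sketch, not part of rs6000.c: the element-number arithmetic
   above as plain C.  Little endian mirrors the element index (for a
   two-element vector an XOR with 1 suffices), and the byte index is then
   scaled to the bit count VSLO expects; byte_shift + 3 converts a byte
   shift into a bit shift.  */

static long
example_vslo_shift_bits (long elt, int nunits, int byte_shift, int big_endian)
{
  if (!big_endian)
    elt = (nunits - 1) - (elt & (nunits - 1));
  return elt << (byte_shift + 3);
}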
7791 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7792 two SImode values. */
7794 static void
7795 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7797 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7799 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7801 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7802 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7804 emit_move_insn (dest, GEN_INT (const1 | const2));
7805 return;
7808 /* Put si1 into upper 32-bits of dest. */
7809 if (CONST_INT_P (si1))
7810 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7811 else
7813 /* Generate RLDIC. */
7814 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7815 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7816 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7817 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7818 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7819 emit_insn (gen_rtx_SET (dest, and_rtx));
7822 /* Put si2 into the temporary. */
7823 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7824 if (CONST_INT_P (si2))
7825 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7826 else
7827 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7829 /* Combine the two parts. */
7830 emit_insn (gen_iordi3 (dest, dest, tmp));
7831 return;
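/* Illustrative sketch, not part of rs6000.c: the value computed above, as
   plain C -- SI1 lands in the upper 32 bits of the double word and SI2 in
   the lower 32 bits.  */

#include <stdint.h>

static uint64_t
example_combine_si (uint32_t si1, uint32_t si2)
{
  return ((uint64_t) si1 << 32) | si2;
}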
7834 /* Split a V4SI initialization. */
7836 void
7837 rs6000_split_v4si_init (rtx operands[])
7839 rtx dest = operands[0];
7841 /* Destination is a GPR, build up the two DImode parts in place. */
7842 if (REG_P (dest) || SUBREG_P (dest))
7844 int d_regno = regno_or_subregno (dest);
7845 rtx scalar1 = operands[1];
7846 rtx scalar2 = operands[2];
7847 rtx scalar3 = operands[3];
7848 rtx scalar4 = operands[4];
7849 rtx tmp1 = operands[5];
7850 rtx tmp2 = operands[6];
7852 /* Even though we only need one temporary (plus the destination, which
7853 has an early clobber constraint), try to use two temporaries, one for
7854 each double word created.  That way the 2nd insn scheduling pass can
7855 rearrange things so the two parts are done in parallel. */
7856 if (BYTES_BIG_ENDIAN)
7858 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7859 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7860 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7861 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7863 else
7865 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7866 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7867 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7868 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7869 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7871 return;
7874 else
7875 gcc_unreachable ();
7878 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7879 selects whether the alignment is ABI-mandated, optional, or
7880 both ABI-mandated and optional alignment. */
7882 unsigned int
7883 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7885 if (how != align_opt)
7887 if (TREE_CODE (type) == VECTOR_TYPE)
7889 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7891 if (align < 64)
7892 align = 64;
7894 else if (align < 128)
7895 align = 128;
7899 if (how != align_abi)
7901 if (TREE_CODE (type) == ARRAY_TYPE
7902 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7904 if (align < BITS_PER_WORD)
7905 align = BITS_PER_WORD;
7909 return align;
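/* Illustrative sketch, not part of rs6000.c: the ABI-mandated rule above as
   plain C (values in bits).  Vector types get a minimum of 128 bits (64 for
   paired float); QImode arrays separately get an optional boost to word
   alignment.  */

static unsigned int
example_vector_abi_align (unsigned int align, int paired_float_p)
{
  unsigned int min_align = paired_float_p ? 64 : 128;
  return align < min_align ? min_align : align;
}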
7912 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7913 instructions simply ignore the low bits; VSX memory instructions
7914 are aligned to 4 or 8 bytes. */
7916 static bool
7917 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7919 return (STRICT_ALIGNMENT
7920 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7921 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7922 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7923 && (int) align < VECTOR_ALIGN (mode)))));
7926 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7928 bool
7929 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7931 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7933 if (computed != 128)
7935 static bool warned;
7936 if (!warned && warn_psabi)
7938 warned = true;
7939 inform (input_location,
7940 "the layout of aggregates containing vectors with"
7941 " %d-byte alignment has changed in GCC 5",
7942 computed / BITS_PER_UNIT);
7945 /* In current GCC there is no special case. */
7946 return false;
7949 return false;
7952 /* AIX increases natural record alignment to doubleword if the first
7953 field is an FP double, while the FP fields themselves remain word aligned. */
7955 unsigned int
7956 rs6000_special_round_type_align (tree type, unsigned int computed,
7957 unsigned int specified)
7959 unsigned int align = MAX (computed, specified);
7960 tree field = TYPE_FIELDS (type);
7962 /* Skip all non field decls */
7963 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7964 field = DECL_CHAIN (field);
7966 if (field != NULL && field != type)
7968 type = TREE_TYPE (field);
7969 while (TREE_CODE (type) == ARRAY_TYPE)
7970 type = TREE_TYPE (type);
7972 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7973 align = MAX (align, 64);
7976 return align;
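/* Illustrative example, not part of rs6000.c: under the AIX rule above the
   first struct is raised to doubleword alignment because its first field is
   a double, while in the second the double stays word aligned at offset 4.  */

struct example_aix_dword { double d; int i; };	/* alignment 8 on AIX */
struct example_aix_word  { int i; double d; };	/* alignment 4 on AIX */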
7979 /* Darwin increases record alignment to the natural alignment of
7980 the first field. */
7982 unsigned int
7983 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7984 unsigned int specified)
7986 unsigned int align = MAX (computed, specified);
7988 if (TYPE_PACKED (type))
7989 return align;
7991 /* Find the first field, looking down into aggregates. */
7992 do {
7993 tree field = TYPE_FIELDS (type);
7994 /* Skip all non field decls */
7995 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7996 field = DECL_CHAIN (field);
7997 if (! field)
7998 break;
7999 /* A packed field does not contribute any extra alignment. */
8000 if (DECL_PACKED (field))
8001 return align;
8002 type = TREE_TYPE (field);
8003 while (TREE_CODE (type) == ARRAY_TYPE)
8004 type = TREE_TYPE (type);
8005 } while (AGGREGATE_TYPE_P (type));
8007 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8008 align = MAX (align, TYPE_ALIGN (type));
8010 return align;
8013 /* Return 1 for an operand in small memory on V.4/eabi. */
8015 int
8016 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8017 machine_mode mode ATTRIBUTE_UNUSED)
8019 #if TARGET_ELF
8020 rtx sym_ref;
8022 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8023 return 0;
8025 if (DEFAULT_ABI != ABI_V4)
8026 return 0;
8028 if (GET_CODE (op) == SYMBOL_REF)
8029 sym_ref = op;
8031 else if (GET_CODE (op) != CONST
8032 || GET_CODE (XEXP (op, 0)) != PLUS
8033 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8034 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8035 return 0;
8037 else
8039 rtx sum = XEXP (op, 0);
8040 HOST_WIDE_INT summand;
8042 /* We have to be careful here, because it is the referenced address
8043 that must be 32k from _SDA_BASE_, not just the symbol. */
8044 summand = INTVAL (XEXP (sum, 1));
8045 if (summand < 0 || summand > g_switch_value)
8046 return 0;
8048 sym_ref = XEXP (sum, 0);
8051 return SYMBOL_REF_SMALL_P (sym_ref);
8052 #else
8053 return 0;
8054 #endif
8057 /* Return true if either operand is a general purpose register. */
8059 bool
8060 gpr_or_gpr_p (rtx op0, rtx op1)
8062 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8063 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8066 /* Return true if this is a move direct operation between GPR registers and
8067 floating point/VSX registers. */
8069 bool
8070 direct_move_p (rtx op0, rtx op1)
8072 int regno0, regno1;
8074 if (!REG_P (op0) || !REG_P (op1))
8075 return false;
8077 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8078 return false;
8080 regno0 = REGNO (op0);
8081 regno1 = REGNO (op1);
8082 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8083 return false;
8085 if (INT_REGNO_P (regno0))
8086 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8088 else if (INT_REGNO_P (regno1))
8090 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8091 return true;
8093 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8094 return true;
8097 return false;
8100 /* Return true if the OFFSET is valid for the quad address instructions that
8101 use d-form (register + offset) addressing. */
8103 static inline bool
8104 quad_address_offset_p (HOST_WIDE_INT offset)
8106 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
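/* Illustrative sketch, not part of rs6000.c: ISA 3.0 LXV/STXV encode the
   offset in a signed 12-bit DQ field implicitly scaled by 16, which is why
   the check above requires a 16-byte aligned offset within +/-32 KiB.  */

static long
example_dq_field_to_offset (int dq)	/* dq: signed 12-bit field */
{
  return (long) dq << 4;		/* EA = (RA) + dq * 16 */
}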
8109 /* Return true if the ADDR is an acceptable address for a quad memory
8110 operation of mode MODE (either LQ/STQ for general purpose registers, or
8111 LXV/STXV for vector registers under ISA 3.0).  STRICT is true if only
8112 hard registers may be accepted as base registers, i.e. strict RTL
8113 checking is in effect. */
8115 bool
8116 quad_address_p (rtx addr, machine_mode mode, bool strict)
8118 rtx op0, op1;
8120 if (GET_MODE_SIZE (mode) != 16)
8121 return false;
8123 if (legitimate_indirect_address_p (addr, strict))
8124 return true;
8126 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8127 return false;
8129 if (GET_CODE (addr) != PLUS)
8130 return false;
8132 op0 = XEXP (addr, 0);
8133 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8134 return false;
8136 op1 = XEXP (addr, 1);
8137 if (!CONST_INT_P (op1))
8138 return false;
8140 return quad_address_offset_p (INTVAL (op1));
8143 /* Return true if this is a load or store quad operation. This function does
8144 not handle the atomic quad memory instructions. */
8146 bool
8147 quad_load_store_p (rtx op0, rtx op1)
8149 bool ret;
8151 if (!TARGET_QUAD_MEMORY)
8152 ret = false;
8154 else if (REG_P (op0) && MEM_P (op1))
8155 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8156 && quad_memory_operand (op1, GET_MODE (op1))
8157 && !reg_overlap_mentioned_p (op0, op1));
8159 else if (MEM_P (op0) && REG_P (op1))
8160 ret = (quad_memory_operand (op0, GET_MODE (op0))
8161 && quad_int_reg_operand (op1, GET_MODE (op1)));
8163 else
8164 ret = false;
8166 if (TARGET_DEBUG_ADDR)
8168 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8169 ret ? "true" : "false");
8170 debug_rtx (gen_rtx_SET (op0, op1));
8173 return ret;
8176 /* Given an address, return a constant offset term if one exists. */
8178 static rtx
8179 address_offset (rtx op)
8181 if (GET_CODE (op) == PRE_INC
8182 || GET_CODE (op) == PRE_DEC)
8183 op = XEXP (op, 0);
8184 else if (GET_CODE (op) == PRE_MODIFY
8185 || GET_CODE (op) == LO_SUM)
8186 op = XEXP (op, 1);
8188 if (GET_CODE (op) == CONST)
8189 op = XEXP (op, 0);
8191 if (GET_CODE (op) == PLUS)
8192 op = XEXP (op, 1);
8194 if (CONST_INT_P (op))
8195 return op;
8197 return NULL_RTX;
8200 /* Return true if the MEM operand is a memory operand suitable for use
8201 with a (full width, possibly multiple) gpr load/store. On
8202 powerpc64 this means the offset must be divisible by 4.
8203 Implements 'Y' constraint.
8205 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8206 a constraint function we know the operand has satisfied a suitable
8207 memory predicate. Also accept some odd rtl generated by reload
8208 (see rs6000_legitimize_reload_address for various forms). It is
8209 important that reload rtl be accepted by appropriate constraints
8210 but not by the operand predicate.
8212 Offsetting a lo_sum should not be allowed, except where we know by
8213 alignment that a 32k boundary is not crossed, but see the ???
8214 comment in rs6000_legitimize_reload_address. Note that by
8215 "offsetting" here we mean a further offset to access parts of the
8216 MEM. It's fine to have a lo_sum where the inner address is offset
8217 from a sym, since the same sym+offset will appear in the high part
8218 of the address calculation. */
8220 bool
8221 mem_operand_gpr (rtx op, machine_mode mode)
8223 unsigned HOST_WIDE_INT offset;
8224 int extra;
8225 rtx addr = XEXP (op, 0);
8227 op = address_offset (addr);
8228 if (op == NULL_RTX)
8229 return true;
8231 offset = INTVAL (op);
8232 if (TARGET_POWERPC64 && (offset & 3) != 0)
8233 return false;
8235 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8236 if (extra < 0)
8237 extra = 0;
8239 if (GET_CODE (addr) == LO_SUM)
8240 /* For lo_sum addresses, we must allow any offset except one that
8241 causes a wrap, so test only the low 16 bits. */
8242 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8244 return offset + 0x8000 < 0x10000u - extra;
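/* Illustrative sketch, not part of rs6000.c: the unsigned-compare idiom
   used above.  Adding 0x8000 biases the signed 16-bit range
   [-0x8000, 0x7fff] onto [0, 0xffff], so a single unsigned comparison
   checks both bounds; EXTRA shrinks the upper bound so that the last word
   of a multi-word access is still reachable.  */

#include <stdbool.h>

static bool
example_offset_in_d_range (long offset, int extra)
{
  return (unsigned long) (offset + 0x8000) < 0x10000u - extra;
}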
8247 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8248 enforce an offset divisible by 4 even for 32-bit. */
8250 bool
8251 mem_operand_ds_form (rtx op, machine_mode mode)
8253 unsigned HOST_WIDE_INT offset;
8254 int extra;
8255 rtx addr = XEXP (op, 0);
8257 if (!offsettable_address_p (false, mode, addr))
8258 return false;
8260 op = address_offset (addr);
8261 if (op == NULL_RTX)
8262 return true;
8264 offset = INTVAL (op);
8265 if ((offset & 3) != 0)
8266 return false;
8268 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8269 if (extra < 0)
8270 extra = 0;
8272 if (GET_CODE (addr) == LO_SUM)
8273 /* For lo_sum addresses, we must allow any offset except one that
8274 causes a wrap, so test only the low 16 bits. */
8275 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8277 return offset + 0x8000 < 0x10000u - extra;
8280 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8282 static bool
8283 reg_offset_addressing_ok_p (machine_mode mode)
8285 switch (mode)
8287 case E_V16QImode:
8288 case E_V8HImode:
8289 case E_V4SFmode:
8290 case E_V4SImode:
8291 case E_V2DFmode:
8292 case E_V2DImode:
8293 case E_V1TImode:
8294 case E_TImode:
8295 case E_TFmode:
8296 case E_KFmode:
8297 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8298 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8299 a vector mode, if we want to use the VSX registers to move it around,
8300 we need to restrict ourselves to reg+reg addressing. Similarly for
8301 IEEE 128-bit floating point that is passed in a single vector
8302 register. */
8303 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8304 return mode_supports_vsx_dform_quad (mode);
8305 break;
8307 case E_V2SImode:
8308 case E_V2SFmode:
8309 /* Paired vector modes. Only reg+reg addressing is valid. */
8310 if (TARGET_PAIRED_FLOAT)
8311 return false;
8312 break;
8314 case E_SDmode:
8315 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8316 addressing for the LFIWZX and STFIWX instructions. */
8317 if (TARGET_NO_SDMODE_STACK)
8318 return false;
8319 break;
8321 default:
8322 break;
8325 return true;
8328 static bool
8329 virtual_stack_registers_memory_p (rtx op)
8331 int regnum;
8333 if (GET_CODE (op) == REG)
8334 regnum = REGNO (op);
8336 else if (GET_CODE (op) == PLUS
8337 && GET_CODE (XEXP (op, 0)) == REG
8338 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8339 regnum = REGNO (XEXP (op, 0));
8341 else
8342 return false;
8344 return (regnum >= FIRST_VIRTUAL_REGISTER
8345 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8348 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8349 is known to not straddle a 32k boundary. This function is used
8350 to determine whether -mcmodel=medium code can use TOC pointer
8351 relative addressing for OP. This means the alignment of the TOC
8352 pointer must also be taken into account, and unfortunately that is
8353 only 8 bytes. */
8355 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8356 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8357 #endif
8359 static bool
8360 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8361 machine_mode mode)
8363 tree decl;
8364 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8366 if (GET_CODE (op) != SYMBOL_REF)
8367 return false;
8369 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8370 SYMBOL_REF. */
8371 if (mode_supports_vsx_dform_quad (mode))
8372 return false;
8374 dsize = GET_MODE_SIZE (mode);
8375 decl = SYMBOL_REF_DECL (op);
8376 if (!decl)
8378 if (dsize == 0)
8379 return false;
8381 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8382 replacing memory addresses with an anchor plus offset. We
8383 could find the decl by rummaging around in the block->objects
8384 VEC for the given offset but that seems like too much work. */
8385 dalign = BITS_PER_UNIT;
8386 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8387 && SYMBOL_REF_ANCHOR_P (op)
8388 && SYMBOL_REF_BLOCK (op) != NULL)
8390 struct object_block *block = SYMBOL_REF_BLOCK (op);
8392 dalign = block->alignment;
8393 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8395 else if (CONSTANT_POOL_ADDRESS_P (op))
8397 /* It would be nice to have get_pool_align ()... */
8398 machine_mode cmode = get_pool_mode (op);
8400 dalign = GET_MODE_ALIGNMENT (cmode);
8403 else if (DECL_P (decl))
8405 dalign = DECL_ALIGN (decl);
8407 if (dsize == 0)
8409 /* Allow BLKmode when the entire object is known to not
8410 cross a 32k boundary. */
8411 if (!DECL_SIZE_UNIT (decl))
8412 return false;
8414 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8415 return false;
8417 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8418 if (dsize > 32768)
8419 return false;
8421 dalign /= BITS_PER_UNIT;
8422 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8423 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8424 return dalign >= dsize;
8427 else
8428 gcc_unreachable ();
8430 /* Find how many bits of the alignment we know for this access. */
8431 dalign /= BITS_PER_UNIT;
8432 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8433 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8434 mask = dalign - 1;
8435 lsb = offset & -offset;
8436 mask &= lsb - 1;
8437 dalign = mask + 1;
8439 return dalign >= dsize;
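/* Illustrative sketch, not part of rs6000.c: the known-alignment
   computation above.  The guaranteed alignment of symbol + offset is the
   declared alignment further limited by the lowest set bit of the offset
   (with both already capped at the 8-byte TOC pointer alignment).  */

static unsigned long
example_known_align (unsigned long dalign, unsigned long offset)
{
  unsigned long lsb = offset & -offset;	/* lowest set bit; 0 if offset == 0 */
  unsigned long mask = (dalign - 1) & (lsb - 1);
  return mask + 1;
}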
8442 static bool
8443 constant_pool_expr_p (rtx op)
8445 rtx base, offset;
8447 split_const (op, &base, &offset);
8448 return (GET_CODE (base) == SYMBOL_REF
8449 && CONSTANT_POOL_ADDRESS_P (base)
8450 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8453 /* These are only used to pass through from print_operand/print_operand_address
8454 to rs6000_output_addr_const_extra over the intervening function
8455 output_addr_const which is not target code. */
8456 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8458 /* Return true if OP is a toc pointer relative address (the output
8459 of create_TOC_reference). If STRICT, do not match non-split
8460 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8461 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8462 TOCREL_OFFSET_RET respectively. */
8464 bool
8465 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8466 const_rtx *tocrel_offset_ret)
8468 if (!TARGET_TOC)
8469 return false;
8471 if (TARGET_CMODEL != CMODEL_SMALL)
8473 /* When strict, ensure we have everything tidy. */
8474 if (strict
8475 && !(GET_CODE (op) == LO_SUM
8476 && REG_P (XEXP (op, 0))
8477 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8478 return false;
8480 /* When not strict, allow non-split TOC addresses and also allow
8481 (lo_sum (high ..)) TOC addresses created during reload. */
8482 if (GET_CODE (op) == LO_SUM)
8483 op = XEXP (op, 1);
8486 const_rtx tocrel_base = op;
8487 const_rtx tocrel_offset = const0_rtx;
8489 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8491 tocrel_base = XEXP (op, 0);
8492 tocrel_offset = XEXP (op, 1);
8495 if (tocrel_base_ret)
8496 *tocrel_base_ret = tocrel_base;
8497 if (tocrel_offset_ret)
8498 *tocrel_offset_ret = tocrel_offset;
8500 return (GET_CODE (tocrel_base) == UNSPEC
8501 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8504 /* Return true if X is a constant pool address, and also for cmodel=medium
8505 if X is a toc-relative address known to be offsettable within MODE. */
8507 bool
8508 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8509 bool strict)
8511 const_rtx tocrel_base, tocrel_offset;
8512 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8513 && (TARGET_CMODEL != CMODEL_MEDIUM
8514 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8515 || mode == QImode
8516 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8517 INTVAL (tocrel_offset), mode)));
8520 static bool
8521 legitimate_small_data_p (machine_mode mode, rtx x)
8523 return (DEFAULT_ABI == ABI_V4
8524 && !flag_pic && !TARGET_TOC
8525 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8526 && small_data_operand (x, mode));
8529 bool
8530 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8531 bool strict, bool worst_case)
8533 unsigned HOST_WIDE_INT offset;
8534 unsigned int extra;
8536 if (GET_CODE (x) != PLUS)
8537 return false;
8538 if (!REG_P (XEXP (x, 0)))
8539 return false;
8540 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8541 return false;
8542 if (mode_supports_vsx_dform_quad (mode))
8543 return quad_address_p (x, mode, strict);
8544 if (!reg_offset_addressing_ok_p (mode))
8545 return virtual_stack_registers_memory_p (x);
8546 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8547 return true;
8548 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8549 return false;
8551 offset = INTVAL (XEXP (x, 1));
8552 extra = 0;
8553 switch (mode)
8555 case E_V2SImode:
8556 case E_V2SFmode:
8557 /* Paired single modes: offset addressing isn't valid. */
8558 return false;
8560 case E_DFmode:
8561 case E_DDmode:
8562 case E_DImode:
8563 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8564 addressing. */
8565 if (VECTOR_MEM_VSX_P (mode))
8566 return false;
8568 if (!worst_case)
8569 break;
8570 if (!TARGET_POWERPC64)
8571 extra = 4;
8572 else if (offset & 3)
8573 return false;
8574 break;
8576 case E_TFmode:
8577 case E_IFmode:
8578 case E_KFmode:
8579 case E_TDmode:
8580 case E_TImode:
8581 case E_PTImode:
8582 extra = 8;
8583 if (!worst_case)
8584 break;
8585 if (!TARGET_POWERPC64)
8586 extra = 12;
8587 else if (offset & 3)
8588 return false;
8589 break;
8591 default:
8592 break;
8595 offset += 0x8000;
8596 return offset < 0x10000 - extra;
8599 bool
8600 legitimate_indexed_address_p (rtx x, int strict)
8602 rtx op0, op1;
8604 if (GET_CODE (x) != PLUS)
8605 return false;
8607 op0 = XEXP (x, 0);
8608 op1 = XEXP (x, 1);
8610 return (REG_P (op0) && REG_P (op1)
8611 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8612 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8613 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8614 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8617 bool
8618 avoiding_indexed_address_p (machine_mode mode)
8620 /* Avoid indexed addressing for modes that have non-indexed
8621 load/store instruction forms. */
8622 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8625 bool
8626 legitimate_indirect_address_p (rtx x, int strict)
8628 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8631 bool
8632 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8634 if (!TARGET_MACHO || !flag_pic
8635 || mode != SImode || GET_CODE (x) != MEM)
8636 return false;
8637 x = XEXP (x, 0);
8639 if (GET_CODE (x) != LO_SUM)
8640 return false;
8641 if (GET_CODE (XEXP (x, 0)) != REG)
8642 return false;
8643 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8644 return false;
8645 x = XEXP (x, 1);
8647 return CONSTANT_P (x);
8650 static bool
8651 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8653 if (GET_CODE (x) != LO_SUM)
8654 return false;
8655 if (GET_CODE (XEXP (x, 0)) != REG)
8656 return false;
8657 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8658 return false;
8659 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8660 if (mode_supports_vsx_dform_quad (mode))
8661 return false;
8662 x = XEXP (x, 1);
8664 if (TARGET_ELF || TARGET_MACHO)
8666 bool large_toc_ok;
8668 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8669 return false;
8670 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8671 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8672 recognizes some LO_SUM addresses as valid although this
8673 function says the opposite.  In most cases, LRA through different
8674 transformations can generate correct code for address reloads;
8675 it fails to manage only some LO_SUM cases.  So we need to add
8676 code analogous to that in rs6000_legitimize_reload_address for
8677 LO_SUM here, saying that some addresses are still valid. */
8678 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8679 && small_toc_ref (x, VOIDmode));
8680 if (TARGET_TOC && ! large_toc_ok)
8681 return false;
8682 if (GET_MODE_NUNITS (mode) != 1)
8683 return false;
8684 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8685 && !(/* ??? Assume floating point reg based on mode? */
8686 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8687 && (mode == DFmode || mode == DDmode)))
8688 return false;
8690 return CONSTANT_P (x) || large_toc_ok;
8693 return false;
8697 /* Try machine-dependent ways of modifying an illegitimate address
8698 to be legitimate. If we find one, return the new, valid address.
8699 This is used from only one place: `memory_address' in explow.c.
8701 OLDX is the address as it was before break_out_memory_refs was
8702 called. In some cases it is useful to look at this to decide what
8703 needs to be done.
8705 It is always safe for this function to do nothing. It exists to
8706 recognize opportunities to optimize the output.
8708 On RS/6000, first check for the sum of a register with a constant
8709 integer that is out of range. If so, generate code to add the
8710 constant with the low-order 16 bits masked to the register and force
8711 this result into another register (this can be done with `cau').
8712 Then generate an address of REG+(CONST&0xffff), allowing for the
8713 possibility of bit 16 being a one.
8715 Then check for the sum of a register and something not constant, try to
8716 load the other things into a register and return the sum. */
8718 static rtx
8719 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8720 machine_mode mode)
8722 unsigned int extra;
8724 if (!reg_offset_addressing_ok_p (mode)
8725 || mode_supports_vsx_dform_quad (mode))
8727 if (virtual_stack_registers_memory_p (x))
8728 return x;
8730 /* In theory we should not be seeing addresses of the form reg+0,
8731 but just in case it is generated, optimize it away. */
8732 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8733 return force_reg (Pmode, XEXP (x, 0));
8735 /* For TImode with load/store quad, restrict addresses to just a single
8736 pointer, so it works with both GPRs and VSX registers. */
8737 /* Make sure both operands are registers. */
8738 else if (GET_CODE (x) == PLUS
8739 && (mode != TImode || !TARGET_VSX))
8740 return gen_rtx_PLUS (Pmode,
8741 force_reg (Pmode, XEXP (x, 0)),
8742 force_reg (Pmode, XEXP (x, 1)));
8743 else
8744 return force_reg (Pmode, x);
8746 if (GET_CODE (x) == SYMBOL_REF)
8748 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8749 if (model != 0)
8750 return rs6000_legitimize_tls_address (x, model);
8753 extra = 0;
8754 switch (mode)
8756 case E_TFmode:
8757 case E_TDmode:
8758 case E_TImode:
8759 case E_PTImode:
8760 case E_IFmode:
8761 case E_KFmode:
8762 /* As in legitimate_offset_address_p we do not assume
8763 worst-case. The mode here is just a hint as to the registers
8764 used. A TImode is usually in gprs, but may actually be in
8765 fprs. Leave worst-case scenario for reload to handle via
8766 insn constraints. PTImode is only GPRs. */
8767 extra = 8;
8768 break;
8769 default:
8770 break;
8773 if (GET_CODE (x) == PLUS
8774 && GET_CODE (XEXP (x, 0)) == REG
8775 && GET_CODE (XEXP (x, 1)) == CONST_INT
8776 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8777 >= 0x10000 - extra)
8778 && !PAIRED_VECTOR_MODE (mode))
8780 HOST_WIDE_INT high_int, low_int;
8781 rtx sum;
8782 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8783 if (low_int >= 0x8000 - extra)
8784 low_int = 0;
8785 high_int = INTVAL (XEXP (x, 1)) - low_int;
8786 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8787 GEN_INT (high_int)), 0);
8788 return plus_constant (Pmode, sum, low_int);
8790 else if (GET_CODE (x) == PLUS
8791 && GET_CODE (XEXP (x, 0)) == REG
8792 && GET_CODE (XEXP (x, 1)) != CONST_INT
8793 && GET_MODE_NUNITS (mode) == 1
8794 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8795 || (/* ??? Assume floating point reg based on mode? */
8796 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8797 && (mode == DFmode || mode == DDmode)))
8798 && !avoiding_indexed_address_p (mode))
8800 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8801 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8803 else if (PAIRED_VECTOR_MODE (mode))
8805 if (mode == DImode)
8806 return x;
8807 /* We accept [reg + reg]. */
8809 if (GET_CODE (x) == PLUS)
8811 rtx op1 = XEXP (x, 0);
8812 rtx op2 = XEXP (x, 1);
8813 rtx y;
8815 op1 = force_reg (Pmode, op1);
8816 op2 = force_reg (Pmode, op2);
8818 /* We can't always do [reg + reg] for these, because [reg +
8819 reg + offset] is not a legitimate addressing mode. */
8820 y = gen_rtx_PLUS (Pmode, op1, op2);
8822 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8823 return force_reg (Pmode, y);
8824 else
8825 return y;
8828 return force_reg (Pmode, x);
8830 else if ((TARGET_ELF
8831 #if TARGET_MACHO
8832 || !MACHO_DYNAMIC_NO_PIC_P
8833 #endif
8835 && TARGET_32BIT
8836 && TARGET_NO_TOC
8837 && ! flag_pic
8838 && GET_CODE (x) != CONST_INT
8839 && GET_CODE (x) != CONST_WIDE_INT
8840 && GET_CODE (x) != CONST_DOUBLE
8841 && CONSTANT_P (x)
8842 && GET_MODE_NUNITS (mode) == 1
8843 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8844 || (/* ??? Assume floating point reg based on mode? */
8845 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8846 && (mode == DFmode || mode == DDmode))))
8848 rtx reg = gen_reg_rtx (Pmode);
8849 if (TARGET_ELF)
8850 emit_insn (gen_elf_high (reg, x));
8851 else
8852 emit_insn (gen_macho_high (reg, x));
8853 return gen_rtx_LO_SUM (Pmode, reg, x);
8855 else if (TARGET_TOC
8856 && GET_CODE (x) == SYMBOL_REF
8857 && constant_pool_expr_p (x)
8858 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8859 return create_TOC_reference (x, NULL_RTX);
8860 else
8861 return x;
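/* Illustrative sketch, not part of rs6000.c: the high/low constant split
   used above for out-of-range reg + constant addresses.  The low part is
   the sign-extended low 16 bits (the ((x & 0xffff) ^ 0x8000) - 0x8000
   idiom), and the high part is the remainder, a multiple of 0x10000
   suitable for addis.  */

#include <stdint.h>

static void
example_split_hi_lo (int32_t c, int32_t *high, int32_t *low)
{
  *low = ((c & 0xffff) ^ 0x8000) - 0x8000;	/* in [-0x8000, 0x7fff] */
  *high = c - *low;				/* multiple of 0x10000 */
}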
8864 /* Debug version of rs6000_legitimize_address. */
8865 static rtx
8866 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8868 rtx ret;
8869 rtx_insn *insns;
8871 start_sequence ();
8872 ret = rs6000_legitimize_address (x, oldx, mode);
8873 insns = get_insns ();
8874 end_sequence ();
8876 if (ret != x)
8878 fprintf (stderr,
8879 "\nrs6000_legitimize_address: mode %s, old code %s, "
8880 "new code %s, modified\n",
8881 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8882 GET_RTX_NAME (GET_CODE (ret)));
8884 fprintf (stderr, "Original address:\n");
8885 debug_rtx (x);
8887 fprintf (stderr, "oldx:\n");
8888 debug_rtx (oldx);
8890 fprintf (stderr, "New address:\n");
8891 debug_rtx (ret);
8893 if (insns)
8895 fprintf (stderr, "Insns added:\n");
8896 debug_rtx_list (insns, 20);
8899 else
8901 fprintf (stderr,
8902 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8903 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8905 debug_rtx (x);
8908 if (insns)
8909 emit_insn (insns);
8911 return ret;
8914 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8915 We need to emit DTP-relative relocations. */
8917 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8918 static void
8919 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8921 switch (size)
8923 case 4:
8924 fputs ("\t.long\t", file);
8925 break;
8926 case 8:
8927 fputs (DOUBLE_INT_ASM_OP, file);
8928 break;
8929 default:
8930 gcc_unreachable ();
8932 output_addr_const (file, x);
8933 if (TARGET_ELF)
8934 fputs ("@dtprel+0x8000", file);
8935 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8937 switch (SYMBOL_REF_TLS_MODEL (x))
8939 case 0:
8940 break;
8941 case TLS_MODEL_LOCAL_EXEC:
8942 fputs ("@le", file);
8943 break;
8944 case TLS_MODEL_INITIAL_EXEC:
8945 fputs ("@ie", file);
8946 break;
8947 case TLS_MODEL_GLOBAL_DYNAMIC:
8948 case TLS_MODEL_LOCAL_DYNAMIC:
8949 fputs ("@m", file);
8950 break;
8951 default:
8952 gcc_unreachable ();
8957 /* Return true if X is a symbol that refers to real (rather than emulated)
8958 TLS. */
8960 static bool
8961 rs6000_real_tls_symbol_ref_p (rtx x)
8963 return (GET_CODE (x) == SYMBOL_REF
8964 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8967 /* In the name of slightly smaller debug output, and to cater to
8968 general assembler lossage, recognize various UNSPEC sequences
8969 and turn them back into a direct symbol reference. */
8971 static rtx
8972 rs6000_delegitimize_address (rtx orig_x)
8974 rtx x, y, offset;
8976 orig_x = delegitimize_mem_from_attrs (orig_x);
8977 x = orig_x;
8978 if (MEM_P (x))
8979 x = XEXP (x, 0);
8981 y = x;
8982 if (TARGET_CMODEL != CMODEL_SMALL
8983 && GET_CODE (y) == LO_SUM)
8984 y = XEXP (y, 1);
8986 offset = NULL_RTX;
8987 if (GET_CODE (y) == PLUS
8988 && GET_MODE (y) == Pmode
8989 && CONST_INT_P (XEXP (y, 1)))
8991 offset = XEXP (y, 1);
8992 y = XEXP (y, 0);
8995 if (GET_CODE (y) == UNSPEC
8996 && XINT (y, 1) == UNSPEC_TOCREL)
8998 y = XVECEXP (y, 0, 0);
9000 #ifdef HAVE_AS_TLS
9001 /* Do not associate thread-local symbols with the original
9002 constant pool symbol. */
9003 if (TARGET_XCOFF
9004 && GET_CODE (y) == SYMBOL_REF
9005 && CONSTANT_POOL_ADDRESS_P (y)
9006 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9007 return orig_x;
9008 #endif
9010 if (offset != NULL_RTX)
9011 y = gen_rtx_PLUS (Pmode, y, offset);
9012 if (!MEM_P (orig_x))
9013 return y;
9014 else
9015 return replace_equiv_address_nv (orig_x, y);
9018 if (TARGET_MACHO
9019 && GET_CODE (orig_x) == LO_SUM
9020 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9022 y = XEXP (XEXP (orig_x, 1), 0);
9023 if (GET_CODE (y) == UNSPEC
9024 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9025 return XVECEXP (y, 0, 0);
9028 return orig_x;
9031 /* Return true if X shouldn't be emitted into the debug info.
9032 The linker doesn't like .toc section references from
9033 .debug_* sections, so reject .toc section symbols. */
9035 static bool
9036 rs6000_const_not_ok_for_debug_p (rtx x)
9038 if (GET_CODE (x) == SYMBOL_REF
9039 && CONSTANT_POOL_ADDRESS_P (x))
9041 rtx c = get_pool_constant (x);
9042 machine_mode cmode = get_pool_mode (x);
9043 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9044 return true;
9047 return false;
9051 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9053 static bool
9054 rs6000_legitimate_combined_insn (rtx_insn *insn)
9056 int icode = INSN_CODE (insn);
9058 /* Reject creating doloop insns. Combine should not be allowed
9059 to create these for a number of reasons:
9060 1) In a nested loop, if combine creates one of these in an
9061 outer loop and the register allocator happens to allocate ctr
9062 to the outer loop insn, then the inner loop can't use ctr.
9063 Inner loops ought to be more highly optimized.
9064 2) Combine often wants to create one of these from what was
9065 originally a three insn sequence, first combining the three
9066 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9067 allocated ctr, the splitter takes us back to the three insn
9068 sequence.  It's better to stop combine at the two insn
9069 sequence.
9070 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9071 insns, the register allocator sometimes uses floating point
9072 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9073 jump insn and output reloads are not implemented for jumps,
9074 the ctrsi/ctrdi splitters need to handle all possible cases.
9075 That's a pain, and it gets to be seriously difficult when a
9076 splitter that runs after reload needs memory to transfer from
9077 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9078 for the difficult case. It's better to not create problems
9079 in the first place. */
9080 if (icode != CODE_FOR_nothing
9081 && (icode == CODE_FOR_ctrsi_internal1
9082 || icode == CODE_FOR_ctrdi_internal1
9083 || icode == CODE_FOR_ctrsi_internal2
9084 || icode == CODE_FOR_ctrdi_internal2
9085 || icode == CODE_FOR_ctrsi_internal3
9086 || icode == CODE_FOR_ctrdi_internal3
9087 || icode == CODE_FOR_ctrsi_internal4
9088 || icode == CODE_FOR_ctrdi_internal4))
9089 return false;
9091 return true;
9094 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9096 static GTY(()) rtx rs6000_tls_symbol;
9097 static rtx
9098 rs6000_tls_get_addr (void)
9100 if (!rs6000_tls_symbol)
9101 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9103 return rs6000_tls_symbol;
9106 /* Construct the SYMBOL_REF for TLS GOT references. */
9108 static GTY(()) rtx rs6000_got_symbol;
9109 static rtx
9110 rs6000_got_sym (void)
9112 if (!rs6000_got_symbol)
9114 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9115 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9116 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9119 return rs6000_got_symbol;
9122 /* AIX Thread-Local Address support. */
9124 static rtx
9125 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9127 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9128 const char *name;
9129 char *tlsname;
9131 name = XSTR (addr, 0);
9132 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9133 or the symbol will be in the TLS private data section. */
9134 if (name[strlen (name) - 1] != ']'
9135 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9136 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9138 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9139 strcpy (tlsname, name);
9140 strcat (tlsname,
9141 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9142 tlsaddr = copy_rtx (addr);
9143 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9145 else
9146 tlsaddr = addr;
9148 /* Place addr into TOC constant pool. */
9149 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9151 /* Output the TOC entry and create the MEM referencing the value. */
9152 if (constant_pool_expr_p (XEXP (sym, 0))
9153 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9155 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9156 mem = gen_const_mem (Pmode, tocref);
9157 set_mem_alias_set (mem, get_TOC_alias_set ());
9159 else
9160 return sym;
9162 /* Use global-dynamic for local-dynamic. */
9163 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9164 || model == TLS_MODEL_LOCAL_DYNAMIC)
9166 /* Create new TOC reference for @m symbol. */
9167 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9168 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9169 strcpy (tlsname, "*LCM");
9170 strcat (tlsname, name + 3);
9171 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9172 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9173 tocref = create_TOC_reference (modaddr, NULL_RTX);
9174 rtx modmem = gen_const_mem (Pmode, tocref);
9175 set_mem_alias_set (modmem, get_TOC_alias_set ());
9177 rtx modreg = gen_reg_rtx (Pmode);
9178 emit_insn (gen_rtx_SET (modreg, modmem));
9180 tmpreg = gen_reg_rtx (Pmode);
9181 emit_insn (gen_rtx_SET (tmpreg, mem));
9183 dest = gen_reg_rtx (Pmode);
9184 if (TARGET_32BIT)
9185 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9186 else
9187 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9188 return dest;
9190 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9191 else if (TARGET_32BIT)
9193 tlsreg = gen_reg_rtx (SImode);
9194 emit_insn (gen_tls_get_tpointer (tlsreg));
9196 else
9197 tlsreg = gen_rtx_REG (DImode, 13);
9199 /* Load the TOC value into temporary register. */
9200 tmpreg = gen_reg_rtx (Pmode);
9201 emit_insn (gen_rtx_SET (tmpreg, mem));
9202 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9203 gen_rtx_MINUS (Pmode, addr, tlsreg));
9205 /* Add TOC symbol value to TLS pointer. */
9206 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9208 return dest;
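   /* Hedged sketch of the non-dynamic path above on 64-bit AIX, where
      the thread pointer lives in GPR 13:
	  ld   rT,sym[TL](r2)    # TOC load of the [TL] csect address
	  add  rD,rT,r13         # add the thread pointer
      The 32-bit variant first fetches the thread pointer with the
      tls_get_tpointer libcall; mnemonics here are illustrative only.  */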
9211 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9212 this (thread-local) address. */
9214 static rtx
9215 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9217 rtx dest, insn;
9219 if (TARGET_XCOFF)
9220 return rs6000_legitimize_tls_address_aix (addr, model);
9222 dest = gen_reg_rtx (Pmode);
9223 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9225 rtx tlsreg;
9227 if (TARGET_64BIT)
9229 tlsreg = gen_rtx_REG (Pmode, 13);
9230 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9232 else
9234 tlsreg = gen_rtx_REG (Pmode, 2);
9235 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9237 emit_insn (insn);
9239 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9241 rtx tlsreg, tmp;
9243 tmp = gen_reg_rtx (Pmode);
9244 if (TARGET_64BIT)
9246 tlsreg = gen_rtx_REG (Pmode, 13);
9247 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9249 else
9251 tlsreg = gen_rtx_REG (Pmode, 2);
9252 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9254 emit_insn (insn);
9255 if (TARGET_64BIT)
9256 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9257 else
9258 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9259 emit_insn (insn);
9261 else
9263 rtx r3, got, tga, tmp1, tmp2, call_insn;
9265 /* We currently use relocations like @got@tlsgd for tls, which
9266 means the linker will handle allocation of tls entries, placing
9267 them in the .got section. So use a pointer to the .got section,
9268 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9269 or to secondary GOT sections used by 32-bit -fPIC. */
9270 if (TARGET_64BIT)
9271 got = gen_rtx_REG (Pmode, 2);
9272 else
9274 if (flag_pic == 1)
9275 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9276 else
9278 rtx gsym = rs6000_got_sym ();
9279 got = gen_reg_rtx (Pmode);
9280 if (flag_pic == 0)
9281 rs6000_emit_move (got, gsym, Pmode);
9282 else
9284 rtx mem, lab;
9286 tmp1 = gen_reg_rtx (Pmode);
9287 tmp2 = gen_reg_rtx (Pmode);
9288 mem = gen_const_mem (Pmode, tmp1);
9289 lab = gen_label_rtx ();
9290 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9291 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9292 if (TARGET_LINK_STACK)
9293 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9294 emit_move_insn (tmp2, mem);
9295 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9296 set_unique_reg_note (last, REG_EQUAL, gsym);
9301 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9303 tga = rs6000_tls_get_addr ();
9304 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9305 const0_rtx, Pmode);
9307 r3 = gen_rtx_REG (Pmode, 3);
9308 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9310 if (TARGET_64BIT)
9311 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9312 else
9313 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9315 else if (DEFAULT_ABI == ABI_V4)
9316 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9317 else
9318 gcc_unreachable ();
9319 call_insn = last_call_insn ();
9320 PATTERN (call_insn) = insn;
9321 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9322 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9323 pic_offset_table_rtx);
9325 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9327 tga = rs6000_tls_get_addr ();
9328 tmp1 = gen_reg_rtx (Pmode);
9329 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9330 const0_rtx, Pmode);
9332 r3 = gen_rtx_REG (Pmode, 3);
9333 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9335 if (TARGET_64BIT)
9336 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9337 else
9338 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9340 else if (DEFAULT_ABI == ABI_V4)
9341 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9342 else
9343 gcc_unreachable ();
9344 call_insn = last_call_insn ();
9345 PATTERN (call_insn) = insn;
9346 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9347 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9348 pic_offset_table_rtx);
9350 if (rs6000_tls_size == 16)
9352 if (TARGET_64BIT)
9353 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9354 else
9355 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9357 else if (rs6000_tls_size == 32)
9359 tmp2 = gen_reg_rtx (Pmode);
9360 if (TARGET_64BIT)
9361 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9362 else
9363 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9364 emit_insn (insn);
9365 if (TARGET_64BIT)
9366 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9367 else
9368 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9370 else
9372 tmp2 = gen_reg_rtx (Pmode);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9375 else
9376 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9377 emit_insn (insn);
9378 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9380 emit_insn (insn);
9382 else
9384 /* IE, or 64-bit offset LE. */
9385 tmp2 = gen_reg_rtx (Pmode);
9386 if (TARGET_64BIT)
9387 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9388 else
9389 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9390 emit_insn (insn);
9391 if (TARGET_64BIT)
9392 insn = gen_tls_tls_64 (dest, tmp2, addr);
9393 else
9394 insn = gen_tls_tls_32 (dest, tmp2, addr);
9395 emit_insn (insn);
9399 return dest;
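   /* Rough shape of the ELF sequences emitted above (illustrative;
      r13 is the 64-bit thread pointer, r2 the TOC pointer):
	local-exec, -mtls-size=16:  addi  rD,r13,sym@tprel
	local-exec, -mtls-size=32:  addis rT,r13,sym@tprel@ha
				    addi  rD,rT,sym@tprel@l
	global-dynamic (small TOC): addi  r3,r2,sym@got@tlsgd
				    bl    __tls_get_addr(sym@tlsgd)  */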
9402 /* Only create the global variable for the stack protect guard if we are using
9403 the global flavor of that guard. */
9404 static tree
9405 rs6000_init_stack_protect_guard (void)
9407 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9408 return default_stack_protect_guard ();
9410 return NULL_TREE;
9413 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9415 static bool
9416 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9418 if (GET_CODE (x) == HIGH
9419 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9420 return true;
9422 /* A TLS symbol in the TOC cannot contain a sum. */
9423 if (GET_CODE (x) == CONST
9424 && GET_CODE (XEXP (x, 0)) == PLUS
9425 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9426 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9427 return true;
9429 /* Do not place an ELF TLS symbol in the constant pool. */
9430 return TARGET_ELF && tls_referenced_p (x);
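   /* For example, (const (plus (symbol_ref "x" [TLS]) (const_int 8)))
      is refused here, so such a sum never becomes a TOC entry.  */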
9433 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9434 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9435 can be addressed relative to the toc pointer. */
9437 static bool
9438 use_toc_relative_ref (rtx sym, machine_mode mode)
9440 return ((constant_pool_expr_p (sym)
9441 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9442 get_pool_mode (sym)))
9443 || (TARGET_CMODEL == CMODEL_MEDIUM
9444 && SYMBOL_REF_LOCAL_P (sym)
9445 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9448 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9449 replace the input X, or the original X if no replacement is called for.
9450 The output parameter *WIN is 1 if the calling macro should goto WIN,
9451 0 if it should not.
9453 For RS/6000, we wish to handle large displacements off a base
9454    register by splitting the addend across an addis and the mem insn.
9455    This cuts the number of extra insns needed from 3 to 1.
9457 On Darwin, we use this to generate code for floating point constants.
9458 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9459 The Darwin code is inside #if TARGET_MACHO because only then are the
9460 machopic_* functions defined. */
9461 static rtx
9462 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9463 int opnum, int type,
9464 int ind_levels ATTRIBUTE_UNUSED, int *win)
9466 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9467 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9469 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9470 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9471 if (reg_offset_p
9472 && opnum == 1
9473 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9474 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9475 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9476 && TARGET_P9_VECTOR)
9477 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9478 && TARGET_P9_VECTOR)))
9479 reg_offset_p = false;
9481 /* We must recognize output that we have already generated ourselves. */
9482 if (GET_CODE (x) == PLUS
9483 && GET_CODE (XEXP (x, 0)) == PLUS
9484 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9485 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9486 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9488 if (TARGET_DEBUG_ADDR)
9490 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9491 debug_rtx (x);
9493 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9494 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9495 opnum, (enum reload_type) type);
9496 *win = 1;
9497 return x;
9500 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9501 if (GET_CODE (x) == LO_SUM
9502 && GET_CODE (XEXP (x, 0)) == HIGH)
9504 if (TARGET_DEBUG_ADDR)
9506 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9507 debug_rtx (x);
9509 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9510 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9511 opnum, (enum reload_type) type);
9512 *win = 1;
9513 return x;
9516 #if TARGET_MACHO
9517 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9518 && GET_CODE (x) == LO_SUM
9519 && GET_CODE (XEXP (x, 0)) == PLUS
9520 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9521 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9522 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9523 && machopic_operand_p (XEXP (x, 1)))
9525       /* Result of a previous invocation of this function on a Darwin
9526 	 floating-point constant.  */
9527 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9528 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9529 opnum, (enum reload_type) type);
9530 *win = 1;
9531 return x;
9533 #endif
9535 if (TARGET_CMODEL != CMODEL_SMALL
9536 && reg_offset_p
9537 && !quad_offset_p
9538 && small_toc_ref (x, VOIDmode))
9540 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9541 x = gen_rtx_LO_SUM (Pmode, hi, x);
9542 if (TARGET_DEBUG_ADDR)
9544 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9545 debug_rtx (x);
9547 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9548 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9549 opnum, (enum reload_type) type);
9550 *win = 1;
9551 return x;
9554 if (GET_CODE (x) == PLUS
9555 && REG_P (XEXP (x, 0))
9556 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9557 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9558 && CONST_INT_P (XEXP (x, 1))
9559 && reg_offset_p
9560 && !PAIRED_VECTOR_MODE (mode)
9561 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9563 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9564 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9565 HOST_WIDE_INT high
9566 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9568 /* Check for 32-bit overflow or quad addresses with one of the
9569 four least significant bits set. */
9570 if (high + low != val
9571 || (quad_offset_p && (low & 0xf)))
9573 *win = 0;
9574 return x;
9577 /* Reload the high part into a base reg; leave the low part
9578 in the mem directly. */
9580 x = gen_rtx_PLUS (GET_MODE (x),
9581 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9582 GEN_INT (high)),
9583 GEN_INT (low));
9585 if (TARGET_DEBUG_ADDR)
9587 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9588 debug_rtx (x);
9590 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9591 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9592 opnum, (enum reload_type) type);
9593 *win = 1;
9594 return x;
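      /* Worked example of the split above: val = 0x12345 gives
	 low = ((0x2345 ^ 0x8000) - 0x8000) = 0x2345 and high = 0x10000,
	 so high + low == val; the high part is reloaded into a base
	 register (an addis) while the low part stays as the 16-bit
	 displacement of the memory insn.  */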
9597 if (GET_CODE (x) == SYMBOL_REF
9598 && reg_offset_p
9599 && !quad_offset_p
9600 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9601 && !PAIRED_VECTOR_MODE (mode)
9602 #if TARGET_MACHO
9603 && DEFAULT_ABI == ABI_DARWIN
9604 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9605 && machopic_symbol_defined_p (x)
9606 #else
9607 && DEFAULT_ABI == ABI_V4
9608 && !flag_pic
9609 #endif
9610 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9611 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9612 without fprs.
9613 ??? Assume floating point reg based on mode? This assumption is
9614 	 violated by e.g. a powerpc-linux -m32 compilation of gcc.dg/pr28796-2.c
9615 where reload ends up doing a DFmode load of a constant from
9616 mem using two gprs. Unfortunately, at this point reload
9617 hasn't yet selected regs so poking around in reload data
9618 won't help and even if we could figure out the regs reliably,
9619 we'd still want to allow this transformation when the mem is
9620 naturally aligned. Since we say the address is good here, we
9621 can't disable offsets from LO_SUMs in mem_operand_gpr.
9622 FIXME: Allow offset from lo_sum for other modes too, when
9623 mem is sufficiently aligned.
9625 Also disallow this if the type can go in VMX/Altivec registers, since
9626 those registers do not have d-form (reg+offset) address modes. */
9627 && !reg_addr[mode].scalar_in_vmx_p
9628 && mode != TFmode
9629 && mode != TDmode
9630 && mode != IFmode
9631 && mode != KFmode
9632 && (mode != TImode || !TARGET_VSX)
9633 && mode != PTImode
9634 && (mode != DImode || TARGET_POWERPC64)
9635 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9636 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9638 #if TARGET_MACHO
9639 if (flag_pic)
9641 rtx offset = machopic_gen_offset (x);
9642 x = gen_rtx_LO_SUM (GET_MODE (x),
9643 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9644 gen_rtx_HIGH (Pmode, offset)), offset);
9646 else
9647 #endif
9648 x = gen_rtx_LO_SUM (GET_MODE (x),
9649 gen_rtx_HIGH (Pmode, x), x);
9651 if (TARGET_DEBUG_ADDR)
9653 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9654 debug_rtx (x);
9656 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9657 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9658 opnum, (enum reload_type) type);
9659 *win = 1;
9660 return x;
9663 /* Reload an offset address wrapped by an AND that represents the
9664 masking of the lower bits. Strip the outer AND and let reload
9665 convert the offset address into an indirect address. For VSX,
9666 force reload to create the address with an AND in a separate
9667 register, because we can't guarantee an altivec register will
9668 be used. */
9669 if (VECTOR_MEM_ALTIVEC_P (mode)
9670 && GET_CODE (x) == AND
9671 && GET_CODE (XEXP (x, 0)) == PLUS
9672 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9673 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9674 && GET_CODE (XEXP (x, 1)) == CONST_INT
9675 && INTVAL (XEXP (x, 1)) == -16)
9677 x = XEXP (x, 0);
9678 *win = 1;
9679 return x;
9682 if (TARGET_TOC
9683 && reg_offset_p
9684 && !quad_offset_p
9685 && GET_CODE (x) == SYMBOL_REF
9686 && use_toc_relative_ref (x, mode))
9688 x = create_TOC_reference (x, NULL_RTX);
9689 if (TARGET_CMODEL != CMODEL_SMALL)
9691 if (TARGET_DEBUG_ADDR)
9693 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9694 debug_rtx (x);
9696 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9697 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9698 opnum, (enum reload_type) type);
9700 *win = 1;
9701 return x;
9703 *win = 0;
9704 return x;
9707 /* Debug version of rs6000_legitimize_reload_address. */
9708 static rtx
9709 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9710 int opnum, int type,
9711 int ind_levels, int *win)
9713 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9714 ind_levels, win);
9715 fprintf (stderr,
9716 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9717 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9718 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9719 debug_rtx (x);
9721 if (x == ret)
9722 fprintf (stderr, "Same address returned\n");
9723 else if (!ret)
9724 fprintf (stderr, "NULL returned\n");
9725 else
9727 fprintf (stderr, "New address:\n");
9728 debug_rtx (ret);
9731 return ret;
9734 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9735 that is a valid memory address for an instruction.
9736 The MODE argument is the machine mode for the MEM expression
9737 that wants to use this address.
9739    On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9740 refers to a constant pool entry of an address (or the sum of it
9741 plus a constant), a short (16-bit signed) constant plus a register,
9742 the sum of two registers, or a register indirect, possibly with an
9743 auto-increment. For DFmode, DDmode and DImode with a constant plus
9744    register, we must ensure that both words are addressable, or that we
9745    are on PowerPC64 with a word-aligned offset.
9747 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9748 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9749 because adjacent memory cells are accessed by adding word-sized offsets
9750 during assembly output. */
9751 static bool
9752 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9754 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9755 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9757 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9758 if (VECTOR_MEM_ALTIVEC_P (mode)
9759 && GET_CODE (x) == AND
9760 && GET_CODE (XEXP (x, 1)) == CONST_INT
9761 && INTVAL (XEXP (x, 1)) == -16)
9762 x = XEXP (x, 0);
9764 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9765 return 0;
9766 if (legitimate_indirect_address_p (x, reg_ok_strict))
9767 return 1;
9768 if (TARGET_UPDATE
9769 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9770 && mode_supports_pre_incdec_p (mode)
9771 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9772 return 1;
9773 /* Handle restricted vector d-form offsets in ISA 3.0. */
9774 if (quad_offset_p)
9776 if (quad_address_p (x, mode, reg_ok_strict))
9777 return 1;
9779 else if (virtual_stack_registers_memory_p (x))
9780 return 1;
9782 else if (reg_offset_p)
9784 if (legitimate_small_data_p (mode, x))
9785 return 1;
9786 if (legitimate_constant_pool_address_p (x, mode,
9787 reg_ok_strict || lra_in_progress))
9788 return 1;
9789 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9790 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9791 return 1;
9794 /* For TImode, if we have TImode in VSX registers, only allow register
9795 indirect addresses. This will allow the values to go in either GPRs
9796 or VSX registers without reloading. The vector types would tend to
9797 go into VSX registers, so we allow REG+REG, while TImode seems
9798 somewhat split, in that some uses are GPR based, and some VSX based. */
9799 /* FIXME: We could loosen this by changing the following to
9800 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9801 but currently we cannot allow REG+REG addressing for TImode. See
9802 PR72827 for complete details on how this ends up hoodwinking DSE. */
9803 if (mode == TImode && TARGET_VSX)
9804 return 0;
9805 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9806 if (! reg_ok_strict
9807 && reg_offset_p
9808 && GET_CODE (x) == PLUS
9809 && GET_CODE (XEXP (x, 0)) == REG
9810 && (XEXP (x, 0) == virtual_stack_vars_rtx
9811 || XEXP (x, 0) == arg_pointer_rtx)
9812 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9813 return 1;
9814 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9815 return 1;
9816 if (!FLOAT128_2REG_P (mode)
9817 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9818 || TARGET_POWERPC64
9819 || (mode != DFmode && mode != DDmode))
9820 && (TARGET_POWERPC64 || mode != DImode)
9821 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9822 && mode != PTImode
9823 && !avoiding_indexed_address_p (mode)
9824 && legitimate_indexed_address_p (x, reg_ok_strict))
9825 return 1;
9826 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9827 && mode_supports_pre_modify_p (mode)
9828 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9829 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9830 reg_ok_strict, false)
9831 || (!avoiding_indexed_address_p (mode)
9832 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9833 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9834 return 1;
9835 if (reg_offset_p && !quad_offset_p
9836 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9837 return 1;
9838 return 0;
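   /* Hedged examples of the tests above (register numbers arbitrary):
	(mem:DI (reg 9))			accepted, register indirect
	(mem:DI (plus (reg 9) (const_int 8)))	accepted, d-form offset
	(mem:V2DF (plus (reg 9) (reg 10)))	accepted, indexed x-form
	(mem:TI (plus (reg 9) (reg 10)))	rejected when TARGET_VSX,
						see the PR72827 note.  */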
9841 /* Debug version of rs6000_legitimate_address_p. */
9842 static bool
9843 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9844 bool reg_ok_strict)
9846 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9847 fprintf (stderr,
9848 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9849 "strict = %d, reload = %s, code = %s\n",
9850 ret ? "true" : "false",
9851 GET_MODE_NAME (mode),
9852 reg_ok_strict,
9853 (reload_completed ? "after" : "before"),
9854 GET_RTX_NAME (GET_CODE (x)));
9855 debug_rtx (x);
9857 return ret;
9860 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9862 static bool
9863 rs6000_mode_dependent_address_p (const_rtx addr,
9864 addr_space_t as ATTRIBUTE_UNUSED)
9866 return rs6000_mode_dependent_address_ptr (addr);
9869 /* Return true if ADDR (a legitimate address expression)
9870    has an effect that depends on the machine mode it is used for.
9872 On the RS/6000 this is true of all integral offsets (since AltiVec
9873    and VSX modes don't allow them) and of pre-increment or decrement addresses.
9875 ??? Except that due to conceptual problems in offsettable_address_p
9876 we can't really report the problems of integral offsets. So leave
9877 this assuming that the adjustable offset must be valid for the
9878 sub-words of a TFmode operand, which is what we had before. */
9880 static bool
9881 rs6000_mode_dependent_address (const_rtx addr)
9883 switch (GET_CODE (addr))
9885 case PLUS:
9886 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9887 is considered a legitimate address before reload, so there
9888 are no offset restrictions in that case. Note that this
9889 condition is safe in strict mode because any address involving
9890 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9891 been rejected as illegitimate. */
9892 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9893 && XEXP (addr, 0) != arg_pointer_rtx
9894 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9896 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9897 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9899 break;
9901 case LO_SUM:
9902 /* Anything in the constant pool is sufficiently aligned that
9903 all bytes have the same high part address. */
9904 return !legitimate_constant_pool_address_p (addr, QImode, false);
9906 /* Auto-increment cases are now treated generically in recog.c. */
9907 case PRE_MODIFY:
9908 return TARGET_UPDATE;
9910 /* AND is only allowed in Altivec loads. */
9911 case AND:
9912 return true;
9914 default:
9915 break;
9918 return false;
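   /* Worked example for the PLUS case above: on TARGET_POWERPC64 an
      offset of 32760 (0x7ff8) gives val + 0x8000 = 65528, which meets
      the 0x10000 - 8 = 65528 limit, so the address is mode-dependent:
      the +8 offset used for a second doubleword would overflow the
      16-bit displacement field.  */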
9921 /* Debug version of rs6000_mode_dependent_address. */
9922 static bool
9923 rs6000_debug_mode_dependent_address (const_rtx addr)
9925 bool ret = rs6000_mode_dependent_address (addr);
9927 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9928 ret ? "true" : "false");
9929 debug_rtx (addr);
9931 return ret;
9934 /* Implement FIND_BASE_TERM. */
9937 rs6000_find_base_term (rtx op)
9939 rtx base;
9941 base = op;
9942 if (GET_CODE (base) == CONST)
9943 base = XEXP (base, 0);
9944 if (GET_CODE (base) == PLUS)
9945 base = XEXP (base, 0);
9946 if (GET_CODE (base) == UNSPEC)
9947 switch (XINT (base, 1))
9949 case UNSPEC_TOCREL:
9950 case UNSPEC_MACHOPIC_OFFSET:
9951 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9952 for aliasing purposes. */
9953 return XVECEXP (base, 0, 0);
9956 return op;
9959 /* More elaborate version of recog's offsettable_memref_p predicate
9960 that works around the ??? note of rs6000_mode_dependent_address.
9961 In particular it accepts
9963 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9965    in 32-bit mode, which the recog predicate rejects.  */
9967 static bool
9968 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9970 bool worst_case;
9972 if (!MEM_P (op))
9973 return false;
9975 /* First mimic offsettable_memref_p. */
9976 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9977 return true;
9979 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9980 the latter predicate knows nothing about the mode of the memory
9981 reference and, therefore, assumes that it is the largest supported
9982 mode (TFmode). As a consequence, legitimate offsettable memory
9983 references are rejected. rs6000_legitimate_offset_address_p contains
9984 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9985 at least with a little bit of help here given that we know the
9986 actual registers used. */
9987 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9988 || GET_MODE_SIZE (reg_mode) == 4);
9989 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9990 true, worst_case);
9993 /* Determine the reassociation width to be used in reassociate_bb.
9994 This takes into account how many parallel operations we
9995 can actually do of a given type, and also the latency.
9997 int add/sub 6/cycle
9998 mul 2/cycle
9999 vect add/sub/mul 2/cycle
10000 fp add/sub/mul 2/cycle
10001 dfp 1/cycle
10004 static int
10005 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10006 machine_mode mode)
10008 switch (rs6000_cpu)
10010 case PROCESSOR_POWER8:
10011 case PROCESSOR_POWER9:
10012 if (DECIMAL_FLOAT_MODE_P (mode))
10013 return 1;
10014 if (VECTOR_MODE_P (mode))
10015 return 4;
10016 if (INTEGRAL_MODE_P (mode))
10017 return opc == MULT_EXPR ? 4 : 6;
10018 if (FLOAT_MODE_P (mode))
10019 return 4;
10020 break;
10021 default:
10022 break;
10024 return 1;
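   /* For example, on POWER8/POWER9 a long chain of integer additions
      may be rebalanced into up to six parallel sub-chains, while a
      decimal-float chain stays at width 1 and is left serial.  */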
10027 /* Change register usage conditional on target flags. */
10028 static void
10029 rs6000_conditional_register_usage (void)
10031 int i;
10033 if (TARGET_DEBUG_TARGET)
10034 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10036 /* Set MQ register fixed (already call_used) so that it will not be
10037 allocated. */
10038 fixed_regs[64] = 1;
10040 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10041 if (TARGET_64BIT)
10042 fixed_regs[13] = call_used_regs[13]
10043 = call_really_used_regs[13] = 1;
10045 /* Conditionally disable FPRs. */
10046 if (TARGET_SOFT_FLOAT)
10047 for (i = 32; i < 64; i++)
10048 fixed_regs[i] = call_used_regs[i]
10049 = call_really_used_regs[i] = 1;
10051 /* The TOC register is not killed across calls in a way that is
10052 visible to the compiler. */
10053 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10054 call_really_used_regs[2] = 0;
10056 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10057 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10059 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10060 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10061 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10062 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10064 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10065 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10066 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10067 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10069 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10070 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10071 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10073 if (!TARGET_ALTIVEC && !TARGET_VSX)
10075 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10076 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10077 call_really_used_regs[VRSAVE_REGNO] = 1;
10080 if (TARGET_ALTIVEC || TARGET_VSX)
10081 global_regs[VSCR_REGNO] = 1;
10083 if (TARGET_ALTIVEC_ABI)
10085 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10086 call_used_regs[i] = call_really_used_regs[i] = 1;
10088 /* AIX reserves VR20:31 in non-extended ABI mode. */
10089 if (TARGET_XCOFF)
10090 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10091 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10096 /* Output insns to set DEST equal to the constant SOURCE as a series of
10097 lis, ori and shl instructions and return TRUE. */
10099 bool
10100 rs6000_emit_set_const (rtx dest, rtx source)
10102 machine_mode mode = GET_MODE (dest);
10103 rtx temp, set;
10104 rtx_insn *insn;
10105 HOST_WIDE_INT c;
10107 gcc_checking_assert (CONST_INT_P (source));
10108 c = INTVAL (source);
10109 switch (mode)
10111 case E_QImode:
10112 case E_HImode:
10113 emit_insn (gen_rtx_SET (dest, source));
10114 return true;
10116 case E_SImode:
10117 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10119 emit_insn (gen_rtx_SET (copy_rtx (temp),
10120 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10121 emit_insn (gen_rtx_SET (dest,
10122 gen_rtx_IOR (SImode, copy_rtx (temp),
10123 GEN_INT (c & 0xffff))));
10124 break;
10126 case E_DImode:
10127 if (!TARGET_POWERPC64)
10129 rtx hi, lo;
10131 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10132 DImode);
10133 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10134 DImode);
10135 emit_move_insn (hi, GEN_INT (c >> 32));
10136 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10137 emit_move_insn (lo, GEN_INT (c));
10139 else
10140 rs6000_emit_set_long_const (dest, c);
10141 break;
10143 default:
10144 gcc_unreachable ();
10147 insn = get_last_insn ();
10148 set = single_set (insn);
10149 if (! CONSTANT_P (SET_SRC (set)))
10150 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10152 return true;
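   /* Hedged example for the SImode path above: c = 0x12345678 becomes
	  lis  rT,0x1234	# c & ~0xffff
	  ori  rD,rT,0x5678	# c & 0xffff
      mirroring the two SETs emitted (register names illustrative).  */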
10155 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10156 Output insns to set DEST equal to the constant C as a series of
10157 lis, ori and shl instructions. */
10159 static void
10160 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10162 rtx temp;
10163 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10165 ud1 = c & 0xffff;
10166 c = c >> 16;
10167 ud2 = c & 0xffff;
10168 c = c >> 16;
10169 ud3 = c & 0xffff;
10170 c = c >> 16;
10171 ud4 = c & 0xffff;
10173 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10174 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10175 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10177 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10178 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10180 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10182 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10183 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10184 if (ud1 != 0)
10185 emit_move_insn (dest,
10186 gen_rtx_IOR (DImode, copy_rtx (temp),
10187 GEN_INT (ud1)));
10189 else if (ud3 == 0 && ud4 == 0)
10191 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10193 gcc_assert (ud2 & 0x8000);
10194 emit_move_insn (copy_rtx (temp),
10195 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10196 if (ud1 != 0)
10197 emit_move_insn (copy_rtx (temp),
10198 gen_rtx_IOR (DImode, copy_rtx (temp),
10199 GEN_INT (ud1)));
10200 emit_move_insn (dest,
10201 gen_rtx_ZERO_EXTEND (DImode,
10202 gen_lowpart (SImode,
10203 copy_rtx (temp))));
10205 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10206 || (ud4 == 0 && ! (ud3 & 0x8000)))
10208 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10210 emit_move_insn (copy_rtx (temp),
10211 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10212 if (ud2 != 0)
10213 emit_move_insn (copy_rtx (temp),
10214 gen_rtx_IOR (DImode, copy_rtx (temp),
10215 GEN_INT (ud2)));
10216 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10217 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10218 GEN_INT (16)));
10219 if (ud1 != 0)
10220 emit_move_insn (dest,
10221 gen_rtx_IOR (DImode, copy_rtx (temp),
10222 GEN_INT (ud1)));
10224 else
10226 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10228 emit_move_insn (copy_rtx (temp),
10229 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10230 if (ud3 != 0)
10231 emit_move_insn (copy_rtx (temp),
10232 gen_rtx_IOR (DImode, copy_rtx (temp),
10233 GEN_INT (ud3)));
10235 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10236 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10237 GEN_INT (32)));
10238 if (ud2 != 0)
10239 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10240 gen_rtx_IOR (DImode, copy_rtx (temp),
10241 GEN_INT (ud2 << 16)));
10242 if (ud1 != 0)
10243 emit_move_insn (dest,
10244 gen_rtx_IOR (DImode, copy_rtx (temp),
10245 GEN_INT (ud1)));
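   /* Worked example of the general case above, for
      c = 0x123456789abcdef0 (ud4..ud1 = 0x1234, 0x5678, 0x9abc, 0xdef0):
	  lis   rT,0x1234	# ud4, sign-adjusted
	  ori   rT,rT,0x5678	# IOR in ud3
	  sldi  rT,rT,32	# shift into the high word
	  oris  rT,rT,0x9abc	# IOR in ud2 << 16
	  ori   rD,rT,0xdef0	# IOR in ud1
      (the usual assembler spelling of the RTL emitted here).  */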
10249 /* Helper for the following function.  Get rid of [r+r] memory refs
10250 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10252 static void
10253 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10255 if (GET_CODE (operands[0]) == MEM
10256 && GET_CODE (XEXP (operands[0], 0)) != REG
10257 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10258 GET_MODE (operands[0]), false))
10259 operands[0]
10260 = replace_equiv_address (operands[0],
10261 copy_addr_to_reg (XEXP (operands[0], 0)));
10263 if (GET_CODE (operands[1]) == MEM
10264 && GET_CODE (XEXP (operands[1], 0)) != REG
10265 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10266 GET_MODE (operands[1]), false))
10267 operands[1]
10268 = replace_equiv_address (operands[1],
10269 copy_addr_to_reg (XEXP (operands[1], 0)));
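   /* E.g. a TImode (mem (plus (reg 9) (reg 10))) is rewritten as
      (mem (reg T)) with T = r9 + r10 computed separately, because the
      second half of the value needs an offsettable address that the
      [reg+reg] form cannot provide.  */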
10272 /* Generate a vector of constants to permute MODE for a little-endian
10273 storage operation by swapping the two halves of a vector. */
10274 static rtvec
10275 rs6000_const_vec (machine_mode mode)
10277 int i, subparts;
10278 rtvec v;
10280 switch (mode)
10282 case E_V1TImode:
10283 subparts = 1;
10284 break;
10285 case E_V2DFmode:
10286 case E_V2DImode:
10287 subparts = 2;
10288 break;
10289 case E_V4SFmode:
10290 case E_V4SImode:
10291 subparts = 4;
10292 break;
10293 case E_V8HImode:
10294 subparts = 8;
10295 break;
10296 case E_V16QImode:
10297 subparts = 16;
10298 break;
10299 default:
10300 gcc_unreachable();
10303 v = rtvec_alloc (subparts);
10305 for (i = 0; i < subparts / 2; ++i)
10306 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10307 for (i = subparts / 2; i < subparts; ++i)
10308 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10310 return v;
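   /* E.g. for V4SImode this returns the element selector {2, 3, 0, 1},
      i.e. the two doubleword halves of the vector swapped, which is the
      fixup lxvd2x/stxvd2x need on little-endian.  */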
10313 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10314 store operation. */
10315 void
10316 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10318   /* Scalar permutations are easier to express in integer modes than in
10319 floating-point modes, so cast them here. We use V1TImode instead
10320 of TImode to ensure that the values don't go through GPRs. */
10321 if (FLOAT128_VECTOR_P (mode))
10323 dest = gen_lowpart (V1TImode, dest);
10324 source = gen_lowpart (V1TImode, source);
10325 mode = V1TImode;
10328 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10329 scalar. */
10330 if (mode == TImode || mode == V1TImode)
10331 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10332 GEN_INT (64))));
10333 else
10335 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10336 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10340 /* Emit a little-endian load from vector memory location SOURCE to VSX
10341 register DEST in mode MODE. The load is done with two permuting
10342    insns that represent an lxvd2x and an xxpermdi.  */
10343 void
10344 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10346   /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10347 V1TImode). */
10348 if (mode == TImode || mode == V1TImode)
10350 mode = V2DImode;
10351 dest = gen_lowpart (V2DImode, dest);
10352 source = adjust_address (source, V2DImode, 0);
10355 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10356 rs6000_emit_le_vsx_permute (tmp, source, mode);
10357 rs6000_emit_le_vsx_permute (dest, tmp, mode);
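   /* Sketch for a little-endian V2DF load: the first permute is realized
      by the lxvd2x itself, which loads the two doublewords swapped, and
      the second by an xxpermdi that swaps them back, e.g.
	  lxvd2x   vsD,0,rA
	  xxpermdi vsD,vsD,vsD,2
      (illustrative; the splitters choose the exact form).  */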
10360 /* Emit a little-endian store to vector memory location DEST from VSX
10361 register SOURCE in mode MODE. The store is done with two permuting
10362    insns that represent an xxpermdi and an stxvd2x.  */
10363 void
10364 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10366 /* This should never be called during or after LRA, because it does
10367 not re-permute the source register. It is intended only for use
10368 during expand. */
10369 gcc_assert (!lra_in_progress && !reload_completed);
10371 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10372 V1TImode). */
10373 if (mode == TImode || mode == V1TImode)
10375 mode = V2DImode;
10376 dest = adjust_address (dest, V2DImode, 0);
10377 source = gen_lowpart (V2DImode, source);
10380 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10381 rs6000_emit_le_vsx_permute (tmp, source, mode);
10382 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10385 /* Emit a sequence representing a little-endian VSX load or store,
10386 moving data from SOURCE to DEST in mode MODE. This is done
10387 separately from rs6000_emit_move to ensure it is called only
10388 during expand. LE VSX loads and stores introduced later are
10389 handled with a split. The expand-time RTL generation allows
10390 us to optimize away redundant pairs of register-permutes. */
10391 void
10392 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10394 gcc_assert (!BYTES_BIG_ENDIAN
10395 && VECTOR_MEM_VSX_P (mode)
10396 && !TARGET_P9_VECTOR
10397 && !gpr_or_gpr_p (dest, source)
10398 && (MEM_P (source) ^ MEM_P (dest)));
10400 if (MEM_P (source))
10402 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10403 rs6000_emit_le_vsx_load (dest, source, mode);
10405 else
10407 if (!REG_P (source))
10408 source = force_reg (mode, source);
10409 rs6000_emit_le_vsx_store (dest, source, mode);
10413 /* Return whether an SFmode or SImode move can be done without converting
10414    one mode to another.  This arises when we have:
10416 (SUBREG:SF (REG:SI ...))
10417 (SUBREG:SI (REG:SF ...))
10419 and one of the values is in a floating point/vector register, where SFmode
10420 scalars are stored in DFmode format. */
10422 bool
10423 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10425 if (TARGET_ALLOW_SF_SUBREG)
10426 return true;
10428 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10429 return true;
10431 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10432 return true;
10434   /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
10435 if (SUBREG_P (dest))
10437 rtx dest_subreg = SUBREG_REG (dest);
10438 rtx src_subreg = SUBREG_REG (src);
10439 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10442 return false;
10446 /* Helper function to change moves with:
10448 (SUBREG:SF (REG:SI)) and
10449 (SUBREG:SI (REG:SF))
10451 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10452 values are stored as DFmode values in the VSX registers. We need to convert
10453 the bits before we can use a direct move or operate on the bits in the
10454 vector register as an integer type.
10456    Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */
10458 static bool
10459 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10461 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10462 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10463 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10465 rtx inner_source = SUBREG_REG (source);
10466 machine_mode inner_mode = GET_MODE (inner_source);
10468 if (mode == SImode && inner_mode == SFmode)
10470 emit_insn (gen_movsi_from_sf (dest, inner_source));
10471 return true;
10474 if (mode == SFmode && inner_mode == SImode)
10476 emit_insn (gen_movsf_from_si (dest, inner_source));
10477 return true;
10481 return false;
10484 /* Emit a move from SOURCE to DEST in mode MODE. */
10485 void
10486 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10488 rtx operands[2];
10489 operands[0] = dest;
10490 operands[1] = source;
10492 if (TARGET_DEBUG_ADDR)
10494 fprintf (stderr,
10495 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10496 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10497 GET_MODE_NAME (mode),
10498 lra_in_progress,
10499 reload_completed,
10500 can_create_pseudo_p ());
10501 debug_rtx (dest);
10502 fprintf (stderr, "source:\n");
10503 debug_rtx (source);
10506 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10507 if (CONST_WIDE_INT_P (operands[1])
10508 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10510 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10511 gcc_unreachable ();
10514 /* See if we need to special case SImode/SFmode SUBREG moves. */
10515 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10516 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10517 return;
10519 /* Check if GCC is setting up a block move that will end up using FP
10520 registers as temporaries. We must make sure this is acceptable. */
10521 if (GET_CODE (operands[0]) == MEM
10522 && GET_CODE (operands[1]) == MEM
10523 && mode == DImode
10524 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10525 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10526 && ! (rs6000_slow_unaligned_access (SImode,
10527 (MEM_ALIGN (operands[0]) > 32
10528 ? 32 : MEM_ALIGN (operands[0])))
10529 || rs6000_slow_unaligned_access (SImode,
10530 (MEM_ALIGN (operands[1]) > 32
10531 ? 32 : MEM_ALIGN (operands[1]))))
10532 && ! MEM_VOLATILE_P (operands [0])
10533 && ! MEM_VOLATILE_P (operands [1]))
10535 emit_move_insn (adjust_address (operands[0], SImode, 0),
10536 adjust_address (operands[1], SImode, 0));
10537 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10538 adjust_address (copy_rtx (operands[1]), SImode, 4));
10539 return;
10542 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10543 && !gpc_reg_operand (operands[1], mode))
10544 operands[1] = force_reg (mode, operands[1]);
10546 /* Recognize the case where operand[1] is a reference to thread-local
10547 data and load its address to a register. */
10548 if (tls_referenced_p (operands[1]))
10550 enum tls_model model;
10551 rtx tmp = operands[1];
10552 rtx addend = NULL;
10554 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10556 addend = XEXP (XEXP (tmp, 0), 1);
10557 tmp = XEXP (XEXP (tmp, 0), 0);
10560 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10561 model = SYMBOL_REF_TLS_MODEL (tmp);
10562 gcc_assert (model != 0);
10564 tmp = rs6000_legitimize_tls_address (tmp, model);
10565 if (addend)
10567 tmp = gen_rtx_PLUS (mode, tmp, addend);
10568 tmp = force_operand (tmp, operands[0]);
10570 operands[1] = tmp;
10573 /* 128-bit constant floating-point values on Darwin should really be loaded
10574 as two parts. However, this premature splitting is a problem when DFmode
10575 values can go into Altivec registers. */
10576 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10577 && GET_CODE (operands[1]) == CONST_DOUBLE)
10579 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10580 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10581 DFmode);
10582 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10583 GET_MODE_SIZE (DFmode)),
10584 simplify_gen_subreg (DFmode, operands[1], mode,
10585 GET_MODE_SIZE (DFmode)),
10586 DFmode);
10587 return;
10590 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10591      p1:SD) if p1 is not of floating point class and p0 is spilled, as
10592 we can have no analogous movsd_store for this. */
10593 if (lra_in_progress && mode == DDmode
10594 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10595 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10596 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10597 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10599 enum reg_class cl;
10600 int regno = REGNO (SUBREG_REG (operands[1]));
10602 if (regno >= FIRST_PSEUDO_REGISTER)
10604 cl = reg_preferred_class (regno);
10605 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10607 if (regno >= 0 && ! FP_REGNO_P (regno))
10609 mode = SDmode;
10610 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10611 operands[1] = SUBREG_REG (operands[1]);
10614 if (lra_in_progress
10615 && mode == SDmode
10616 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10617 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10618 && (REG_P (operands[1])
10619 || (GET_CODE (operands[1]) == SUBREG
10620 && REG_P (SUBREG_REG (operands[1])))))
10622 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10623 ? SUBREG_REG (operands[1]) : operands[1]);
10624 enum reg_class cl;
10626 if (regno >= FIRST_PSEUDO_REGISTER)
10628 cl = reg_preferred_class (regno);
10629 gcc_assert (cl != NO_REGS);
10630 regno = ira_class_hard_regs[cl][0];
10632 if (FP_REGNO_P (regno))
10634 if (GET_MODE (operands[0]) != DDmode)
10635 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10636 emit_insn (gen_movsd_store (operands[0], operands[1]));
10638 else if (INT_REGNO_P (regno))
10639 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10640 else
10641 gcc_unreachable();
10642 return;
10644 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10645      p1:DD)) if p0 is not of floating point class and p1 is spilled, as
10646 we can have no analogous movsd_load for this. */
10647 if (lra_in_progress && mode == DDmode
10648 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10649 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10650 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10651 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10653 enum reg_class cl;
10654 int regno = REGNO (SUBREG_REG (operands[0]));
10656 if (regno >= FIRST_PSEUDO_REGISTER)
10658 cl = reg_preferred_class (regno);
10659 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10661 if (regno >= 0 && ! FP_REGNO_P (regno))
10663 mode = SDmode;
10664 operands[0] = SUBREG_REG (operands[0]);
10665 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10668 if (lra_in_progress
10669 && mode == SDmode
10670 && (REG_P (operands[0])
10671 || (GET_CODE (operands[0]) == SUBREG
10672 && REG_P (SUBREG_REG (operands[0]))))
10673 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10674 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10676 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10677 ? SUBREG_REG (operands[0]) : operands[0]);
10678 enum reg_class cl;
10680 if (regno >= FIRST_PSEUDO_REGISTER)
10682 cl = reg_preferred_class (regno);
10683 gcc_assert (cl != NO_REGS);
10684 regno = ira_class_hard_regs[cl][0];
10686 if (FP_REGNO_P (regno))
10688 if (GET_MODE (operands[1]) != DDmode)
10689 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10690 emit_insn (gen_movsd_load (operands[0], operands[1]));
10692 else if (INT_REGNO_P (regno))
10693 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10694 else
10695 gcc_unreachable();
10696 return;
10699 /* FIXME: In the long term, this switch statement should go away
10700 and be replaced by a sequence of tests based on things like
10701 mode == Pmode. */
10702 switch (mode)
10704 case E_HImode:
10705 case E_QImode:
10706 if (CONSTANT_P (operands[1])
10707 && GET_CODE (operands[1]) != CONST_INT)
10708 operands[1] = force_const_mem (mode, operands[1]);
10709 break;
10711 case E_TFmode:
10712 case E_TDmode:
10713 case E_IFmode:
10714 case E_KFmode:
10715 if (FLOAT128_2REG_P (mode))
10716 rs6000_eliminate_indexed_memrefs (operands);
10717 /* fall through */
10719 case E_DFmode:
10720 case E_DDmode:
10721 case E_SFmode:
10722 case E_SDmode:
10723 if (CONSTANT_P (operands[1])
10724 && ! easy_fp_constant (operands[1], mode))
10725 operands[1] = force_const_mem (mode, operands[1]);
10726 break;
10728 case E_V16QImode:
10729 case E_V8HImode:
10730 case E_V4SFmode:
10731 case E_V4SImode:
10732 case E_V2SFmode:
10733 case E_V2SImode:
10734 case E_V2DFmode:
10735 case E_V2DImode:
10736 case E_V1TImode:
10737 if (CONSTANT_P (operands[1])
10738 && !easy_vector_constant (operands[1], mode))
10739 operands[1] = force_const_mem (mode, operands[1]);
10740 break;
10742 case E_SImode:
10743 case E_DImode:
10744       /* Use the default pattern for the address of ELF small data.  */
10745 if (TARGET_ELF
10746 && mode == Pmode
10747 && DEFAULT_ABI == ABI_V4
10748 && (GET_CODE (operands[1]) == SYMBOL_REF
10749 || GET_CODE (operands[1]) == CONST)
10750 && small_data_operand (operands[1], mode))
10752 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10753 return;
10756 if (DEFAULT_ABI == ABI_V4
10757 && mode == Pmode && mode == SImode
10758 && flag_pic == 1 && got_operand (operands[1], mode))
10760 emit_insn (gen_movsi_got (operands[0], operands[1]));
10761 return;
10764 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10765 && TARGET_NO_TOC
10766 && ! flag_pic
10767 && mode == Pmode
10768 && CONSTANT_P (operands[1])
10769 && GET_CODE (operands[1]) != HIGH
10770 && GET_CODE (operands[1]) != CONST_INT)
10772 rtx target = (!can_create_pseudo_p ()
10773 ? operands[0]
10774 : gen_reg_rtx (mode));
10776 /* If this is a function address on -mcall-aixdesc,
10777 convert it to the address of the descriptor. */
10778 if (DEFAULT_ABI == ABI_AIX
10779 && GET_CODE (operands[1]) == SYMBOL_REF
10780 && XSTR (operands[1], 0)[0] == '.')
10782 const char *name = XSTR (operands[1], 0);
10783 rtx new_ref;
10784 while (*name == '.')
10785 name++;
10786 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10787 CONSTANT_POOL_ADDRESS_P (new_ref)
10788 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10789 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10790 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10791 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10792 operands[1] = new_ref;
10795 if (DEFAULT_ABI == ABI_DARWIN)
10797 #if TARGET_MACHO
10798 if (MACHO_DYNAMIC_NO_PIC_P)
10800 /* Take care of any required data indirection. */
10801 operands[1] = rs6000_machopic_legitimize_pic_address (
10802 operands[1], mode, operands[0]);
10803 if (operands[0] != operands[1])
10804 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10805 return;
10807 #endif
10808 emit_insn (gen_macho_high (target, operands[1]));
10809 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10810 return;
10813 emit_insn (gen_elf_high (target, operands[1]));
10814 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10815 return;
10818 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10819 and we have put it in the TOC, we just need to make a TOC-relative
10820 reference to it. */
10821 if (TARGET_TOC
10822 && GET_CODE (operands[1]) == SYMBOL_REF
10823 && use_toc_relative_ref (operands[1], mode))
10824 operands[1] = create_TOC_reference (operands[1], operands[0]);
10825 else if (mode == Pmode
10826 && CONSTANT_P (operands[1])
10827 && GET_CODE (operands[1]) != HIGH
10828 && ((GET_CODE (operands[1]) != CONST_INT
10829 && ! easy_fp_constant (operands[1], mode))
10830 || (GET_CODE (operands[1]) == CONST_INT
10831 && (num_insns_constant (operands[1], mode)
10832 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10833 || (GET_CODE (operands[0]) == REG
10834 && FP_REGNO_P (REGNO (operands[0]))))
10835 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10836 && (TARGET_CMODEL == CMODEL_SMALL
10837 || can_create_pseudo_p ()
10838 || (REG_P (operands[0])
10839 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10842 #if TARGET_MACHO
10843 /* Darwin uses a special PIC legitimizer. */
10844 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10846 operands[1] =
10847 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10848 operands[0]);
10849 if (operands[0] != operands[1])
10850 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10851 return;
10853 #endif
10855 /* If we are to limit the number of things we put in the TOC and
10856 this is a symbol plus a constant we can add in one insn,
10857 just put the symbol in the TOC and add the constant. */
10858 if (GET_CODE (operands[1]) == CONST
10859 && TARGET_NO_SUM_IN_TOC
10860 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10861 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10862 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10863 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10864 && ! side_effects_p (operands[0]))
10866 rtx sym =
10867 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10868 rtx other = XEXP (XEXP (operands[1], 0), 1);
10870 sym = force_reg (mode, sym);
10871 emit_insn (gen_add3_insn (operands[0], sym, other));
10872 return;
10875 operands[1] = force_const_mem (mode, operands[1]);
10877 if (TARGET_TOC
10878 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10879 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10881 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10882 operands[0]);
10883 operands[1] = gen_const_mem (mode, tocref);
10884 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10887 break;
10889 case E_TImode:
10890 if (!VECTOR_MEM_VSX_P (TImode))
10891 rs6000_eliminate_indexed_memrefs (operands);
10892 break;
10894 case E_PTImode:
10895 rs6000_eliminate_indexed_memrefs (operands);
10896 break;
10898 default:
10899 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10902 /* Above, we may have called force_const_mem which may have returned
10903 an invalid address. If we can, fix this up; otherwise, reload will
10904 have to deal with it. */
10905 if (GET_CODE (operands[1]) == MEM)
10906 operands[1] = validize_mem (operands[1]);
10908 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10911 /* Nonzero if we can use a floating-point register to pass this arg. */
10912 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10913 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10914 && (CUM)->fregno <= FP_ARG_MAX_REG \
10915 && TARGET_HARD_FLOAT)
10917 /* Nonzero if we can use an AltiVec register to pass this arg. */
10918 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10919 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10920 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10921 && TARGET_ALTIVEC_ABI \
10922 && (NAMED))
10924 /* Walk down the type tree of TYPE counting consecutive base elements.
10925 If *MODEP is VOIDmode, then set it to the first valid floating point
10926 or vector type. If a non-floating point or vector type is found, or
10927 if a floating point or vector type that doesn't match a non-VOIDmode
10928 *MODEP is found, then return -1, otherwise return the count in the
10929 sub-tree. */
10931 static int
10932 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10934 machine_mode mode;
10935 HOST_WIDE_INT size;
10937 switch (TREE_CODE (type))
10939 case REAL_TYPE:
10940 mode = TYPE_MODE (type);
10941 if (!SCALAR_FLOAT_MODE_P (mode))
10942 return -1;
10944 if (*modep == VOIDmode)
10945 *modep = mode;
10947 if (*modep == mode)
10948 return 1;
10950 break;
10952 case COMPLEX_TYPE:
10953 mode = TYPE_MODE (TREE_TYPE (type));
10954 if (!SCALAR_FLOAT_MODE_P (mode))
10955 return -1;
10957 if (*modep == VOIDmode)
10958 *modep = mode;
10960 if (*modep == mode)
10961 return 2;
10963 break;
10965 case VECTOR_TYPE:
10966 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10967 return -1;
10969 /* Use V4SImode as representative of all 128-bit vector types. */
10970 size = int_size_in_bytes (type);
10971 switch (size)
10973 case 16:
10974 mode = V4SImode;
10975 break;
10976 default:
10977 return -1;
10980 if (*modep == VOIDmode)
10981 *modep = mode;
10983 /* Vector modes are considered to be opaque: two vectors are
10984 equivalent for the purposes of being homogeneous aggregates
10985 if they are the same size. */
10986 if (*modep == mode)
10987 return 1;
10989 break;
10991 case ARRAY_TYPE:
10993 int count;
10994 tree index = TYPE_DOMAIN (type);
10996 /* Can't handle incomplete types nor sizes that are not
10997 fixed. */
10998 if (!COMPLETE_TYPE_P (type)
10999 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11000 return -1;
11002 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11003 if (count == -1
11004 || !index
11005 || !TYPE_MAX_VALUE (index)
11006 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11007 || !TYPE_MIN_VALUE (index)
11008 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11009 || count < 0)
11010 return -1;
11012 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11013 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11015 /* There must be no padding. */
11016 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11017 return -1;
11019 return count;
11022 case RECORD_TYPE:
11024 int count = 0;
11025 int sub_count;
11026 tree field;
11028 /* Can't handle incomplete types nor sizes that are not
11029 fixed. */
11030 if (!COMPLETE_TYPE_P (type)
11031 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11032 return -1;
11034 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11036 if (TREE_CODE (field) != FIELD_DECL)
11037 continue;
11039 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11040 if (sub_count < 0)
11041 return -1;
11042 count += sub_count;
11045 /* There must be no padding. */
11046 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11047 return -1;
11049 return count;
11052 case UNION_TYPE:
11053 case QUAL_UNION_TYPE:
11055 /* These aren't very interesting except in a degenerate case. */
11056 int count = 0;
11057 int sub_count;
11058 tree field;
11060 /* Can't handle incomplete types nor sizes that are not
11061 fixed. */
11062 if (!COMPLETE_TYPE_P (type)
11063 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11064 return -1;
11066 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11068 if (TREE_CODE (field) != FIELD_DECL)
11069 continue;
11071 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11072 if (sub_count < 0)
11073 return -1;
11074 count = count > sub_count ? count : sub_count;
11077 /* There must be no padding. */
11078 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11079 return -1;
11081 return count;
11084 default:
11085 break;
11088 return -1;
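/* Illustrative example (editorial addition, not from the original
   source): for

     struct hfa { double x; double y; };

   rs6000_aggregate_candidate returns 2 with *MODEP set to DFmode:
   each REAL_TYPE field contributes 1 and the struct has no padding.
   By contrast,

     struct mixed { double x; int y; };

   returns -1, because the int field hits the default case above. */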
11091 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11092 float or vector aggregate that shall be passed in FP/vector registers
11093 according to the ELFv2 ABI, return the homogeneous element mode in
11094 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11096 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11098 static bool
11099 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11100 machine_mode *elt_mode,
11101 int *n_elts)
11103 /* Note that we do not accept complex types at the top level as
11104 homogeneous aggregates; these types are handled via the
11105 targetm.calls.split_complex_arg mechanism. Complex types
11106 can be elements of homogeneous aggregates, however. */
11107 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11109 machine_mode field_mode = VOIDmode;
11110 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11112 if (field_count > 0)
11114 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11115 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11117 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11118 up to AGGR_ARG_NUM_REG registers. */
11119 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11121 if (elt_mode)
11122 *elt_mode = field_mode;
11123 if (n_elts)
11124 *n_elts = field_count;
11125 return true;
11130 if (elt_mode)
11131 *elt_mode = mode;
11132 if (n_elts)
11133 *n_elts = 1;
11134 return false;
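/* Worked example (editorial sketch, assuming the ELFv2 ABI is in
   effect): for

     struct pair { vector int a; vector int b; };

   this returns true with *ELT_MODE = V4SImode and *N_ELTS = 2, since
   each 16-byte vector needs one register and 2 <= AGGR_ARG_NUM_REG.
   A struct of nine doubles would need nine FPRs, fails the
   AGGR_ARG_NUM_REG test, and so is not treated as homogeneous. */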
11137 /* Return a nonzero value to say to return the function value in
11138 memory, just as large structures are always returned. TYPE will be
11139 the data type of the value, and FNTYPE will be the type of the
11140 function doing the returning, or NULL for libcalls.
11142 The AIX ABI for the RS/6000 specifies that all structures are
11143 returned in memory. The Darwin ABI does the same.
11145 For the Darwin 64 Bit ABI, a function result can be returned in
11146 registers or in memory, depending on the size of the return data
11147 type. If it is returned in registers, the value occupies the same
11148 registers as it would if it were the first and only function
11149 argument. Otherwise, the function places its result in memory at
11150 the location pointed to by GPR3.
11152 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11153 but a draft put them in memory, and GCC used to implement the draft
11154 instead of the final standard. Therefore, aix_struct_return
11155 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11156 compatibility can change DRAFT_V4_STRUCT_RET to override the
11157 default, and -m switches get the final word. See
11158 rs6000_option_override_internal for more details.
11160 The PPC32 SVR4 ABI uses IEEE 128-bit floating point for long double
11161 when 128-bit long double support is enabled. These values are returned in memory.
11163 int_size_in_bytes returns -1 for variable-size objects, which always
11164 go in memory. The cast to unsigned makes -1 > 8. */
11166 static bool
11167 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11169 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11170 if (TARGET_MACHO
11171 && rs6000_darwin64_abi
11172 && TREE_CODE (type) == RECORD_TYPE
11173 && int_size_in_bytes (type) > 0)
11175 CUMULATIVE_ARGS valcum;
11176 rtx valret;
11178 valcum.words = 0;
11179 valcum.fregno = FP_ARG_MIN_REG;
11180 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11181 /* Do a trial code generation as if this were going to be passed
11182 as an argument; if any part goes in memory, we return NULL. */
11183 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11184 if (valret)
11185 return false;
11186 /* Otherwise fall through to more conventional ABI rules. */
11189 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11190 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11191 NULL, NULL))
11192 return false;
11194 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11195 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11196 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11197 return false;
11199 if (AGGREGATE_TYPE_P (type)
11200 && (aix_struct_return
11201 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11202 return true;
11204 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11205 modes only exist for GCC vector types if -maltivec. */
11206 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11207 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11208 return false;
11210 /* Return synthetic vectors in memory. */
11211 if (TREE_CODE (type) == VECTOR_TYPE
11212 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11214 static bool warned_for_return_big_vectors = false;
11215 if (!warned_for_return_big_vectors)
11217 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11218 "non-standard ABI extension with no compatibility "
11219 "guarantee");
11220 warned_for_return_big_vectors = true;
11222 return true;
11225 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11226 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11227 return true;
11229 return false;
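/* Illustrative examples (editorial): under the ELFv2 ABI a
   homogeneous aggregate such as

     struct hfa { float x; float y; };

   is returned in FPRs, while a 24-byte non-homogeneous struct is
   returned in memory (it is an aggregate larger than 16 bytes).
   Under the AIX ABI both are returned in memory, since
   aix_struct_return is set for that ABI. */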
11232 /* Specify whether values returned in registers should be at the most
11233 significant end of a register. We want aggregates returned by
11234 value to match the way aggregates are passed to functions. */
11236 static bool
11237 rs6000_return_in_msb (const_tree valtype)
11239 return (DEFAULT_ABI == ABI_ELFv2
11240 && BYTES_BIG_ENDIAN
11241 && AGGREGATE_TYPE_P (valtype)
11242 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11243 == PAD_UPWARD));
11246 #ifdef HAVE_AS_GNU_ATTRIBUTE
11247 /* Return TRUE if a call to function FNDECL may be one that
11248 potentially affects the function calling ABI of the object file. */
11250 static bool
11251 call_ABI_of_interest (tree fndecl)
11253 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11255 struct cgraph_node *c_node;
11257 /* Libcalls are always interesting. */
11258 if (fndecl == NULL_TREE)
11259 return true;
11261 /* Any call to an external function is interesting. */
11262 if (DECL_EXTERNAL (fndecl))
11263 return true;
11265 /* Interesting functions that we are emitting in this object file. */
11266 c_node = cgraph_node::get (fndecl);
11267 c_node = c_node->ultimate_alias_target ();
11268 return !c_node->only_called_directly_p ();
11270 return false;
11272 #endif
11274 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11275 for a call to a function whose data type is FNTYPE.
11276 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11278 For incoming args we set the number of arguments in the prototype
11279 large enough that we never return a PARALLEL. */
11281 void
11282 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11283 rtx libname ATTRIBUTE_UNUSED, int incoming,
11284 int libcall, int n_named_args,
11285 tree fndecl ATTRIBUTE_UNUSED,
11286 machine_mode return_mode ATTRIBUTE_UNUSED)
11288 static CUMULATIVE_ARGS zero_cumulative;
11290 *cum = zero_cumulative;
11291 cum->words = 0;
11292 cum->fregno = FP_ARG_MIN_REG;
11293 cum->vregno = ALTIVEC_ARG_MIN_REG;
11294 cum->prototype = (fntype && prototype_p (fntype));
11295 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11296 ? CALL_LIBCALL : CALL_NORMAL);
11297 cum->sysv_gregno = GP_ARG_MIN_REG;
11298 cum->stdarg = stdarg_p (fntype);
11299 cum->libcall = libcall;
11301 cum->nargs_prototype = 0;
11302 if (incoming || cum->prototype)
11303 cum->nargs_prototype = n_named_args;
11305 /* Check for a longcall attribute. */
11306 if ((!fntype && rs6000_default_long_calls)
11307 || (fntype
11308 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11309 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11310 cum->call_cookie |= CALL_LONG;
11312 if (TARGET_DEBUG_ARG)
11314 fprintf (stderr, "\ninit_cumulative_args:");
11315 if (fntype)
11317 tree ret_type = TREE_TYPE (fntype);
11318 fprintf (stderr, " ret code = %s,",
11319 get_tree_code_name (TREE_CODE (ret_type)));
11322 if (cum->call_cookie & CALL_LONG)
11323 fprintf (stderr, " longcall,");
11325 fprintf (stderr, " proto = %d, nargs = %d\n",
11326 cum->prototype, cum->nargs_prototype);
11329 #ifdef HAVE_AS_GNU_ATTRIBUTE
11330 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11332 cum->escapes = call_ABI_of_interest (fndecl);
11333 if (cum->escapes)
11335 tree return_type;
11337 if (fntype)
11339 return_type = TREE_TYPE (fntype);
11340 return_mode = TYPE_MODE (return_type);
11342 else
11343 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11345 if (return_type != NULL)
11347 if (TREE_CODE (return_type) == RECORD_TYPE
11348 && TYPE_TRANSPARENT_AGGR (return_type))
11350 return_type = TREE_TYPE (first_field (return_type));
11351 return_mode = TYPE_MODE (return_type);
11353 if (AGGREGATE_TYPE_P (return_type)
11354 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11355 <= 8))
11356 rs6000_returns_struct = true;
11358 if (SCALAR_FLOAT_MODE_P (return_mode))
11360 rs6000_passes_float = true;
11361 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11362 && (FLOAT128_IBM_P (return_mode)
11363 || FLOAT128_IEEE_P (return_mode)
11364 || (return_type != NULL
11365 && (TYPE_MAIN_VARIANT (return_type)
11366 == long_double_type_node))))
11367 rs6000_passes_long_double = true;
11369 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11370 || PAIRED_VECTOR_MODE (return_mode))
11371 rs6000_passes_vector = true;
11374 #endif
11376 if (fntype
11377 && !TARGET_ALTIVEC
11378 && TARGET_ALTIVEC_ABI
11379 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11381 error ("cannot return value in vector register because"
11382 " altivec instructions are disabled, use %qs"
11383 " to enable them", "-maltivec");
11387 /* The mode the ABI uses for a word. This is not the same as word_mode
11388 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11390 static scalar_int_mode
11391 rs6000_abi_word_mode (void)
11393 return TARGET_32BIT ? SImode : DImode;
11396 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11397 static char *
11398 rs6000_offload_options (void)
11400 if (TARGET_64BIT)
11401 return xstrdup ("-foffload-abi=lp64");
11402 else
11403 return xstrdup ("-foffload-abi=ilp32");
11406 /* On rs6000, function arguments are promoted, as are function return
11407 values. */
11409 static machine_mode
11410 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11411 machine_mode mode,
11412 int *punsignedp ATTRIBUTE_UNUSED,
11413 const_tree, int)
11415 PROMOTE_MODE (mode, *punsignedp, type);
11417 return mode;
11420 /* Return true if TYPE must be passed on the stack and not in registers. */
11422 static bool
11423 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11425 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11426 return must_pass_in_stack_var_size (mode, type);
11427 else
11428 return must_pass_in_stack_var_size_or_pad (mode, type);
11431 static inline bool
11432 is_complex_IBM_long_double (machine_mode mode)
11434 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11437 /* Whether ABI_V4 passes MODE args to a function in floating point
11438 registers. */
11440 static bool
11441 abi_v4_pass_in_fpr (machine_mode mode)
11443 if (!TARGET_HARD_FLOAT)
11444 return false;
11445 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11446 return true;
11447 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11448 return true;
11449 /* ABI_V4 passes complex IBM long double in 8 gprs.
11450 Stupid, but we can't change the ABI now. */
11451 if (is_complex_IBM_long_double (mode))
11452 return false;
11453 if (FLOAT128_2REG_P (mode))
11454 return true;
11455 if (DECIMAL_FLOAT_MODE_P (mode))
11456 return true;
11457 return false;
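/* For example (editorial note): given TARGET_HARD_FLOAT plus the
   matching single/double-float option, SFmode and DFmode arguments
   pass in FPRs, as do _Decimal64 and _Decimal128 via the
   DECIMAL_FLOAT_MODE_P test, while complex IBM long double
   (ICmode/TCmode) deliberately stays in GPRs as described above. */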
11460 /* Implement TARGET_FUNCTION_ARG_PADDING.
11462 For the AIX ABI, structs are always stored left-shifted in their
11463 argument slot. */
11465 static pad_direction
11466 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11468 #ifndef AGGREGATE_PADDING_FIXED
11469 #define AGGREGATE_PADDING_FIXED 0
11470 #endif
11471 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11472 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11473 #endif
11475 if (!AGGREGATE_PADDING_FIXED)
11477 /* GCC used to pass structures of the same size as integer types as
11478 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING;
11479 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11480 passed padded downward, except that -mstrict-align further
11481 muddied the water in that multi-component structures of 2 and 4
11482 bytes in size were passed padded upward.
11484 The following arranges for best compatibility with previous
11485 versions of gcc, but removes the -mstrict-align dependency. */
11486 if (BYTES_BIG_ENDIAN)
11488 HOST_WIDE_INT size = 0;
11490 if (mode == BLKmode)
11492 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11493 size = int_size_in_bytes (type);
11495 else
11496 size = GET_MODE_SIZE (mode);
11498 if (size == 1 || size == 2 || size == 4)
11499 return PAD_DOWNWARD;
11501 return PAD_UPWARD;
11504 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11506 if (type != 0 && AGGREGATE_TYPE_P (type))
11507 return PAD_UPWARD;
11510 /* Fall back to the default. */
11511 return default_function_arg_padding (mode, type);
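/* Worked example (editorial): on a big-endian target with
   AGGREGATE_PADDING_FIXED unset, a 2-byte struct pads downward for
   compatibility with the old integer-style passing, while a 3-byte
   BLKmode struct (size not 1, 2 or 4) pads upward. */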
11514 /* If defined, a C expression that gives the alignment boundary, in bits,
11515 of an argument with the specified mode and type. If it is not defined,
11516 PARM_BOUNDARY is used for all arguments.
11518 V.4 wants long longs and doubles to be double word aligned. Just
11519 testing the mode size is a boneheaded way to do this as it means
11520 that other types such as complex int are also double word aligned.
11521 However, we're stuck with this because changing the ABI might break
11522 existing library interfaces.
11524 Quadword align Altivec/VSX vectors.
11525 Quadword align large synthetic vector types. */
11527 static unsigned int
11528 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11530 machine_mode elt_mode;
11531 int n_elts;
11533 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11535 if (DEFAULT_ABI == ABI_V4
11536 && (GET_MODE_SIZE (mode) == 8
11537 || (TARGET_HARD_FLOAT
11538 && !is_complex_IBM_long_double (mode)
11539 && FLOAT128_2REG_P (mode))))
11540 return 64;
11541 else if (FLOAT128_VECTOR_P (mode))
11542 return 128;
11543 else if (PAIRED_VECTOR_MODE (mode)
11544 || (type && TREE_CODE (type) == VECTOR_TYPE
11545 && int_size_in_bytes (type) >= 8
11546 && int_size_in_bytes (type) < 16))
11547 return 64;
11548 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11549 || (type && TREE_CODE (type) == VECTOR_TYPE
11550 && int_size_in_bytes (type) >= 16))
11551 return 128;
11553 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11554 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11555 -mcompat-align-parm is used. */
11556 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11557 || DEFAULT_ABI == ABI_ELFv2)
11558 && type && TYPE_ALIGN (type) > 64)
11560 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11561 or homogeneous float/vector aggregates here. We already handled
11562 vector aggregates above, but still need to check for float here. */
11563 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11564 && !SCALAR_FLOAT_MODE_P (elt_mode));
11566 /* We used to check for BLKmode instead of the above aggregate type
11567 check. Warn when this results in any difference to the ABI. */
11568 if (aggregate_p != (mode == BLKmode))
11570 static bool warned;
11571 if (!warned && warn_psabi)
11573 warned = true;
11574 inform (input_location,
11575 "the ABI of passing aggregates with %d-byte alignment"
11576 " has changed in GCC 5",
11577 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11581 if (aggregate_p)
11582 return 128;
11585 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11586 implement the "aggregate type" check as a BLKmode check here; this
11587 means certain aggregate types are in fact not aligned. */
11588 if (TARGET_MACHO && rs6000_darwin64_abi
11589 && mode == BLKmode
11590 && type && TYPE_ALIGN (type) > 64)
11591 return 128;
11593 return PARM_BOUNDARY;
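/* Examples (editorial): under ABI_V4 a DImode argument (size 8) gets
   64-bit alignment; any AltiVec/VSX vector mode gets 128; an ELFv2
   aggregate with TYPE_ALIGN > 64 that is not a homogeneous
   float/vector aggregate gets 128; everything else falls back to
   PARM_BOUNDARY. */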
11596 /* The offset in words to the start of the parameter save area. */
11598 static unsigned int
11599 rs6000_parm_offset (void)
11601 return (DEFAULT_ABI == ABI_V4 ? 2
11602 : DEFAULT_ABI == ABI_ELFv2 ? 4
11603 : 6);
11606 /* For a function parm of MODE and TYPE, return the starting word in
11607 the parameter area. NWORDS of the parameter area are already used. */
11609 static unsigned int
11610 rs6000_parm_start (machine_mode mode, const_tree type,
11611 unsigned int nwords)
11613 unsigned int align;
11615 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11616 return nwords + (-(rs6000_parm_offset () + nwords) & align);
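/* Worked example (editorial sketch, assuming the 64-bit PARM_BOUNDARY
   of 64): a 128-bit-aligned argument gives align = 128/64 - 1 = 1.
   Under the AIX ABI the offset is 6 words, so with NWORDS = 3 already
   used, -(6 + 3) & 1 = 1 and the argument starts at word 3 + 1 = 4,
   i.e. one padding word keeps it on a 16-byte boundary. */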
11619 /* Compute the size (in words) of a function argument. */
11621 static unsigned long
11622 rs6000_arg_size (machine_mode mode, const_tree type)
11624 unsigned long size;
11626 if (mode != BLKmode)
11627 size = GET_MODE_SIZE (mode);
11628 else
11629 size = int_size_in_bytes (type);
11631 if (TARGET_32BIT)
11632 return (size + 3) >> 2;
11633 else
11634 return (size + 7) >> 3;
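/* For instance (editorial): a 10-byte BLKmode struct occupies
   (10 + 3) >> 2 = 3 words in 32-bit mode, or (10 + 7) >> 3 = 2
   doublewords in 64-bit mode. */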
11637 /* Use this to flush pending int fields. */
11639 static void
11640 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11641 HOST_WIDE_INT bitpos, int final)
11643 unsigned int startbit, endbit;
11644 int intregs, intoffset;
11646 /* Handle the situations where a float is taking up the first half
11647 of the GPR, and the other half is empty (typically due to
11648 alignment restrictions). We can detect this by an 8-byte-aligned
11649 int field, or by seeing that this is the final flush for this
11650 argument. Count the word and continue on. */
11651 if (cum->floats_in_gpr == 1
11652 && (cum->intoffset % 64 == 0
11653 || (cum->intoffset == -1 && final)))
11655 cum->words++;
11656 cum->floats_in_gpr = 0;
11659 if (cum->intoffset == -1)
11660 return;
11662 intoffset = cum->intoffset;
11663 cum->intoffset = -1;
11664 cum->floats_in_gpr = 0;
11666 if (intoffset % BITS_PER_WORD != 0)
11668 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11669 if (!int_mode_for_size (bits, 0).exists ())
11671 /* We couldn't find an appropriate mode, which happens,
11672 e.g., in packed structs when there are 3 bytes to load.
11673 Move intoffset back to the beginning of the word in this
11674 case. */
11675 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11679 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11680 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11681 intregs = (endbit - startbit) / BITS_PER_WORD;
11682 cum->words += intregs;
11683 /* words should be unsigned. */
11684 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11686 int pad = (endbit/BITS_PER_WORD) - cum->words;
11687 cum->words += pad;
11691 /* The darwin64 ABI calls for us to recurse down through structs,
11692 looking for elements passed in registers. Unfortunately, we have
11693 to track int register count here also because of misalignments
11694 in powerpc alignment mode. */
11696 static void
11697 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11698 const_tree type,
11699 HOST_WIDE_INT startbitpos)
11701 tree f;
11703 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11704 if (TREE_CODE (f) == FIELD_DECL)
11706 HOST_WIDE_INT bitpos = startbitpos;
11707 tree ftype = TREE_TYPE (f);
11708 machine_mode mode;
11709 if (ftype == error_mark_node)
11710 continue;
11711 mode = TYPE_MODE (ftype);
11713 if (DECL_SIZE (f) != 0
11714 && tree_fits_uhwi_p (bit_position (f)))
11715 bitpos += int_bit_position (f);
11717 /* ??? FIXME: else assume zero offset. */
11719 if (TREE_CODE (ftype) == RECORD_TYPE)
11720 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11721 else if (USE_FP_FOR_ARG_P (cum, mode))
11723 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11724 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11725 cum->fregno += n_fpregs;
11726 /* Single-precision floats present a special problem for
11727 us, because they are smaller than an 8-byte GPR, and so
11728 the structure-packing rules combined with the standard
11729 varargs behavior mean that we want to pack float/float
11730 and float/int combinations into a single register's
11731 space. This is complicated by the arg advance flushing,
11732 which works on arbitrarily large groups of int-type
11733 fields. */
11734 if (mode == SFmode)
11736 if (cum->floats_in_gpr == 1)
11738 /* Two floats in a word; count the word and reset
11739 the float count. */
11740 cum->words++;
11741 cum->floats_in_gpr = 0;
11743 else if (bitpos % 64 == 0)
11745 /* A float at the beginning of an 8-byte word;
11746 count it and put off adjusting cum->words until
11747 we see if an arg advance flush is going to do it
11748 for us. */
11749 cum->floats_in_gpr++;
11751 else
11753 /* The float is at the end of a word, preceded
11754 by integer fields, so the arg advance flush
11755 just above has already set cum->words and
11756 everything is taken care of. */
11759 else
11760 cum->words += n_fpregs;
11762 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11764 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11765 cum->vregno++;
11766 cum->words += 2;
11768 else if (cum->intoffset == -1)
11769 cum->intoffset = bitpos;
11773 /* Check for an item that needs to be considered specially under the Darwin
11774 64-bit ABI. These are record types where the mode is BLKmode or the
11775 structure is 8 bytes in size. */
11776 static int
11777 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11779 return rs6000_darwin64_abi
11780 && ((mode == BLKmode
11781 && TREE_CODE (type) == RECORD_TYPE
11782 && int_size_in_bytes (type) > 0)
11783 || (type && TREE_CODE (type) == RECORD_TYPE
11784 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11787 /* Update the data in CUM to advance over an argument
11788 of mode MODE and data type TYPE.
11789 (TYPE is null for libcalls where that information may not be available.)
11791 Note that for args passed by reference, function_arg will be called
11792 with MODE and TYPE set to that of the pointer to the arg, not the arg
11793 itself. */
11795 static void
11796 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11797 const_tree type, bool named, int depth)
11799 machine_mode elt_mode;
11800 int n_elts;
11802 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11804 /* Only tick off an argument if we're not recursing. */
11805 if (depth == 0)
11806 cum->nargs_prototype--;
11808 #ifdef HAVE_AS_GNU_ATTRIBUTE
11809 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11810 && cum->escapes)
11812 if (SCALAR_FLOAT_MODE_P (mode))
11814 rs6000_passes_float = true;
11815 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11816 && (FLOAT128_IBM_P (mode)
11817 || FLOAT128_IEEE_P (mode)
11818 || (type != NULL
11819 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11820 rs6000_passes_long_double = true;
11822 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11823 || (PAIRED_VECTOR_MODE (mode)
11824 && !cum->stdarg
11825 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11826 rs6000_passes_vector = true;
11828 #endif
11830 if (TARGET_ALTIVEC_ABI
11831 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11832 || (type && TREE_CODE (type) == VECTOR_TYPE
11833 && int_size_in_bytes (type) == 16)))
11835 bool stack = false;
11837 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11839 cum->vregno += n_elts;
11841 if (!TARGET_ALTIVEC)
11842 error ("cannot pass argument in vector register because"
11843 " altivec instructions are disabled, use %qs"
11844 " to enable them", "-maltivec");
11846 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11847 even if it is going to be passed in a vector register.
11848 Darwin does the same for variable-argument functions. */
11849 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11850 && TARGET_64BIT)
11851 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11852 stack = true;
11854 else
11855 stack = true;
11857 if (stack)
11859 int align;
11861 /* Vector parameters must be 16-byte aligned. In 32-bit
11862 mode this means we need to take into account the offset
11863 to the parameter save area. In 64-bit mode, they just
11864 have to start on an even word, since the parameter save
11865 area is 16-byte aligned. */
11866 if (TARGET_32BIT)
11867 align = -(rs6000_parm_offset () + cum->words) & 3;
11868 else
11869 align = cum->words & 1;
11870 cum->words += align + rs6000_arg_size (mode, type);
11872 if (TARGET_DEBUG_ARG)
11874 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11875 cum->words, align);
11876 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11877 cum->nargs_prototype, cum->prototype,
11878 GET_MODE_NAME (mode));
11882 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11884 int size = int_size_in_bytes (type);
11885 /* Variable sized types have size == -1 and are
11886 treated as if consisting entirely of ints.
11887 Pad to 16 byte boundary if needed. */
11888 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11889 && (cum->words % 2) != 0)
11890 cum->words++;
11891 /* For varargs, we can just go up by the size of the struct. */
11892 if (!named)
11893 cum->words += (size + 7) / 8;
11894 else
11896 /* It is tempting to say int register count just goes up by
11897 sizeof(type)/8, but this is wrong in a case such as
11898 { int; double; int; } [powerpc alignment]. We have to
11899 grovel through the fields for these too. */
11900 cum->intoffset = 0;
11901 cum->floats_in_gpr = 0;
11902 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11903 rs6000_darwin64_record_arg_advance_flush (cum,
11904 size * BITS_PER_UNIT, 1);
11906 if (TARGET_DEBUG_ARG)
11908 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11909 cum->words, TYPE_ALIGN (type), size);
11910 fprintf (stderr,
11911 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11912 cum->nargs_prototype, cum->prototype,
11913 GET_MODE_NAME (mode));
11916 else if (DEFAULT_ABI == ABI_V4)
11918 if (abi_v4_pass_in_fpr (mode))
11920 /* _Decimal128 must use an even/odd register pair. This assumes
11921 that the register number is odd when fregno is odd. */
11922 if (mode == TDmode && (cum->fregno % 2) == 1)
11923 cum->fregno++;
11925 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11926 <= FP_ARG_V4_MAX_REG)
11927 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11928 else
11930 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11931 if (mode == DFmode || FLOAT128_IBM_P (mode)
11932 || mode == DDmode || mode == TDmode)
11933 cum->words += cum->words & 1;
11934 cum->words += rs6000_arg_size (mode, type);
11937 else
11939 int n_words = rs6000_arg_size (mode, type);
11940 int gregno = cum->sysv_gregno;
11942 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11943 As does any other 2 word item such as complex int due to a
11944 historical mistake. */
11945 if (n_words == 2)
11946 gregno += (1 - gregno) & 1;
11948 /* Multi-reg args are not split between registers and stack. */
11949 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11951 /* Long long is aligned on the stack. So are other 2 word
11952 items such as complex int due to a historical mistake. */
11953 if (n_words == 2)
11954 cum->words += cum->words & 1;
11955 cum->words += n_words;
11958 /* Note: we continue to accumulate gregno even after we've started
11959 spilling to the stack; this tells expand_builtin_saveregs that
11960 spilling has started. */
11961 cum->sysv_gregno = gregno + n_words;
11964 if (TARGET_DEBUG_ARG)
11966 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11967 cum->words, cum->fregno);
11968 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11969 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11970 fprintf (stderr, "mode = %4s, named = %d\n",
11971 GET_MODE_NAME (mode), named);
11974 else
11976 int n_words = rs6000_arg_size (mode, type);
11977 int start_words = cum->words;
11978 int align_words = rs6000_parm_start (mode, type, start_words);
11980 cum->words = align_words + n_words;
11982 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11984 /* _Decimal128 must be passed in an even/odd float register pair.
11985 This assumes that the register number is odd when fregno is
11986 odd. */
11987 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11988 cum->fregno++;
11989 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11992 if (TARGET_DEBUG_ARG)
11994 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11995 cum->words, cum->fregno);
11996 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11997 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11998 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11999 named, align_words - start_words, depth);
12004 static void
12005 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12006 const_tree type, bool named)
12008 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12012 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12013 structure between cum->intoffset and bitpos to integer registers. */
12015 static void
12016 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12017 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12019 machine_mode mode;
12020 unsigned int regno;
12021 unsigned int startbit, endbit;
12022 int this_regno, intregs, intoffset;
12023 rtx reg;
12025 if (cum->intoffset == -1)
12026 return;
12028 intoffset = cum->intoffset;
12029 cum->intoffset = -1;
12031 /* If this is the trailing part of a word, try to only load that
12032 much into the register. Otherwise load the whole register. Note
12033 that in the latter case we may pick up unwanted bits. It's not a
12034 problem at the moment, but we may wish to revisit it. */
12036 if (intoffset % BITS_PER_WORD != 0)
12038 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12039 if (!int_mode_for_size (bits, 0).exists (&mode))
12041 /* We couldn't find an appropriate mode, which happens,
12042 e.g., in packed structs when there are 3 bytes to load.
12043 Move intoffset back to the beginning of the word in this
12044 case. */
12045 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12046 mode = word_mode;
12049 else
12050 mode = word_mode;
12052 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12053 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12054 intregs = (endbit - startbit) / BITS_PER_WORD;
12055 this_regno = cum->words + intoffset / BITS_PER_WORD;
12057 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12058 cum->use_stack = 1;
12060 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12061 if (intregs <= 0)
12062 return;
12064 intoffset /= BITS_PER_UNIT;
12067 regno = GP_ARG_MIN_REG + this_regno;
12068 reg = gen_rtx_REG (mode, regno);
12069 rvec[(*k)++] =
12070 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12072 this_regno += 1;
12073 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12074 mode = word_mode;
12075 intregs -= 1;
12077 while (intregs > 0);
12080 /* Recursive workhorse for rs6000_darwin64_record_arg, below. */
12082 static void
12083 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12084 HOST_WIDE_INT startbitpos, rtx rvec[],
12085 int *k)
12087 tree f;
12089 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12090 if (TREE_CODE (f) == FIELD_DECL)
12092 HOST_WIDE_INT bitpos = startbitpos;
12093 tree ftype = TREE_TYPE (f);
12094 machine_mode mode;
12095 if (ftype == error_mark_node)
12096 continue;
12097 mode = TYPE_MODE (ftype);
12099 if (DECL_SIZE (f) != 0
12100 && tree_fits_uhwi_p (bit_position (f)))
12101 bitpos += int_bit_position (f);
12103 /* ??? FIXME: else assume zero offset. */
12105 if (TREE_CODE (ftype) == RECORD_TYPE)
12106 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12107 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12109 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12110 #if 0
12111 switch (mode)
12113 case E_SCmode: mode = SFmode; break;
12114 case E_DCmode: mode = DFmode; break;
12115 case E_TCmode: mode = TFmode; break;
12116 default: break;
12118 #endif
12119 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12120 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12122 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12123 && (mode == TFmode || mode == TDmode));
12124 /* Long double or _Decimal128 split over regs and memory. */
12125 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12126 cum->use_stack = 1;
12128 rvec[(*k)++]
12129 = gen_rtx_EXPR_LIST (VOIDmode,
12130 gen_rtx_REG (mode, cum->fregno++),
12131 GEN_INT (bitpos / BITS_PER_UNIT));
12132 if (FLOAT128_2REG_P (mode))
12133 cum->fregno++;
12135 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12137 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12138 rvec[(*k)++]
12139 = gen_rtx_EXPR_LIST (VOIDmode,
12140 gen_rtx_REG (mode, cum->vregno++),
12141 GEN_INT (bitpos / BITS_PER_UNIT));
12143 else if (cum->intoffset == -1)
12144 cum->intoffset = bitpos;
12148 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12149 the register(s) to be used for each field and subfield of a struct
12150 being passed by value, along with the offset of where the
12151 register's value may be found in the block. FP fields go in FP
12152 register, vector fields go in vector registers, and everything
12153 else goes in int registers, packed as in memory.
12155 This code is also used for function return values. RETVAL indicates
12156 whether this is the case.
12158 Much of this is taken from the SPARC V9 port, which has a similar
12159 calling convention. */
12161 static rtx
12162 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12163 bool named, bool retval)
12165 rtx rvec[FIRST_PSEUDO_REGISTER];
12166 int k = 1, kbase = 1;
12167 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12168 /* This is a copy; modifications are not visible to our caller. */
12169 CUMULATIVE_ARGS copy_cum = *orig_cum;
12170 CUMULATIVE_ARGS *cum = &copy_cum;
12172 /* Pad to 16 byte boundary if needed. */
12173 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12174 && (cum->words % 2) != 0)
12175 cum->words++;
12177 cum->intoffset = 0;
12178 cum->use_stack = 0;
12179 cum->named = named;
12181 /* Put entries into rvec[] for individual FP and vector fields, and
12182 for the chunks of memory that go in int regs. Note we start at
12183 element 1; 0 is reserved for an indication of using memory, and
12184 may or may not be filled in below. */
12185 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12186 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12188 /* If any part of the struct went on the stack put all of it there.
12189 This hack is because the generic code for
12190 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12191 parts of the struct are not at the beginning. */
12192 if (cum->use_stack)
12194 if (retval)
12195 return NULL_RTX; /* doesn't go in registers at all */
12196 kbase = 0;
12197 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12199 if (k > 1 || cum->use_stack)
12200 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12201 else
12202 return NULL_RTX;
12205 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12207 static rtx
12208 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12209 int align_words)
12211 int n_units;
12212 int i, k;
12213 rtx rvec[GP_ARG_NUM_REG + 1];
12215 if (align_words >= GP_ARG_NUM_REG)
12216 return NULL_RTX;
12218 n_units = rs6000_arg_size (mode, type);
12220 /* Optimize the simple case where the arg fits in one gpr, except in
12221 the case of BLKmode due to assign_parms assuming that registers are
12222 BITS_PER_WORD wide. */
12223 if (n_units == 0
12224 || (n_units == 1 && mode != BLKmode))
12225 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12227 k = 0;
12228 if (align_words + n_units > GP_ARG_NUM_REG)
12229 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12230 using a magic NULL_RTX component.
12231 This is not strictly correct. Only some of the arg belongs in
12232 memory, not all of it. However, the normal scheme using
12233 function_arg_partial_nregs can result in unusual subregs, e.g.
12234 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12235 store the whole arg to memory is often more efficient than code
12236 to store pieces, and we know that space is available in the right
12237 place for the whole arg. */
12238 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12240 i = 0;
12243 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12244 rtx off = GEN_INT (i++ * 4);
12245 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12247 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12249 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
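/* Example of the result (editorial, schematic RTL notation like that
   used elsewhere in this file): a DFmode argument at
   align_words == 7 has n_units == 2 but only one GPR (r10) left, so
   the function returns

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI 10) (const_int 0))])

   meaning the whole value lives in memory while its first word is
   also passed in r10. */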
12252 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12253 but must also be copied into the parameter save area starting at
12254 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12255 to the GPRs and/or memory. Return the number of elements used. */
12257 static int
12258 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12259 int align_words, rtx *rvec)
12261 int k = 0;
12263 if (align_words < GP_ARG_NUM_REG)
12265 int n_words = rs6000_arg_size (mode, type);
12267 if (align_words + n_words > GP_ARG_NUM_REG
12268 || mode == BLKmode
12269 || (TARGET_32BIT && TARGET_POWERPC64))
12271 /* If this is partially on the stack, then we only
12272 include the portion actually in registers here. */
12273 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12274 int i = 0;
12276 if (align_words + n_words > GP_ARG_NUM_REG)
12278 /* Not all of the arg fits in gprs. Say that it goes in memory
12279 too, using a magic NULL_RTX component. Also see comment in
12280 rs6000_mixed_function_arg for why the normal
12281 function_arg_partial_nregs scheme doesn't work in this case. */
12282 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12287 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12288 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12289 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12291 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12293 else
12295 /* The whole arg fits in gprs. */
12296 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12297 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12300 else
12302 /* It's entirely in memory. */
12303 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12306 return k;
12309 /* RVEC is a vector of K components of an argument of mode MODE.
12310 Construct the final function_arg return value from it. */
12312 static rtx
12313 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12315 gcc_assert (k >= 1);
12317 /* Avoid returning a PARALLEL in the trivial cases. */
12318 if (k == 1)
12320 if (XEXP (rvec[0], 0) == NULL_RTX)
12321 return NULL_RTX;
12323 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12324 return XEXP (rvec[0], 0);
12327 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12330 /* Determine where to put an argument to a function.
12331 Value is zero to push the argument on the stack,
12332 or a hard register in which to store the argument.
12334 MODE is the argument's machine mode.
12335 TYPE is the data type of the argument (as a tree).
12336 This is null for libcalls where that information may
12337 not be available.
12338 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12339 the preceding args and about the function being called. It is
12340 not modified in this routine.
12341 NAMED is nonzero if this argument is a named parameter
12342 (otherwise it is an extra parameter matching an ellipsis).
12344 On RS/6000 the first eight words of non-FP are normally in registers
12345 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12346 Under V.4, the first 8 FP args are in registers.
12348 If this is floating-point and no prototype is specified, we use
12349 both an FP and integer register (or possibly FP reg and stack). Library
12350 functions (when CALL_LIBCALL is set) always have the proper types for args,
12351 so we can pass the FP value just in one register. emit_library_function
12352 doesn't support PARALLEL anyway.
12354 Note that for args passed by reference, function_arg will be called
12355 with MODE and TYPE set to that of the pointer to the arg, not the arg
12356 itself. */
12358 static rtx
12359 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12360 const_tree type, bool named)
12362 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12363 enum rs6000_abi abi = DEFAULT_ABI;
12364 machine_mode elt_mode;
12365 int n_elts;
12367 /* Return a marker to indicate whether CR1 needs to set or clear the
12368 bit that V.4 uses to say fp args were passed in registers.
12369 Assume that we don't need the marker for software floating point,
12370 or compiler generated library calls. */
12371 if (mode == VOIDmode)
12373 if (abi == ABI_V4
12374 && (cum->call_cookie & CALL_LIBCALL) == 0
12375 && (cum->stdarg
12376 || (cum->nargs_prototype < 0
12377 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12378 && TARGET_HARD_FLOAT)
12379 return GEN_INT (cum->call_cookie
12380 | ((cum->fregno == FP_ARG_MIN_REG)
12381 ? CALL_V4_SET_FP_ARGS
12382 : CALL_V4_CLEAR_FP_ARGS));
12384 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12387 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12389 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12391 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12392 if (rslt != NULL_RTX)
12393 return rslt;
12394 /* Else fall through to usual handling. */
12397 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12399 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12400 rtx r, off;
12401 int i, k = 0;
12403 /* Do we also need to pass this argument in the parameter save area?
12404 Library support functions for IEEE 128-bit are assumed to not need the
12405 value passed both in GPRs and in vector registers. */
12406 if (TARGET_64BIT && !cum->prototype
12407 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12409 int align_words = ROUND_UP (cum->words, 2);
12410 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12413 /* Describe where this argument goes in the vector registers. */
12414 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12416 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12417 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12418 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12421 return rs6000_finish_function_arg (mode, rvec, k);
12423 else if (TARGET_ALTIVEC_ABI
12424 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12425 || (type && TREE_CODE (type) == VECTOR_TYPE
12426 && int_size_in_bytes (type) == 16)))
12428 if (named || abi == ABI_V4)
12429 return NULL_RTX;
12430 else
12432 /* Vector parameters to varargs functions under AIX or Darwin
12433 get passed in memory and possibly also in GPRs. */
12434 int align, align_words, n_words;
12435 machine_mode part_mode;
12437 /* Vector parameters must be 16-byte aligned. In 32-bit
12438 mode this means we need to take into account the offset
12439 to the parameter save area. In 64-bit mode, they just
12440 have to start on an even word, since the parameter save
12441 area is 16-byte aligned. */
12442 if (TARGET_32BIT)
12443 align = -(rs6000_parm_offset () + cum->words) & 3;
12444 else
12445 align = cum->words & 1;
12446 align_words = cum->words + align;
12448 /* Out of registers? Memory, then. */
12449 if (align_words >= GP_ARG_NUM_REG)
12450 return NULL_RTX;
12452 if (TARGET_32BIT && TARGET_POWERPC64)
12453 return rs6000_mixed_function_arg (mode, type, align_words);
12455 /* The vector value goes in GPRs. Only the part of the
12456 value in GPRs is reported here. */
12457 part_mode = mode;
12458 n_words = rs6000_arg_size (mode, type);
12459 if (align_words + n_words > GP_ARG_NUM_REG)
12460 /* Fortunately, there are only two possibilities, the value
12461 is either wholly in GPRs or half in GPRs and half not. */
12462 part_mode = DImode;
12464 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12468 else if (abi == ABI_V4)
12470 if (abi_v4_pass_in_fpr (mode))
12472 /* _Decimal128 must use an even/odd register pair. This assumes
12473 that the register number is odd when fregno is odd. */
12474 if (mode == TDmode && (cum->fregno % 2) == 1)
12475 cum->fregno++;
12477 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12478 <= FP_ARG_V4_MAX_REG)
12479 return gen_rtx_REG (mode, cum->fregno);
12480 else
12481 return NULL_RTX;
12483 else
12485 int n_words = rs6000_arg_size (mode, type);
12486 int gregno = cum->sysv_gregno;
12488 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12489 As does any other 2 word item such as complex int due to a
12490 historical mistake. */
12491 if (n_words == 2)
12492 gregno += (1 - gregno) & 1;
12494 /* Multi-reg args are not split between registers and stack. */
12495 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12496 return NULL_RTX;
12498 if (TARGET_32BIT && TARGET_POWERPC64)
12499 return rs6000_mixed_function_arg (mode, type,
12500 gregno - GP_ARG_MIN_REG);
12501 return gen_rtx_REG (mode, gregno);
12504 else
12506 int align_words = rs6000_parm_start (mode, type, cum->words);
12508 /* _Decimal128 must be passed in an even/odd float register pair.
12509 This assumes that the register number is odd when fregno is odd. */
12510 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12511 cum->fregno++;
12513 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12515 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12516 rtx r, off;
12517 int i, k = 0;
12518 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12519 int fpr_words;
12521 /* Do we also need to pass this argument in the parameter
12522 save area? */
12523 if (type && (cum->nargs_prototype <= 0
12524 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12525 && TARGET_XL_COMPAT
12526 && align_words >= GP_ARG_NUM_REG)))
12527 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12529 /* Describe where this argument goes in the fprs. */
12530 for (i = 0; i < n_elts
12531 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12533 /* Check if the argument is split over registers and memory.
12534 This can only ever happen for long double or _Decimal128;
12535 complex types are handled via split_complex_arg. */
12536 machine_mode fmode = elt_mode;
12537 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12539 gcc_assert (FLOAT128_2REG_P (fmode));
12540 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12543 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12544 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12545 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12548 /* If there were not enough FPRs to hold the argument, the rest
12549 usually goes into memory. However, if the current position
12550 is still within the register parameter area, a portion may
12551 actually have to go into GPRs.
12553 Note that it may happen that the portion of the argument
12554 passed in the first "half" of the first GPR was already
12555 passed in the last FPR as well.
12557 For unnamed arguments, we already set up GPRs to cover the
12558 whole argument in rs6000_psave_function_arg, so there is
12559 nothing further to do at this point. */
12560 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12561 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12562 && cum->nargs_prototype > 0)
12564 static bool warned;
12566 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12567 int n_words = rs6000_arg_size (mode, type);
12569 align_words += fpr_words;
12570 n_words -= fpr_words;
12574 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12575 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12576 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12578 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12580 if (!warned && warn_psabi)
12582 warned = true;
12583 inform (input_location,
12584 "the ABI of passing homogeneous float aggregates"
12585 " has changed in GCC 5");
12589 return rs6000_finish_function_arg (mode, rvec, k);
12591 else if (align_words < GP_ARG_NUM_REG)
12593 if (TARGET_32BIT && TARGET_POWERPC64)
12594 return rs6000_mixed_function_arg (mode, type, align_words);
12596 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12598 else
12599 return NULL_RTX;
12603 /* For an arg passed partly in registers and partly in memory, this is
12604 the number of bytes passed in registers. For args passed entirely in
12605 registers or entirely in memory, zero. When an arg is described by a
12606 PARALLEL, perhaps using more than one register type, this function
12607 returns the number of bytes used by the first element of the PARALLEL. */
12609 static int
12610 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12611 tree type, bool named)
12613 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12614 bool passed_in_gprs = true;
12615 int ret = 0;
12616 int align_words;
12617 machine_mode elt_mode;
12618 int n_elts;
12620 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12622 if (DEFAULT_ABI == ABI_V4)
12623 return 0;
12625 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12627 /* If we are passing this arg in the fixed parameter save area (gprs or
12628 memory) as well as VRs, we do not use the partial bytes mechanism;
12629 instead, rs6000_function_arg will return a PARALLEL including a memory
12630 element as necessary. Library support functions for IEEE 128-bit are
12631 assumed to not need the value passed both in GPRs and in vector
12632 registers. */
12633 if (TARGET_64BIT && !cum->prototype
12634 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12635 return 0;
12637 /* Otherwise, we pass in VRs only. Check for partial copies. */
12638 passed_in_gprs = false;
12639 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12640 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12643 /* In this complicated case we just disable the partial_nregs code. */
12644 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12645 return 0;
12647 align_words = rs6000_parm_start (mode, type, cum->words);
12649 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12651 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12653 /* If we are passing this arg in the fixed parameter save area
12654 (gprs or memory) as well as FPRs, we do not use the partial
12655 bytes mechanism; instead, rs6000_function_arg will return a
12656 PARALLEL including a memory element as necessary. */
12657 if (type
12658 && (cum->nargs_prototype <= 0
12659 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12660 && TARGET_XL_COMPAT
12661 && align_words >= GP_ARG_NUM_REG)))
12662 return 0;
12664 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12665 passed_in_gprs = false;
12666 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12668 /* Compute number of bytes / words passed in FPRs. If there
12669 is still space available in the register parameter area
12670 *after* that amount, a part of the argument will be passed
12671 in GPRs. In that case, the total amount passed in any
12672 registers is equal to the amount that would have been passed
12673 in GPRs if everything were passed there, so we fall back to
12674 the GPR code below to compute the appropriate value. */
12675 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12676 * MIN (8, GET_MODE_SIZE (elt_mode)));
12677 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12679 if (align_words + fpr_words < GP_ARG_NUM_REG)
12680 passed_in_gprs = true;
12681 else
12682 ret = fpr;
12686 if (passed_in_gprs
12687 && align_words < GP_ARG_NUM_REG
12688 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12689 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12691 if (ret != 0 && TARGET_DEBUG_ARG)
12692 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12694 return ret;
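/* Worked example (editorial): in 64-bit mode with GP_ARG_NUM_REG of
   8, a 24-byte BLKmode struct whose first word falls at
   align_words == 6 needs rs6000_arg_size == 3 doublewords; only
   8 - 6 = 2 of them fit in GPRs, so the function returns
   2 * 8 = 16 bytes. */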
12697 /* Return true when an argument must be passed by reference. In that
12698 case, a copy of the argument is made in memory and a pointer to the
12699 argument is passed instead of
12700 the argument itself. The pointer is passed in whatever way is
12701 appropriate for passing a pointer to that type.
12703 Under V.4, aggregates and long double are passed by reference.
12705 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12706 reference unless the AltiVec vector extension ABI is in force.
12708 As an extension to all ABIs, variable sized types are passed by
12709 reference. */
12711 static bool
12712 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12713 machine_mode mode, const_tree type,
12714 bool named ATTRIBUTE_UNUSED)
12716 if (!type)
12717 return 0;
12719 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12720 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12722 if (TARGET_DEBUG_ARG)
12723 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12724 return 1;
12727 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12729 if (TARGET_DEBUG_ARG)
12730 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12731 return 1;
12734 if (int_size_in_bytes (type) < 0)
12736 if (TARGET_DEBUG_ARG)
12737 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12738 return 1;
12741 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12742 modes only exist for GCC vector types if -maltivec. */
12743 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12745 if (TARGET_DEBUG_ARG)
12746 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12747 return 1;
12750 /* Pass synthetic vectors in memory. */
12751 if (TREE_CODE (type) == VECTOR_TYPE
12752 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12754 static bool warned_for_pass_big_vectors = false;
12755 if (TARGET_DEBUG_ARG)
12756 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12757 if (!warned_for_pass_big_vectors)
12759 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12760 "non-standard ABI extension with no compatibility "
12761 "guarantee");
12762 warned_for_pass_big_vectors = true;
12764 return 1;
12767 return 0;
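/* Editor's examples (illustrative only; these declarations are
   hypothetical):

     struct S { int a, b; };
     void f (struct S);	   V.4 passes the aggregate by reference.

     typedef int v8si __attribute__ ((vector_size (32)));
     void g (v8si);	   Wider than a vector register on any ABI, so
			   passed by reference, with the one-time
			   -Wpsabi warning emitted above.  */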
12770 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12771 already processed. Return true if the parameter must be passed
12772 (fully or partially) on the stack. */
12774 static bool
12775 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12777 machine_mode mode;
12778 int unsignedp;
12779 rtx entry_parm;
12781 /* Catch errors. */
12782 if (type == NULL || type == error_mark_node)
12783 return true;
12785 /* Handle types with no storage requirement. */
12786 if (TYPE_MODE (type) == VOIDmode)
12787 return false;
12789 /* Handle complex types. Both components are checked in turn; the
recursion also advances ARGS_SO_FAR past each one. */
12790 if (TREE_CODE (type) == COMPLEX_TYPE)
12791 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12792 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12794 /* Handle transparent aggregates. */
12795 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12796 && TYPE_TRANSPARENT_AGGR (type))
12797 type = TREE_TYPE (first_field (type));
12799 /* See if this arg was passed by invisible reference. */
12800 if (pass_by_reference (get_cumulative_args (args_so_far),
12801 TYPE_MODE (type), type, true))
12802 type = build_pointer_type (type);
12804 /* Find mode as it is passed by the ABI. */
12805 unsignedp = TYPE_UNSIGNED (type);
12806 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12808 /* If we must pass in stack, we need a stack. */
12809 if (rs6000_must_pass_in_stack (mode, type))
12810 return true;
12812 /* If there is no incoming register, we need a stack. */
12813 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12814 if (entry_parm == NULL)
12815 return true;
12817 /* Likewise if we need to pass both in registers and on the stack. */
12818 if (GET_CODE (entry_parm) == PARALLEL
12819 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12820 return true;
12822 /* Also true if we're partially in registers and partially not. */
12823 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12824 return true;
12826 /* Update info on where next arg arrives in registers. */
12827 rs6000_function_arg_advance (args_so_far, mode, type, true);
12828 return false;
12831 /* Return true if FUN has no prototype, has a variable argument
12832 list, or passes any parameter in memory. */
12834 static bool
12835 rs6000_function_parms_need_stack (tree fun, bool incoming)
12837 tree fntype, result;
12838 CUMULATIVE_ARGS args_so_far_v;
12839 cumulative_args_t args_so_far;
12841 if (!fun)
12842 /* Must be a libcall, all of which only use reg parms. */
12843 return false;
12845 fntype = fun;
12846 if (!TYPE_P (fun))
12847 fntype = TREE_TYPE (fun);
12849 /* Varargs functions need the parameter save area. */
12850 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12851 return true;
12853 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12854 args_so_far = pack_cumulative_args (&args_so_far_v);
12856 /* When incoming, we will have been passed the function decl.
12857 It is necessary to use the decl to handle K&R style functions,
12858 where TYPE_ARG_TYPES may not be available. */
12859 if (incoming)
12861 gcc_assert (DECL_P (fun));
12862 result = DECL_RESULT (fun);
12864 else
12865 result = TREE_TYPE (fntype);
12867 if (result && aggregate_value_p (result, fntype))
12869 if (!TYPE_P (result))
12870 result = TREE_TYPE (result);
12871 result = build_pointer_type (result);
12872 rs6000_parm_needs_stack (args_so_far, result);
12875 if (incoming)
12877 tree parm;
12879 for (parm = DECL_ARGUMENTS (fun);
12880 parm && parm != void_list_node;
12881 parm = TREE_CHAIN (parm))
12882 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12883 return true;
12885 else
12887 function_args_iterator args_iter;
12888 tree arg_type;
12890 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12891 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12892 return true;
12895 return false;
12898 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12899 usually a constant depending on the ABI. However, in the ELFv2 ABI
12900 the register parameter area is optional when calling a function that
12901 has a prototype in scope, has no variable argument list, and passes
12902 all parameters in registers. */
int
12905 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12907 int reg_parm_stack_space;
12909 switch (DEFAULT_ABI)
12911 default:
12912 reg_parm_stack_space = 0;
12913 break;
12915 case ABI_AIX:
12916 case ABI_DARWIN:
12917 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12918 break;
12920 case ABI_ELFv2:
12921 /* ??? Recomputing this every time is a bit expensive. Is there
12922 a place to cache this information? */
12923 if (rs6000_function_parms_need_stack (fun, incoming))
12924 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12925 else
12926 reg_parm_stack_space = 0;
12927 break;
12930 return reg_parm_stack_space;
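/* Editor's example (hypothetical declarations; ELFv2, 64-bit): for

     extern int f (int, int);
     ... f (1, 2) ...

   both arguments travel in r3/r4, the prototype is in scope, and the
   callee is not variadic, so rs6000_function_parms_need_stack returns
   false and the caller may omit the 64-byte parameter save area.  A
   call to an unprototyped or variadic function reserves the full 64
   bytes.  */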
12933 static void
12934 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12936 int i;
12937 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12939 if (nregs == 0)
12940 return;
12942 for (i = 0; i < nregs; i++)
12944 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12945 if (reload_completed)
12947 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12948 tem = NULL_RTX;
12949 else
12950 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12951 i * GET_MODE_SIZE (reg_mode));
12953 else
12954 tem = replace_equiv_address (tem, XEXP (tem, 0));
12956 gcc_assert (tem);
12958 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12962 /* Perform any actions needed for a function that is receiving a
12963 variable number of arguments.
12965 CUM is as above.
12967 MODE and TYPE are the mode and type of the current parameter.
12969 PRETEND_SIZE is a variable that should be set to the amount of stack
12970 that must be pushed by the prolog to pretend that our caller pushed it.
12973 Normally, this macro will push all remaining incoming registers on the
12974 stack and set PRETEND_SIZE to the length of the registers pushed. */
12976 static void
12977 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12978 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12979 int no_rtl)
12981 CUMULATIVE_ARGS next_cum;
12982 int reg_size = TARGET_32BIT ? 4 : 8;
12983 rtx save_area = NULL_RTX, mem;
12984 int first_reg_offset;
12985 alias_set_type set;
12987 /* Skip the last named argument. */
12988 next_cum = *get_cumulative_args (cum);
12989 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12991 if (DEFAULT_ABI == ABI_V4)
12993 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12995 if (! no_rtl)
12997 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12998 HOST_WIDE_INT offset = 0;
13000 /* Try to optimize the size of the varargs save area.
13001 The ABI requires that ap.reg_save_area is doubleword
13002 aligned, but we don't need to allocate space for all
13003 the bytes, only those to which we actually will save
13004 anything. */
13005 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13006 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13007 if (TARGET_HARD_FLOAT
13008 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13009 && cfun->va_list_fpr_size)
13011 if (gpr_reg_num)
13012 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13013 * UNITS_PER_FP_WORD;
13014 if (cfun->va_list_fpr_size
13015 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13016 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13017 else
13018 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13019 * UNITS_PER_FP_WORD;
13021 if (gpr_reg_num)
13023 offset = -((first_reg_offset * reg_size) & ~7);
13024 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13026 gpr_reg_num = cfun->va_list_gpr_size;
13027 if (reg_size == 4 && (first_reg_offset & 1))
13028 gpr_reg_num++;
13030 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13032 else if (fpr_size)
13033 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13034 * UNITS_PER_FP_WORD
13035 - (int) (GP_ARG_NUM_REG * reg_size);
13037 if (gpr_size + fpr_size)
13039 rtx reg_save_area
13040 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13041 gcc_assert (GET_CODE (reg_save_area) == MEM);
13042 reg_save_area = XEXP (reg_save_area, 0);
13043 if (GET_CODE (reg_save_area) == PLUS)
13045 gcc_assert (XEXP (reg_save_area, 0)
13046 == virtual_stack_vars_rtx);
13047 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13048 offset += INTVAL (XEXP (reg_save_area, 1));
13050 else
13051 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13054 cfun->machine->varargs_save_offset = offset;
13055 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13058 else
13060 first_reg_offset = next_cum.words;
13061 save_area = crtl->args.internal_arg_pointer;
13063 if (targetm.calls.must_pass_in_stack (mode, type))
13064 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13067 set = get_varargs_alias_set ();
13068 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13069 && cfun->va_list_gpr_size)
13071 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13073 if (va_list_gpr_counter_field)
13074 /* V4 va_list_gpr_size counts number of registers needed. */
13075 n_gpr = cfun->va_list_gpr_size;
13076 else
13077 /* char * va_list instead counts number of bytes needed. */
13078 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13080 if (nregs > n_gpr)
13081 nregs = n_gpr;
13083 mem = gen_rtx_MEM (BLKmode,
13084 plus_constant (Pmode, save_area,
13085 first_reg_offset * reg_size));
13086 MEM_NOTRAP_P (mem) = 1;
13087 set_mem_alias_set (mem, set);
13088 set_mem_align (mem, BITS_PER_WORD);
13090 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13091 nregs);
13094 /* Save FP registers if needed. */
13095 if (DEFAULT_ABI == ABI_V4
13096 && TARGET_HARD_FLOAT
13097 && ! no_rtl
13098 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13099 && cfun->va_list_fpr_size)
13101 int fregno = next_cum.fregno, nregs;
13102 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13103 rtx lab = gen_label_rtx ();
13104 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13105 * UNITS_PER_FP_WORD);
13107 emit_jump_insn
13108 (gen_rtx_SET (pc_rtx,
13109 gen_rtx_IF_THEN_ELSE (VOIDmode,
13110 gen_rtx_NE (VOIDmode, cr1,
13111 const0_rtx),
13112 gen_rtx_LABEL_REF (VOIDmode, lab),
13113 pc_rtx)));
13115 for (nregs = 0;
13116 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13117 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13119 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13120 ? DFmode : SFmode,
13121 plus_constant (Pmode, save_area, off));
13122 MEM_NOTRAP_P (mem) = 1;
13123 set_mem_alias_set (mem, set);
13124 set_mem_align (mem, GET_MODE_ALIGNMENT (
13125 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13126 ? DFmode : SFmode));
13127 emit_move_insn (mem, gen_rtx_REG (
13128 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13129 ? DFmode : SFmode, fregno));
13132 emit_label (lab);
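/* Editor's sketch of the effect of the code above for a V.4 function
   such as

     int sum (int n, ...);

   N occupies r3, so r4..r10 are dumped into the GPR part of the
   register save area, and, guarded by the CR1 test emitted above (the
   SVR4 convention uses CR bit 6 to tell the callee whether any
   arguments arrived in FPRs), f1..f8 are stored into the FPR part.  */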
13136 /* Create the va_list data type. */
13138 static tree
13139 rs6000_build_builtin_va_list (void)
13141 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13143 /* For AIX, prefer 'char *' because that's what the system
13144 header files like. */
13145 if (DEFAULT_ABI != ABI_V4)
13146 return build_pointer_type (char_type_node);
13148 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13149 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13150 get_identifier ("__va_list_tag"), record);
13152 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13153 unsigned_char_type_node);
13154 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13155 unsigned_char_type_node);
13156 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13157 every user file. */
13158 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13159 get_identifier ("reserved"), short_unsigned_type_node);
13160 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13161 get_identifier ("overflow_arg_area"),
13162 ptr_type_node);
13163 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13164 get_identifier ("reg_save_area"),
13165 ptr_type_node);
13167 va_list_gpr_counter_field = f_gpr;
13168 va_list_fpr_counter_field = f_fpr;
13170 DECL_FIELD_CONTEXT (f_gpr) = record;
13171 DECL_FIELD_CONTEXT (f_fpr) = record;
13172 DECL_FIELD_CONTEXT (f_res) = record;
13173 DECL_FIELD_CONTEXT (f_ovf) = record;
13174 DECL_FIELD_CONTEXT (f_sav) = record;
13176 TYPE_STUB_DECL (record) = type_decl;
13177 TYPE_NAME (record) = type_decl;
13178 TYPE_FIELDS (record) = f_gpr;
13179 DECL_CHAIN (f_gpr) = f_fpr;
13180 DECL_CHAIN (f_fpr) = f_res;
13181 DECL_CHAIN (f_res) = f_ovf;
13182 DECL_CHAIN (f_ovf) = f_sav;
13184 layout_type (record);
13186 /* The correct type is an array type of one element. */
13187 return build_array_type (record, build_index_type (size_zero_node));
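/* Editor's sketch: the record built above corresponds to this V.4 ABI
   declaration (the tag and field names match the identifiers created
   above; the field notes are ours):

     typedef struct __va_list_tag
     {
       unsigned char gpr;	     next GP register to fetch, 0..8
       unsigned char fpr;	     next FP register to fetch, 0..8
       unsigned short reserved;	     padding, named for -Wpadded's sake
       void *overflow_arg_area;	     arguments that spilled to the stack
       void *reg_save_area;	     base of the dumped r3..r10 / f1..f8
     } __va_list_tag;

   The type actually returned is __va_list_tag[1], so a va_list decays
   to a pointer in the usual C fashion.  */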
13190 /* Implement va_start. */
13192 static void
13193 rs6000_va_start (tree valist, rtx nextarg)
13195 HOST_WIDE_INT words, n_gpr, n_fpr;
13196 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13197 tree gpr, fpr, ovf, sav, t;
13199 /* Only SVR4 needs something special. */
13200 if (DEFAULT_ABI != ABI_V4)
13202 std_expand_builtin_va_start (valist, nextarg);
13203 return;
13206 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13207 f_fpr = DECL_CHAIN (f_gpr);
13208 f_res = DECL_CHAIN (f_fpr);
13209 f_ovf = DECL_CHAIN (f_res);
13210 f_sav = DECL_CHAIN (f_ovf);
13212 valist = build_simple_mem_ref (valist);
13213 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13214 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13215 f_fpr, NULL_TREE);
13216 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13217 f_ovf, NULL_TREE);
13218 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13219 f_sav, NULL_TREE);
13221 /* Count number of gp and fp argument registers used. */
13222 words = crtl->args.info.words;
13223 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13224 GP_ARG_NUM_REG);
13225 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13226 FP_ARG_NUM_REG);
13228 if (TARGET_DEBUG_ARG)
13229 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13230 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13231 words, n_gpr, n_fpr);
13233 if (cfun->va_list_gpr_size)
13235 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13236 build_int_cst (NULL_TREE, n_gpr));
13237 TREE_SIDE_EFFECTS (t) = 1;
13238 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13241 if (cfun->va_list_fpr_size)
13243 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13244 build_int_cst (NULL_TREE, n_fpr));
13245 TREE_SIDE_EFFECTS (t) = 1;
13246 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13248 #ifdef HAVE_AS_GNU_ATTRIBUTE
13249 if (call_ABI_of_interest (cfun->decl))
13250 rs6000_passes_float = true;
13251 #endif
13254 /* Find the overflow area. */
13255 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13256 if (words != 0)
13257 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13258 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13259 TREE_SIDE_EFFECTS (t) = 1;
13260 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13262 /* If there were no va_arg invocations, don't set up the register
13263 save area. */
13264 if (!cfun->va_list_gpr_size
13265 && !cfun->va_list_fpr_size
13266 && n_gpr < GP_ARG_NUM_REG
13267 && n_fpr < FP_ARG_V4_MAX_REG)
13268 return;
13270 /* Find the register save area. */
13271 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13272 if (cfun->machine->varargs_save_offset)
13273 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13274 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13275 TREE_SIDE_EFFECTS (t) = 1;
13276 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
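/* Editor's sketch of the net effect of the expansion above for a V.4
   function f (int a, ...) with no named FP arguments:

     ap->gpr = 1;		r3 was consumed by the named arg A
     ap->fpr = 0;
     ap->overflow_arg_area = incoming argument pointer (WORDS is 0);
     ap->reg_save_area = frame base + cfun->machine->varargs_save_offset;  */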
13279 /* Implement va_arg. */
13281 static tree
13282 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13283 gimple_seq *post_p)
13285 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13286 tree gpr, fpr, ovf, sav, reg, t, u;
13287 int size, rsize, n_reg, sav_ofs, sav_scale;
13288 tree lab_false, lab_over, addr;
13289 int align;
13290 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13291 int regalign = 0;
13292 gimple *stmt;
13294 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13296 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13297 return build_va_arg_indirect_ref (t);
13300 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13301 earlier version of gcc, with the property that it always applied alignment
13302 adjustments to the va-args (even for zero-sized types). The cheapest way
13303 to deal with this is to replicate the effect of the part of
13304 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13305 of relevance.
13306 We don't need to check for pass-by-reference because of the test above.
13307 We can return a simplified answer, since we know there's no offset to add. */
13309 if (((TARGET_MACHO
13310 && rs6000_darwin64_abi)
13311 || DEFAULT_ABI == ABI_ELFv2
13312 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13313 && integer_zerop (TYPE_SIZE (type)))
13315 unsigned HOST_WIDE_INT align, boundary;
13316 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13317 align = PARM_BOUNDARY / BITS_PER_UNIT;
13318 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13319 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13320 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13321 boundary /= BITS_PER_UNIT;
13322 if (boundary > align)
13324 tree t;
13325 /* This updates arg ptr by the amount that would be necessary
13326 to align the zero-sized (but not zero-alignment) item. */
13327 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13328 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13329 gimplify_and_add (t, pre_p);
13331 t = fold_convert (sizetype, valist_tmp);
13332 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13333 fold_convert (TREE_TYPE (valist),
13334 fold_build2 (BIT_AND_EXPR, sizetype, t,
13335 size_int (-boundary))));
13336 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13337 gimplify_and_add (t, pre_p);
13339 /* Since it is zero-sized there's no increment for the item itself. */
13340 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13341 return build_va_arg_indirect_ref (valist_tmp);
13344 if (DEFAULT_ABI != ABI_V4)
13346 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13348 tree elem_type = TREE_TYPE (type);
13349 machine_mode elem_mode = TYPE_MODE (elem_type);
13350 int elem_size = GET_MODE_SIZE (elem_mode);
13352 if (elem_size < UNITS_PER_WORD)
13354 tree real_part, imag_part;
13355 gimple_seq post = NULL;
13357 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13358 &post);
13359 /* Copy the value into a temporary, lest the formal temporary
13360 be reused out from under us. */
13361 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13362 gimple_seq_add_seq (pre_p, post);
13364 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13365 post_p);
13367 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13371 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13374 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13375 f_fpr = DECL_CHAIN (f_gpr);
13376 f_res = DECL_CHAIN (f_fpr);
13377 f_ovf = DECL_CHAIN (f_res);
13378 f_sav = DECL_CHAIN (f_ovf);
13380 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13381 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13382 f_fpr, NULL_TREE);
13383 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13384 f_ovf, NULL_TREE);
13385 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13386 f_sav, NULL_TREE);
13388 size = int_size_in_bytes (type);
13389 rsize = (size + 3) / 4;
13390 int pad = 4 * rsize - size;
13391 align = 1;
13393 machine_mode mode = TYPE_MODE (type);
13394 if (abi_v4_pass_in_fpr (mode))
13396 /* FP args go in FP registers, if present. */
13397 reg = fpr;
13398 n_reg = (size + 7) / 8;
13399 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13400 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13401 if (mode != SFmode && mode != SDmode)
13402 align = 8;
13404 else
13406 /* Otherwise into GP registers. */
13407 reg = gpr;
13408 n_reg = rsize;
13409 sav_ofs = 0;
13410 sav_scale = 4;
13411 if (n_reg == 2)
13412 align = 8;
13415 /* Pull the value out of the saved registers.... */
13417 lab_over = NULL;
13418 addr = create_tmp_var (ptr_type_node, "addr");
13420 /* AltiVec vectors never go in registers when -mabi=altivec. */
13421 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13422 align = 16;
13423 else
13425 lab_false = create_artificial_label (input_location);
13426 lab_over = create_artificial_label (input_location);
13428 /* Long long is aligned in the registers, as is any other 2-gpr
13429 item such as complex int, due to a historical mistake. */
13430 u = reg;
13431 if (n_reg == 2 && reg == gpr)
13433 regalign = 1;
13434 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13435 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13436 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13437 unshare_expr (reg), u);
13439 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13440 reg number is 0 for f1, so we want to make it odd. */
13441 else if (reg == fpr && mode == TDmode)
13443 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13444 build_int_cst (TREE_TYPE (reg), 1));
13445 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13448 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13449 t = build2 (GE_EXPR, boolean_type_node, u, t);
13450 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13451 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13452 gimplify_and_add (t, pre_p);
13454 t = sav;
13455 if (sav_ofs)
13456 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13458 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13459 build_int_cst (TREE_TYPE (reg), n_reg));
13460 u = fold_convert (sizetype, u);
13461 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13462 t = fold_build_pointer_plus (t, u);
13464 /* _Decimal32 varargs are located in the second word of the 64-bit
13465 FP register for 32-bit binaries. */
13466 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13467 t = fold_build_pointer_plus_hwi (t, size);
13469 /* Args are passed right-aligned. */
13470 if (BYTES_BIG_ENDIAN)
13471 t = fold_build_pointer_plus_hwi (t, pad);
13473 gimplify_assign (addr, t, pre_p);
13475 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13477 stmt = gimple_build_label (lab_false);
13478 gimple_seq_add_stmt (pre_p, stmt);
13480 if ((n_reg == 2 && !regalign) || n_reg > 2)
13482 /* Ensure that we don't find any more args in regs.
13483 Alignment has already taken care of the special cases. */
13484 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13488 /* ... otherwise out of the overflow area. */
13490 /* Care for on-stack alignment if needed. */
13491 t = ovf;
13492 if (align != 1)
13494 t = fold_build_pointer_plus_hwi (t, align - 1);
13495 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13496 build_int_cst (TREE_TYPE (t), -align));
13499 /* Args are passed right-aligned. */
13500 if (BYTES_BIG_ENDIAN)
13501 t = fold_build_pointer_plus_hwi (t, pad);
13503 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13505 gimplify_assign (unshare_expr (addr), t, pre_p);
13507 t = fold_build_pointer_plus_hwi (t, size);
13508 gimplify_assign (unshare_expr (ovf), t, pre_p);
13510 if (lab_over)
13512 stmt = gimple_build_label (lab_over);
13513 gimple_seq_add_stmt (pre_p, stmt);
13516 if (STRICT_ALIGNMENT
13517 && (TYPE_ALIGN (type)
13518 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13520 /* The value (of type complex double, for example) may not be
13521 aligned in memory in the saved registers, so copy via a
13522 temporary. (This is the same code as used for SPARC.) */
13523 tree tmp = create_tmp_var (type, "va_arg_tmp");
13524 tree dest_addr = build_fold_addr_expr (tmp);
13526 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13527 3, dest_addr, addr, size_int (rsize * 4));
13529 gimplify_and_add (copy, pre_p);
13530 addr = dest_addr;
13533 addr = fold_convert (ptrtype, addr);
13534 return build_va_arg_indirect_ref (addr);
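/* Editor's sketch (variable names invented) of the gimple produced
   above for va_arg (ap, double) on V.4 with hard float:

     if (ap.fpr >= 8) goto lab_false;
     addr = ap.reg_save_area + 32 + ap.fpr * 8;	     32 is sav_ofs
     ap.fpr = ap.fpr + 1;
     goto lab_over;
   lab_false:
     addr = (ap.overflow_arg_area + 7) & -8;	     stack doubles are
     ap.overflow_arg_area = addr + 8;		     8-byte aligned
   lab_over:
     result = *(double *) addr;  */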
13537 /* Builtins. */
13539 static void
13540 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13542 tree t;
13543 unsigned classify = rs6000_builtin_info[(int)code].attr;
13544 const char *attr_string = "";
13546 gcc_assert (name != NULL);
13547 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13549 if (rs6000_builtin_decls[(int)code])
13550 fatal_error (input_location,
13551 "internal error: builtin function %qs already processed",
13552 name);
13554 rs6000_builtin_decls[(int)code] = t =
13555 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13557 /* Set any special attributes. */
13558 if ((classify & RS6000_BTC_CONST) != 0)
13560 /* const function: the result depends only on the inputs. */
13561 TREE_READONLY (t) = 1;
13562 TREE_NOTHROW (t) = 1;
13563 attr_string = ", const";
13565 else if ((classify & RS6000_BTC_PURE) != 0)
13567 /* pure function: it can read global memory, but does not set any
13568 external state. */
13569 DECL_PURE_P (t) = 1;
13570 TREE_NOTHROW (t) = 1;
13571 attr_string = ", pure";
13573 else if ((classify & RS6000_BTC_FP) != 0)
13575 /* Function is a math function. If rounding mode is on, then treat the
13576 function as not reading global memory, but it can have arbitrary side
13577 effects. If it is off, then assume the function is a const function.
13578 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13579 builtin-attrs.def that is used for the math functions. */
13580 TREE_NOTHROW (t) = 1;
13581 if (flag_rounding_math)
13583 DECL_PURE_P (t) = 1;
13584 DECL_IS_NOVOPS (t) = 1;
13585 attr_string = ", fp, pure";
13587 else
13589 TREE_READONLY (t) = 1;
13590 attr_string = ", fp, const";
13593 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13594 gcc_unreachable ();
13596 if (TARGET_DEBUG_BUILTIN)
13597 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13598 (int)code, name, attr_string);
13601 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13603 #undef RS6000_BUILTIN_0
13604 #undef RS6000_BUILTIN_1
13605 #undef RS6000_BUILTIN_2
13606 #undef RS6000_BUILTIN_3
13607 #undef RS6000_BUILTIN_A
13608 #undef RS6000_BUILTIN_D
13609 #undef RS6000_BUILTIN_H
13610 #undef RS6000_BUILTIN_P
13611 #undef RS6000_BUILTIN_Q
13612 #undef RS6000_BUILTIN_X
13614 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13615 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13616 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13617 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13618 { MASK, ICODE, NAME, ENUM },
13620 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13621 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13622 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13623 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13624 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13625 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13627 static const struct builtin_description bdesc_3arg[] =
13629 #include "rs6000-builtin.def"
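/* Editor's note on the idiom above and its repetitions below: this is
   the classic X-macro pattern.  rs6000-builtin.def invokes one
   RS6000_BUILTIN_<kind> macro per builtin; before each table exactly
   one kind is redefined to emit an initializer row and the others are
   defined to nothing, so each #include of the .def file extracts only
   that kind's entries.  A minimal stand-alone sketch with hypothetical
   names:

     #define MY_OPS(UN, BIN) UN (neg) BIN (add) BIN (sub)
     #define EMIT(NAME) { #NAME },
     #define SKIP(NAME)
     static const struct { const char *name; } unary_ops[] = {
       MY_OPS (EMIT, SKIP)	     keeps "neg", drops "add" and "sub"
     };  */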
13632 /* DST operations: void foo (void *, const int, const char). */
13634 #undef RS6000_BUILTIN_0
13635 #undef RS6000_BUILTIN_1
13636 #undef RS6000_BUILTIN_2
13637 #undef RS6000_BUILTIN_3
13638 #undef RS6000_BUILTIN_A
13639 #undef RS6000_BUILTIN_D
13640 #undef RS6000_BUILTIN_H
13641 #undef RS6000_BUILTIN_P
13642 #undef RS6000_BUILTIN_Q
13643 #undef RS6000_BUILTIN_X
13645 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13646 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13647 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13648 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13649 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13650 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13651 { MASK, ICODE, NAME, ENUM },
13653 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13654 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13655 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13656 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13658 static const struct builtin_description bdesc_dst[] =
13660 #include "rs6000-builtin.def"
13663 /* Simple binary operations: VECc = foo (VECa, VECb). */
13665 #undef RS6000_BUILTIN_0
13666 #undef RS6000_BUILTIN_1
13667 #undef RS6000_BUILTIN_2
13668 #undef RS6000_BUILTIN_3
13669 #undef RS6000_BUILTIN_A
13670 #undef RS6000_BUILTIN_D
13671 #undef RS6000_BUILTIN_H
13672 #undef RS6000_BUILTIN_P
13673 #undef RS6000_BUILTIN_Q
13674 #undef RS6000_BUILTIN_X
13676 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13677 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13678 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13679 { MASK, ICODE, NAME, ENUM },
13681 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13682 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13683 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13684 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13685 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13686 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13687 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13689 static const struct builtin_description bdesc_2arg[] =
13691 #include "rs6000-builtin.def"
13694 #undef RS6000_BUILTIN_0
13695 #undef RS6000_BUILTIN_1
13696 #undef RS6000_BUILTIN_2
13697 #undef RS6000_BUILTIN_3
13698 #undef RS6000_BUILTIN_A
13699 #undef RS6000_BUILTIN_D
13700 #undef RS6000_BUILTIN_H
13701 #undef RS6000_BUILTIN_P
13702 #undef RS6000_BUILTIN_Q
13703 #undef RS6000_BUILTIN_X
13705 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13706 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13707 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13708 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13709 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13710 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13711 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13712 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13713 { MASK, ICODE, NAME, ENUM },
13715 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13716 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13718 /* AltiVec predicates. */
13720 static const struct builtin_description bdesc_altivec_preds[] =
13722 #include "rs6000-builtin.def"
13725 /* PAIRED predicates. */
13726 #undef RS6000_BUILTIN_0
13727 #undef RS6000_BUILTIN_1
13728 #undef RS6000_BUILTIN_2
13729 #undef RS6000_BUILTIN_3
13730 #undef RS6000_BUILTIN_A
13731 #undef RS6000_BUILTIN_D
13732 #undef RS6000_BUILTIN_H
13733 #undef RS6000_BUILTIN_P
13734 #undef RS6000_BUILTIN_Q
13735 #undef RS6000_BUILTIN_X
13737 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13738 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13739 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13740 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13741 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13742 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13743 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13744 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13745 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13746 { MASK, ICODE, NAME, ENUM },
13748 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13750 static const struct builtin_description bdesc_paired_preds[] =
13752 #include "rs6000-builtin.def"
13755 /* ABS* operations. */
13757 #undef RS6000_BUILTIN_0
13758 #undef RS6000_BUILTIN_1
13759 #undef RS6000_BUILTIN_2
13760 #undef RS6000_BUILTIN_3
13761 #undef RS6000_BUILTIN_A
13762 #undef RS6000_BUILTIN_D
13763 #undef RS6000_BUILTIN_H
13764 #undef RS6000_BUILTIN_P
13765 #undef RS6000_BUILTIN_Q
13766 #undef RS6000_BUILTIN_X
13768 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13769 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13770 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13771 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13772 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13773 { MASK, ICODE, NAME, ENUM },
13775 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13776 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13777 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13778 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13779 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13781 static const struct builtin_description bdesc_abs[] =
13783 #include "rs6000-builtin.def"
13786 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13787 foo (VECa). */
13789 #undef RS6000_BUILTIN_0
13790 #undef RS6000_BUILTIN_1
13791 #undef RS6000_BUILTIN_2
13792 #undef RS6000_BUILTIN_3
13793 #undef RS6000_BUILTIN_A
13794 #undef RS6000_BUILTIN_D
13795 #undef RS6000_BUILTIN_H
13796 #undef RS6000_BUILTIN_P
13797 #undef RS6000_BUILTIN_Q
13798 #undef RS6000_BUILTIN_X
13800 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13801 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13802 { MASK, ICODE, NAME, ENUM },
13804 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13805 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13806 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13807 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13808 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13809 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13810 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13811 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13813 static const struct builtin_description bdesc_1arg[] =
13815 #include "rs6000-builtin.def"
13818 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13820 #undef RS6000_BUILTIN_0
13821 #undef RS6000_BUILTIN_1
13822 #undef RS6000_BUILTIN_2
13823 #undef RS6000_BUILTIN_3
13824 #undef RS6000_BUILTIN_A
13825 #undef RS6000_BUILTIN_D
13826 #undef RS6000_BUILTIN_H
13827 #undef RS6000_BUILTIN_P
13828 #undef RS6000_BUILTIN_Q
13829 #undef RS6000_BUILTIN_X
13831 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13832 { MASK, ICODE, NAME, ENUM },
13834 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13835 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13836 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13837 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13838 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13839 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13840 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13841 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13842 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13844 static const struct builtin_description bdesc_0arg[] =
13846 #include "rs6000-builtin.def"
13849 /* HTM builtins. */
13850 #undef RS6000_BUILTIN_0
13851 #undef RS6000_BUILTIN_1
13852 #undef RS6000_BUILTIN_2
13853 #undef RS6000_BUILTIN_3
13854 #undef RS6000_BUILTIN_A
13855 #undef RS6000_BUILTIN_D
13856 #undef RS6000_BUILTIN_H
13857 #undef RS6000_BUILTIN_P
13858 #undef RS6000_BUILTIN_Q
13859 #undef RS6000_BUILTIN_X
13861 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13862 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13863 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13864 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13865 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13866 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13867 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13868 { MASK, ICODE, NAME, ENUM },
13870 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13871 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13872 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13874 static const struct builtin_description bdesc_htm[] =
13876 #include "rs6000-builtin.def"
13879 #undef RS6000_BUILTIN_0
13880 #undef RS6000_BUILTIN_1
13881 #undef RS6000_BUILTIN_2
13882 #undef RS6000_BUILTIN_3
13883 #undef RS6000_BUILTIN_A
13884 #undef RS6000_BUILTIN_D
13885 #undef RS6000_BUILTIN_H
13886 #undef RS6000_BUILTIN_P
13887 #undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
13889 /* Return true if a builtin function is overloaded. */
13890 bool
13891 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13893 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13896 const char *
13897 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13899 return rs6000_builtin_info[(int)fncode].name;
13902 /* Expand an expression EXP that calls a builtin without arguments. */
13903 static rtx
13904 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13906 rtx pat;
13907 machine_mode tmode = insn_data[icode].operand[0].mode;
13909 if (icode == CODE_FOR_nothing)
13910 /* Builtin not supported on this processor. */
13911 return 0;
13913 if (target == 0
13914 || GET_MODE (target) != tmode
13915 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13916 target = gen_reg_rtx (tmode);
13918 pat = GEN_FCN (icode) (target);
13919 if (! pat)
13920 return 0;
13921 emit_insn (pat);
13923 return target;
13927 static rtx
13928 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13930 rtx pat;
13931 tree arg0 = CALL_EXPR_ARG (exp, 0);
13932 tree arg1 = CALL_EXPR_ARG (exp, 1);
13933 rtx op0 = expand_normal (arg0);
13934 rtx op1 = expand_normal (arg1);
13935 machine_mode mode0 = insn_data[icode].operand[0].mode;
13936 machine_mode mode1 = insn_data[icode].operand[1].mode;
13938 if (icode == CODE_FOR_nothing)
13939 /* Builtin not supported on this processor. */
13940 return 0;
13942 /* If we got invalid arguments bail out before generating bad rtl. */
13943 if (arg0 == error_mark_node || arg1 == error_mark_node)
13944 return const0_rtx;
13946 if (GET_CODE (op0) != CONST_INT
13947 || INTVAL (op0) > 255
13948 || INTVAL (op0) < 0)
13950 error ("argument 1 must be an 8-bit field value");
13951 return const0_rtx;
13954 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13955 op0 = copy_to_mode_reg (mode0, op0);
13957 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13958 op1 = copy_to_mode_reg (mode1, op1);
13960 pat = GEN_FCN (icode) (op0, op1);
13961 if (! pat)
13962 return const0_rtx;
13963 emit_insn (pat);
13965 return NULL_RTX;
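/* Editor's usage note: this expander backs __builtin_mtfsf.  A call
   such as __builtin_mtfsf (0xFF, d) copies all eight FPSCR fields from
   the double D; the range check above is why argument 1 must be a
   literal in 0..255.  */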
13968 static rtx
13969 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13971 rtx pat;
13972 tree arg0 = CALL_EXPR_ARG (exp, 0);
13973 rtx op0 = expand_normal (arg0);
13974 machine_mode tmode = insn_data[icode].operand[0].mode;
13975 machine_mode mode0 = insn_data[icode].operand[1].mode;
13977 if (icode == CODE_FOR_nothing)
13978 /* Builtin not supported on this processor. */
13979 return 0;
13981 /* If we got invalid arguments bail out before generating bad rtl. */
13982 if (arg0 == error_mark_node)
13983 return const0_rtx;
13985 if (icode == CODE_FOR_altivec_vspltisb
13986 || icode == CODE_FOR_altivec_vspltish
13987 || icode == CODE_FOR_altivec_vspltisw)
13989 /* Only allow 5-bit *signed* literals. */
13990 if (GET_CODE (op0) != CONST_INT
13991 || INTVAL (op0) > 15
13992 || INTVAL (op0) < -16)
13994 error ("argument 1 must be a 5-bit signed literal");
13995 return CONST0_RTX (tmode);
13999 if (target == 0
14000 || GET_MODE (target) != tmode
14001 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14002 target = gen_reg_rtx (tmode);
14004 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14005 op0 = copy_to_mode_reg (mode0, op0);
14007 pat = GEN_FCN (icode) (target, op0);
14008 if (! pat)
14009 return 0;
14010 emit_insn (pat);
14012 return target;
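/* Editor's usage note: the vspltis* checks above are what reject, say,
   vec_splat_s8 (40) from altivec.h, while a legal use such as

     vector signed char v = vec_splat_s8 (-5);

   reaches this expander via CODE_FOR_altivec_vspltisb with a literal
   in the required -16..15 range.  */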
14015 static rtx
14016 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14018 rtx pat, scratch1, scratch2;
14019 tree arg0 = CALL_EXPR_ARG (exp, 0);
14020 rtx op0 = expand_normal (arg0);
14021 machine_mode tmode = insn_data[icode].operand[0].mode;
14022 machine_mode mode0 = insn_data[icode].operand[1].mode;
14024 /* If we have invalid arguments, bail out before generating bad rtl. */
14025 if (arg0 == error_mark_node)
14026 return const0_rtx;
14028 if (target == 0
14029 || GET_MODE (target) != tmode
14030 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14031 target = gen_reg_rtx (tmode);
14033 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14034 op0 = copy_to_mode_reg (mode0, op0);
14036 scratch1 = gen_reg_rtx (mode0);
14037 scratch2 = gen_reg_rtx (mode0);
14039 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14040 if (! pat)
14041 return 0;
14042 emit_insn (pat);
14044 return target;
14047 static rtx
14048 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14050 rtx pat;
14051 tree arg0 = CALL_EXPR_ARG (exp, 0);
14052 tree arg1 = CALL_EXPR_ARG (exp, 1);
14053 rtx op0 = expand_normal (arg0);
14054 rtx op1 = expand_normal (arg1);
14055 machine_mode tmode = insn_data[icode].operand[0].mode;
14056 machine_mode mode0 = insn_data[icode].operand[1].mode;
14057 machine_mode mode1 = insn_data[icode].operand[2].mode;
14059 if (icode == CODE_FOR_nothing)
14060 /* Builtin not supported on this processor. */
14061 return 0;
14063 /* If we got invalid arguments bail out before generating bad rtl. */
14064 if (arg0 == error_mark_node || arg1 == error_mark_node)
14065 return const0_rtx;
14067 if (icode == CODE_FOR_altivec_vcfux
14068 || icode == CODE_FOR_altivec_vcfsx
14069 || icode == CODE_FOR_altivec_vctsxs
14070 || icode == CODE_FOR_altivec_vctuxs
14071 || icode == CODE_FOR_altivec_vspltb
14072 || icode == CODE_FOR_altivec_vsplth
14073 || icode == CODE_FOR_altivec_vspltw)
14075 /* Only allow 5-bit unsigned literals. */
14076 STRIP_NOPS (arg1);
14077 if (TREE_CODE (arg1) != INTEGER_CST
14078 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14080 error ("argument 2 must be a 5-bit unsigned literal");
14081 return CONST0_RTX (tmode);
14084 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14085 || icode == CODE_FOR_dfptstsfi_lt_dd
14086 || icode == CODE_FOR_dfptstsfi_gt_dd
14087 || icode == CODE_FOR_dfptstsfi_unordered_dd
14088 || icode == CODE_FOR_dfptstsfi_eq_td
14089 || icode == CODE_FOR_dfptstsfi_lt_td
14090 || icode == CODE_FOR_dfptstsfi_gt_td
14091 || icode == CODE_FOR_dfptstsfi_unordered_td)
14093 /* Only allow 6-bit unsigned literals. */
14094 STRIP_NOPS (arg0);
14095 if (TREE_CODE (arg0) != INTEGER_CST
14096 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14098 error ("argument 1 must be a 6-bit unsigned literal");
14099 return CONST0_RTX (tmode);
14102 else if (icode == CODE_FOR_xststdcqp
14103 || icode == CODE_FOR_xststdcdp
14104 || icode == CODE_FOR_xststdcsp
14105 || icode == CODE_FOR_xvtstdcdp
14106 || icode == CODE_FOR_xvtstdcsp)
14108 /* Only allow 7-bit unsigned literals. */
14109 STRIP_NOPS (arg1);
14110 if (TREE_CODE (arg1) != INTEGER_CST
14111 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14113 error ("argument 2 must be a 7-bit unsigned literal");
14114 return CONST0_RTX (tmode);
14117 else if (icode == CODE_FOR_unpackv1ti
14118 || icode == CODE_FOR_unpackkf
14119 || icode == CODE_FOR_unpacktf
14120 || icode == CODE_FOR_unpackif
14121 || icode == CODE_FOR_unpacktd)
14123 /* Only allow 1-bit unsigned literals. */
14124 STRIP_NOPS (arg1);
14125 if (TREE_CODE (arg1) != INTEGER_CST
14126 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14128 error ("argument 2 must be a 1-bit unsigned literal");
14129 return CONST0_RTX (tmode);
14133 if (target == 0
14134 || GET_MODE (target) != tmode
14135 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14136 target = gen_reg_rtx (tmode);
14138 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14139 op0 = copy_to_mode_reg (mode0, op0);
14140 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14141 op1 = copy_to_mode_reg (mode1, op1);
14143 pat = GEN_FCN (icode) (target, op0, op1);
14144 if (! pat)
14145 return 0;
14146 emit_insn (pat);
14148 return target;
14151 static rtx
14152 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14154 rtx pat, scratch;
14155 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14156 tree arg0 = CALL_EXPR_ARG (exp, 1);
14157 tree arg1 = CALL_EXPR_ARG (exp, 2);
14158 rtx op0 = expand_normal (arg0);
14159 rtx op1 = expand_normal (arg1);
14160 machine_mode tmode = SImode;
14161 machine_mode mode0 = insn_data[icode].operand[1].mode;
14162 machine_mode mode1 = insn_data[icode].operand[2].mode;
14163 int cr6_form_int;
14165 if (TREE_CODE (cr6_form) != INTEGER_CST)
14167 error ("argument 1 of %qs must be a constant",
14168 "__builtin_altivec_predicate");
14169 return const0_rtx;
14171 else
14172 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14174 gcc_assert (mode0 == mode1);
14176 /* If we have invalid arguments, bail out before generating bad rtl. */
14177 if (arg0 == error_mark_node || arg1 == error_mark_node)
14178 return const0_rtx;
14180 if (target == 0
14181 || GET_MODE (target) != tmode
14182 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14183 target = gen_reg_rtx (tmode);
14185 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14186 op0 = copy_to_mode_reg (mode0, op0);
14187 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14188 op1 = copy_to_mode_reg (mode1, op1);
14190 /* Note that for many of the relevant operations (e.g. cmpne or
14191 cmpeq) with float or double operands, it makes more sense for the
14192 mode of the allocated scratch register to select a vector of
14193 integers. But the choice to copy the mode of operand 0 was made
14194 long ago and there are no plans to change it. */
14195 scratch = gen_reg_rtx (mode0);
14197 pat = GEN_FCN (icode) (scratch, op0, op1);
14198 if (! pat)
14199 return 0;
14200 emit_insn (pat);
14202 /* The vec_any* and vec_all* predicates use the same opcodes for two
14203 different operations, but the bits in CR6 will be different
14204 depending on what information we want. So we have to play tricks
14205 with CR6 to get the right bits out.
14207 If you think this is disgusting, look at the specs for the
14208 AltiVec predicates. */
14210 switch (cr6_form_int)
14212 case 0:
14213 emit_insn (gen_cr6_test_for_zero (target));
14214 break;
14215 case 1:
14216 emit_insn (gen_cr6_test_for_zero_reverse (target));
14217 break;
14218 case 2:
14219 emit_insn (gen_cr6_test_for_lt (target));
14220 break;
14221 case 3:
14222 emit_insn (gen_cr6_test_for_lt_reverse (target));
14223 break;
14224 default:
14225 error ("argument 1 of %qs is out of range",
14226 "__builtin_altivec_predicate");
14227 break;
14230 return target;
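/* Editor's usage sketch: the vec_all_* / vec_any_* wrappers in
   altivec.h supply the CR6 selector; assuming the usual __CR6_* values
   from that header, something like

     vector signed int a, b;
     int any_eq = __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, a, b);

   lands here and selects the "test for zero, reversed" case above.  */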
14233 static rtx
14234 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14236 rtx pat, addr;
14237 tree arg0 = CALL_EXPR_ARG (exp, 0);
14238 tree arg1 = CALL_EXPR_ARG (exp, 1);
14239 machine_mode tmode = insn_data[icode].operand[0].mode;
14240 machine_mode mode0 = Pmode;
14241 machine_mode mode1 = Pmode;
14242 rtx op0 = expand_normal (arg0);
14243 rtx op1 = expand_normal (arg1);
14245 if (icode == CODE_FOR_nothing)
14246 /* Builtin not supported on this processor. */
14247 return 0;
14249 /* If we got invalid arguments bail out before generating bad rtl. */
14250 if (arg0 == error_mark_node || arg1 == error_mark_node)
14251 return const0_rtx;
14253 if (target == 0
14254 || GET_MODE (target) != tmode
14255 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14256 target = gen_reg_rtx (tmode);
14258 op1 = copy_to_mode_reg (mode1, op1);
14260 if (op0 == const0_rtx)
14262 addr = gen_rtx_MEM (tmode, op1);
14264 else
14266 op0 = copy_to_mode_reg (mode0, op0);
14267 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14270 pat = GEN_FCN (icode) (target, addr);
14272 if (! pat)
14273 return 0;
14274 emit_insn (pat);
14276 return target;
14279 /* Return a constant vector for use as a little-endian permute control vector
14280 to reverse the order of elements of the given vector mode. */
14281 static rtx
14282 swap_selector_for_mode (machine_mode mode)
14284 /* These are little endian vectors, so their elements are reversed
14285 from what you would normally expect for a permute control vector. */
14286 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14287 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14288 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14289 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14290 unsigned int *swaparray, i;
14291 rtx perm[16];
14293 switch (mode)
14295 case E_V2DFmode:
14296 case E_V2DImode:
14297 swaparray = swap2;
14298 break;
14299 case E_V4SFmode:
14300 case E_V4SImode:
14301 swaparray = swap4;
14302 break;
14303 case E_V8HImode:
14304 swaparray = swap8;
14305 break;
14306 case E_V16QImode:
14307 swaparray = swap16;
14308 break;
14309 default:
14310 gcc_unreachable ();
14313 for (i = 0; i < 16; ++i)
14314 perm[i] = GEN_INT (swaparray[i]);
14316 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
14319 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14320 with -maltivec=be specified. Issue the load followed by an element-
14321 reversing permute. */
14322 void
14323 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14325 rtx tmp = gen_reg_rtx (mode);
14326 rtx load = gen_rtx_SET (tmp, op1);
14327 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14328 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14329 rtx sel = swap_selector_for_mode (mode);
14330 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14332 gcc_assert (REG_P (op0));
14333 emit_insn (par);
14334 emit_insn (gen_rtx_SET (op0, vperm));
14337 /* Generate code for a "stvxl" built-in for a little endian target with
14338 -maltivec=be specified. Issue the store preceded by an element-reversing
14339 permute. */
14340 void
14341 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14343 rtx tmp = gen_reg_rtx (mode);
14344 rtx store = gen_rtx_SET (op0, tmp);
14345 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14346 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14347 rtx sel = swap_selector_for_mode (mode);
14348 rtx vperm;
14350 gcc_assert (REG_P (op1));
14351 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14352 emit_insn (gen_rtx_SET (tmp, vperm));
14353 emit_insn (par);
14356 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14357 specified. Issue the store preceded by an element-reversing permute. */
14358 void
14359 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14361 machine_mode inner_mode = GET_MODE_INNER (mode);
14362 rtx tmp = gen_reg_rtx (mode);
14363 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14364 rtx sel = swap_selector_for_mode (mode);
14365 rtx vperm;
14367 gcc_assert (REG_P (op1));
14368 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14369 emit_insn (gen_rtx_SET (tmp, vperm));
14370 emit_insn (gen_rtx_SET (op0, stvx));
14373 static rtx
14374 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14376 rtx pat, addr;
14377 tree arg0 = CALL_EXPR_ARG (exp, 0);
14378 tree arg1 = CALL_EXPR_ARG (exp, 1);
14379 machine_mode tmode = insn_data[icode].operand[0].mode;
14380 machine_mode mode0 = Pmode;
14381 machine_mode mode1 = Pmode;
14382 rtx op0 = expand_normal (arg0);
14383 rtx op1 = expand_normal (arg1);
14385 if (icode == CODE_FOR_nothing)
14386 /* Builtin not supported on this processor. */
14387 return 0;
14389 /* If we got invalid arguments bail out before generating bad rtl. */
14390 if (arg0 == error_mark_node || arg1 == error_mark_node)
14391 return const0_rtx;
14393 if (target == 0
14394 || GET_MODE (target) != tmode
14395 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14396 target = gen_reg_rtx (tmode);
14398 op1 = copy_to_mode_reg (mode1, op1);
14400 /* For LVX, express the RTL accurately by ANDing the address with -16.
14401 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14402 so the raw address is fine. */
14403 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14404 || icode == CODE_FOR_altivec_lvx_v2di_2op
14405 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14406 || icode == CODE_FOR_altivec_lvx_v4si_2op
14407 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14408 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14410 rtx rawaddr;
14411 if (op0 == const0_rtx)
14412 rawaddr = op1;
14413 else
14415 op0 = copy_to_mode_reg (mode0, op0);
14416 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14418 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14419 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14421 /* For -maltivec=be, emit the load and follow it up with a
14422 permute to swap the elements. */
14423 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14425 rtx temp = gen_reg_rtx (tmode);
14426 emit_insn (gen_rtx_SET (temp, addr));
14428 rtx sel = swap_selector_for_mode (tmode);
14429 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14430 UNSPEC_VPERM);
14431 emit_insn (gen_rtx_SET (target, vperm));
14433 else
14434 emit_insn (gen_rtx_SET (target, addr));
14436 else
14438 if (op0 == const0_rtx)
14439 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14440 else
14442 op0 = copy_to_mode_reg (mode0, op0);
14443 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14444 gen_rtx_PLUS (Pmode, op1, op0));
14447 pat = GEN_FCN (icode) (target, addr);
14448 if (! pat)
14449 return 0;
14450 emit_insn (pat);
14453 return target;
14456 static rtx
14457 altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14459 rtx pat, addr;
14460 tree arg0 = CALL_EXPR_ARG (exp, 0);
14461 tree arg1 = CALL_EXPR_ARG (exp, 1);
14462 machine_mode tmode = insn_data[icode].operand[0].mode;
14463 machine_mode mode0 = Pmode;
14464 machine_mode mode1 = Pmode;
14465 rtx op0 = expand_normal (arg0);
14466 rtx op1 = expand_normal (arg1);
14468 if (icode == CODE_FOR_nothing)
14469 /* Builtin not supported on this processor. */
14470 return 0;
14472 /* If we got invalid arguments bail out before generating bad rtl. */
14473 if (arg0 == error_mark_node || arg1 == error_mark_node)
14474 return const0_rtx;
14476 if (target == 0
14477 || GET_MODE (target) != tmode
14478 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14479 target = gen_reg_rtx (tmode);
14481 op1 = copy_to_mode_reg (mode1, op1);
14483 if (op0 == const0_rtx)
14484 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14485 else
14487 op0 = copy_to_mode_reg (mode0, op0);
14488 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14489 gen_rtx_PLUS (Pmode, op1, op0));
14492 pat = GEN_FCN (icode) (target, addr);
14493 if (!pat)
14494 return 0;
14496 emit_insn (pat);
14497 /* Reverse the order of the elements if in LE mode. */
14498 if (!VECTOR_ELT_ORDER_BIG)
14500 rtx sel = swap_selector_for_mode (tmode);
14501 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
14502 UNSPEC_VPERM);
14503 emit_insn (gen_rtx_SET (target, vperm));
14505 return target;
14508 static rtx
14509 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14511 tree arg0 = CALL_EXPR_ARG (exp, 0);
14512 tree arg1 = CALL_EXPR_ARG (exp, 1);
14513 tree arg2 = CALL_EXPR_ARG (exp, 2);
14514 rtx op0 = expand_normal (arg0);
14515 rtx op1 = expand_normal (arg1);
14516 rtx op2 = expand_normal (arg2);
14517 rtx pat, addr;
14518 machine_mode tmode = insn_data[icode].operand[0].mode;
14519 machine_mode mode1 = Pmode;
14520 machine_mode mode2 = Pmode;
14522 /* Invalid arguments. Bail before doing anything stoopid! */
14523 if (arg0 == error_mark_node
14524 || arg1 == error_mark_node
14525 || arg2 == error_mark_node)
14526 return const0_rtx;
14528 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14529 op0 = copy_to_mode_reg (tmode, op0);
14531 op2 = copy_to_mode_reg (mode2, op2);
14533 if (op1 == const0_rtx)
14535 addr = gen_rtx_MEM (tmode, op2);
14537 else
14539 op1 = copy_to_mode_reg (mode1, op1);
14540 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14543 pat = GEN_FCN (icode) (addr, op0);
14544 if (pat)
14545 emit_insn (pat);
14546 return NULL_RTX;
14549 static rtx
14550 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14552 rtx pat;
14553 tree arg0 = CALL_EXPR_ARG (exp, 0);
14554 tree arg1 = CALL_EXPR_ARG (exp, 1);
14555 tree arg2 = CALL_EXPR_ARG (exp, 2);
14556 rtx op0 = expand_normal (arg0);
14557 rtx op1 = expand_normal (arg1);
14558 rtx op2 = expand_normal (arg2);
14559 machine_mode mode0 = insn_data[icode].operand[0].mode;
14560 machine_mode mode1 = insn_data[icode].operand[1].mode;
14561 machine_mode mode2 = insn_data[icode].operand[2].mode;
14563 if (icode == CODE_FOR_nothing)
14564 /* Builtin not supported on this processor. */
14565 return NULL_RTX;
14567 /* If we got invalid arguments, bail out before generating bad rtl. */
14568 if (arg0 == error_mark_node
14569 || arg1 == error_mark_node
14570 || arg2 == error_mark_node)
14571 return NULL_RTX;
14573 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14574 op0 = copy_to_mode_reg (mode0, op0);
14575 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14576 op1 = copy_to_mode_reg (mode1, op1);
14577 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14578 op2 = copy_to_mode_reg (mode2, op2);
14580 pat = GEN_FCN (icode) (op0, op1, op2);
14581 if (pat)
14582 emit_insn (pat);
14584 return NULL_RTX;
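/* stxvl stores only the first LEN bytes of the source vector.  A
   sketch of the user-level form (assuming <altivec.h> and
   -mcpu=power9; BUF is a hypothetical buffer):

     vector unsigned char v = vec_splats ((unsigned char) 0xff);
     vec_xst_len (v, buf, 7);

   which stores bytes 0..6 of V to BUF.  */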
14587 static rtx
14588 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14590 tree arg0 = CALL_EXPR_ARG (exp, 0);
14591 tree arg1 = CALL_EXPR_ARG (exp, 1);
14592 tree arg2 = CALL_EXPR_ARG (exp, 2);
14593 rtx op0 = expand_normal (arg0);
14594 rtx op1 = expand_normal (arg1);
14595 rtx op2 = expand_normal (arg2);
14596 rtx pat, addr, rawaddr;
14597 machine_mode tmode = insn_data[icode].operand[0].mode;
14598 machine_mode smode = insn_data[icode].operand[1].mode;
14599 machine_mode mode1 = Pmode;
14600 machine_mode mode2 = Pmode;
14602 /* Invalid arguments. Bail before doing anything stupid! */
14603 if (arg0 == error_mark_node
14604 || arg1 == error_mark_node
14605 || arg2 == error_mark_node)
14606 return const0_rtx;
14608 op2 = copy_to_mode_reg (mode2, op2);
14610 /* For STVX, express the RTL accurately by ANDing the address with -16.
14611 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14612 so the raw address is fine. */
14613 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14614 || icode == CODE_FOR_altivec_stvx_v2di_2op
14615 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14616 || icode == CODE_FOR_altivec_stvx_v4si_2op
14617 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14618 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14620 if (op1 == const0_rtx)
14621 rawaddr = op2;
14622 else
14624 op1 = copy_to_mode_reg (mode1, op1);
14625 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14628 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14629 addr = gen_rtx_MEM (tmode, addr);
14631 op0 = copy_to_mode_reg (tmode, op0);
14633 /* For -maltivec=be, emit a permute to swap the elements, followed
14634 by the store. */
14635 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14637 rtx temp = gen_reg_rtx (tmode);
14638 rtx sel = swap_selector_for_mode (tmode);
14639 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14640 UNSPEC_VPERM);
14641 emit_insn (gen_rtx_SET (temp, vperm));
14642 emit_insn (gen_rtx_SET (addr, temp));
14644 else
14645 emit_insn (gen_rtx_SET (addr, op0));
14647 else
14649 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14650 op0 = copy_to_mode_reg (smode, op0);
14652 if (op1 == const0_rtx)
14653 addr = gen_rtx_MEM (tmode, op2);
14654 else
14656 op1 = copy_to_mode_reg (mode1, op1);
14657 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14660 pat = GEN_FCN (icode) (addr, op0);
14661 if (pat)
14662 emit_insn (pat);
14665 return NULL_RTX;
14668 /* Return the appropriate SPR number associated with the given builtin. */
14669 static inline HOST_WIDE_INT
14670 htm_spr_num (enum rs6000_builtins code)
14672 if (code == HTM_BUILTIN_GET_TFHAR
14673 || code == HTM_BUILTIN_SET_TFHAR)
14674 return TFHAR_SPR;
14675 else if (code == HTM_BUILTIN_GET_TFIAR
14676 || code == HTM_BUILTIN_SET_TFIAR)
14677 return TFIAR_SPR;
14678 else if (code == HTM_BUILTIN_GET_TEXASR
14679 || code == HTM_BUILTIN_SET_TEXASR)
14680 return TEXASR_SPR;
14681 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14682 || code == HTM_BUILTIN_SET_TEXASRU);
14683 return TEXASRU_SPR;
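/* These SPR numbers back the user-visible HTM accessors, e.g.
   (a sketch assuming -mhtm):

     unsigned long texasr = __builtin_get_texasr ();
     __builtin_set_tfhar (texasr);

   Each accessor expands to a single mfspr or mtspr of the SPR
   selected here.  */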
14686 /* Return the appropriate SPR regno associated with the given builtin. */
14687 static inline HOST_WIDE_INT
14688 htm_spr_regno (enum rs6000_builtins code)
14690 if (code == HTM_BUILTIN_GET_TFHAR
14691 || code == HTM_BUILTIN_SET_TFHAR)
14692 return TFHAR_REGNO;
14693 else if (code == HTM_BUILTIN_GET_TFIAR
14694 || code == HTM_BUILTIN_SET_TFIAR)
14695 return TFIAR_REGNO;
14696 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14697 || code == HTM_BUILTIN_SET_TEXASR
14698 || code == HTM_BUILTIN_GET_TEXASRU
14699 || code == HTM_BUILTIN_SET_TEXASRU);
14700 return TEXASR_REGNO;
14703 /* Return the correct ICODE value depending on whether we are
14704 setting or reading the HTM SPRs. */
14705 static inline enum insn_code
14706 rs6000_htm_spr_icode (bool nonvoid)
14708 if (nonvoid)
14709 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14710 else
14711 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14714 /* Expand the HTM builtin in EXP and store the result in TARGET.
14715 Store true in *EXPANDEDP if we found a builtin to expand. */
14716 static rtx
14717 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14719 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14720 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14721 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14722 const struct builtin_description *d;
14723 size_t i;
14725 *expandedp = true;
14727 if (!TARGET_POWERPC64
14728 && (fcode == HTM_BUILTIN_TABORTDC
14729 || fcode == HTM_BUILTIN_TABORTDCI))
14731 size_t uns_fcode = (size_t)fcode;
14732 const char *name = rs6000_builtin_info[uns_fcode].name;
14733 error ("builtin %qs is only valid in 64-bit mode", name);
14734 return const0_rtx;
14737 /* Expand the HTM builtins. */
14738 d = bdesc_htm;
14739 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14740 if (d->code == fcode)
14742 rtx op[MAX_HTM_OPERANDS], pat;
14743 int nopnds = 0;
14744 tree arg;
14745 call_expr_arg_iterator iter;
14746 unsigned attr = rs6000_builtin_info[fcode].attr;
14747 enum insn_code icode = d->icode;
14748 const struct insn_operand_data *insn_op;
14749 bool uses_spr = (attr & RS6000_BTC_SPR);
14750 rtx cr = NULL_RTX;
14752 if (uses_spr)
14753 icode = rs6000_htm_spr_icode (nonvoid);
14754 insn_op = &insn_data[icode].operand[0];
14756 if (nonvoid)
14758 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14759 if (!target
14760 || GET_MODE (target) != tmode
14761 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14762 target = gen_reg_rtx (tmode);
14763 if (uses_spr)
14764 op[nopnds++] = target;
14767 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14769 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14770 return const0_rtx;
14772 insn_op = &insn_data[icode].operand[nopnds];
14774 op[nopnds] = expand_normal (arg);
14776 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14778 if (!strcmp (insn_op->constraint, "n"))
14780 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14781 if (!CONST_INT_P (op[nopnds]))
14782 error ("argument %d must be an unsigned literal", arg_num);
14783 else
14784 error ("argument %d is an unsigned literal that is "
14785 "out of range", arg_num);
14786 return const0_rtx;
14788 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14791 nopnds++;
14794 /* Handle the builtins for extended mnemonics. These accept
14795 no arguments, but map to builtins that take arguments. */
14796 switch (fcode)
14798 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14799 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14800 op[nopnds++] = GEN_INT (1);
14801 if (flag_checking)
14802 attr |= RS6000_BTC_UNARY;
14803 break;
14804 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14805 op[nopnds++] = GEN_INT (0);
14806 if (flag_checking)
14807 attr |= RS6000_BTC_UNARY;
14808 break;
14809 default:
14810 break;
14813 /* If this builtin accesses SPRs, then pass in the appropriate
14814 SPR number and SPR regno as the last two operands. */
14815 if (uses_spr)
14817 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14818 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14819 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14821 /* If this builtin accesses a CR, then pass in a scratch
14822 CR as the last operand. */
14823 else if (attr & RS6000_BTC_CR)
14824 { cr = gen_reg_rtx (CCmode);
14825 op[nopnds++] = cr;
14828 if (flag_checking)
14830 int expected_nopnds = 0;
14831 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14832 expected_nopnds = 1;
14833 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14834 expected_nopnds = 2;
14835 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14836 expected_nopnds = 3;
14837 if (!(attr & RS6000_BTC_VOID))
14838 expected_nopnds += 1;
14839 if (uses_spr)
14840 expected_nopnds += 2;
14842 gcc_assert (nopnds == expected_nopnds
14843 && nopnds <= MAX_HTM_OPERANDS);
14846 switch (nopnds)
14848 case 1:
14849 pat = GEN_FCN (icode) (op[0]);
14850 break;
14851 case 2:
14852 pat = GEN_FCN (icode) (op[0], op[1]);
14853 break;
14854 case 3:
14855 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14856 break;
14857 case 4:
14858 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14859 break;
14860 default:
14861 gcc_unreachable ();
14863 if (!pat)
14864 return NULL_RTX;
14865 emit_insn (pat);
14867 if (attr & RS6000_BTC_CR)
14869 if (fcode == HTM_BUILTIN_TBEGIN)
14871 /* Emit code to set TARGET to true or false depending on
14872 whether the tbegin. instruction succeeded or failed
14873 to start a transaction. We do this by placing the 1's
14874 complement of CR's EQ bit into TARGET. */
14875 rtx scratch = gen_reg_rtx (SImode);
14876 emit_insn (gen_rtx_SET (scratch,
14877 gen_rtx_EQ (SImode, cr,
14878 const0_rtx)));
14879 emit_insn (gen_rtx_SET (target,
14880 gen_rtx_XOR (SImode, scratch,
14881 GEN_INT (1))));
14883 else
14885 /* Emit code to copy the 4-bit condition register field
14886 CR into the least significant end of register TARGET. */
14887 rtx scratch1 = gen_reg_rtx (SImode);
14888 rtx scratch2 = gen_reg_rtx (SImode);
14889 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14890 emit_insn (gen_movcc (subreg, cr));
14891 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14892 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14896 if (nonvoid)
14897 return target;
14898 return const0_rtx;
14901 *expandedp = false;
14902 return NULL_RTX;
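/* In effect, the CR handling above for __builtin_tbegin computes

     scratch = (cr0 == 0);
     target  = scratch ^ 1;

   i.e. the complement of CR0's EQ bit, so the builtin returns
   nonzero when a transaction was successfully started.  */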
14905 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14907 static rtx
14908 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14909 rtx target)
14911 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14912 if (fcode == RS6000_BUILTIN_CPU_INIT)
14913 return const0_rtx;
14915 if (target == 0 || GET_MODE (target) != SImode)
14916 target = gen_reg_rtx (SImode);
14918 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14919 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14920 /* The target_clones pass creates an ARRAY_REF instead of a STRING_CST;
14921 convert it back to a STRING_CST. */
14922 if (TREE_CODE (arg) == ARRAY_REF
14923 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14924 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14925 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14926 arg = TREE_OPERAND (arg, 0);
14928 if (TREE_CODE (arg) != STRING_CST)
14930 error ("builtin %qs only accepts a string argument",
14931 rs6000_builtin_info[(size_t) fcode].name);
14932 return const0_rtx;
14935 if (fcode == RS6000_BUILTIN_CPU_IS)
14937 const char *cpu = TREE_STRING_POINTER (arg);
14938 rtx cpuid = NULL_RTX;
14939 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14940 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14942 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14943 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14944 break;
14946 if (cpuid == NULL_RTX)
14948 /* Invalid CPU argument. */
14949 error ("cpu %qs is an invalid argument to builtin %qs",
14950 cpu, rs6000_builtin_info[(size_t) fcode].name);
14951 return const0_rtx;
14954 rtx platform = gen_reg_rtx (SImode);
14955 rtx tcbmem = gen_const_mem (SImode,
14956 gen_rtx_PLUS (Pmode,
14957 gen_rtx_REG (Pmode, TLS_REGNUM),
14958 GEN_INT (TCB_PLATFORM_OFFSET)));
14959 emit_move_insn (platform, tcbmem);
14960 emit_insn (gen_eqsi3 (target, platform, cpuid));
14962 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14964 const char *hwcap = TREE_STRING_POINTER (arg);
14965 rtx mask = NULL_RTX;
14966 int hwcap_offset;
14967 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14968 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14970 mask = GEN_INT (cpu_supports_info[i].mask);
14971 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14972 break;
14974 if (mask == NULL_RTX)
14976 /* Invalid HWCAP argument. */
14977 error ("%s %qs is an invalid argument to builtin %qs",
14978 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14979 return const0_rtx;
14982 rtx tcb_hwcap = gen_reg_rtx (SImode);
14983 rtx tcbmem = gen_const_mem (SImode,
14984 gen_rtx_PLUS (Pmode,
14985 gen_rtx_REG (Pmode, TLS_REGNUM),
14986 GEN_INT (hwcap_offset)));
14987 emit_move_insn (tcb_hwcap, tcbmem);
14988 rtx scratch1 = gen_reg_rtx (SImode);
14989 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14990 rtx scratch2 = gen_reg_rtx (SImode);
14991 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14992 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14994 else
14995 gcc_unreachable ();
14997 /* Record that we have expanded a CPU builtin, so that we can later
14998 emit a reference to the special symbol exported by LIBC to ensure we
14999 do not link against an old LIBC that doesn't support this feature. */
15000 cpu_builtin_p = true;
15002 #else
15003 warning (0, "builtin %qs needs GLIBC (2.23 or newer) that exports hardware "
15004 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
15006 /* For old LIBCs, always return FALSE. */
15007 emit_move_insn (target, GEN_INT (0));
15008 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15010 return target;
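/* User-level view of the expansion above (a sketch; requires a glibc
   that stores the platform and HWCAP words in the TCB; the called
   functions are hypothetical):

     if (__builtin_cpu_is ("power9"))
       use_power9_code ();
     if (__builtin_cpu_supports ("vsx"))
       use_vsx_code ();

   Each test compiles to one load from the TCB plus a compare or a
   mask-and-test, as emitted above.  */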
15013 static rtx
15014 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15016 rtx pat;
15017 tree arg0 = CALL_EXPR_ARG (exp, 0);
15018 tree arg1 = CALL_EXPR_ARG (exp, 1);
15019 tree arg2 = CALL_EXPR_ARG (exp, 2);
15020 rtx op0 = expand_normal (arg0);
15021 rtx op1 = expand_normal (arg1);
15022 rtx op2 = expand_normal (arg2);
15023 machine_mode tmode = insn_data[icode].operand[0].mode;
15024 machine_mode mode0 = insn_data[icode].operand[1].mode;
15025 machine_mode mode1 = insn_data[icode].operand[2].mode;
15026 machine_mode mode2 = insn_data[icode].operand[3].mode;
15028 if (icode == CODE_FOR_nothing)
15029 /* Builtin not supported on this processor. */
15030 return 0;
15032 /* If we got invalid arguments, bail out before generating bad rtl. */
15033 if (arg0 == error_mark_node
15034 || arg1 == error_mark_node
15035 || arg2 == error_mark_node)
15036 return const0_rtx;
15038 /* Check and prepare the argument depending on the instruction code.
15040 Note that a switch statement instead of this sequence of tests
15041 would be incorrect, as many of the CODE_FOR values could be
15042 CODE_FOR_nothing, and that would yield multiple case labels
15043 with identical values. We'd never reach here at runtime in
15044 that case. */
15045 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15046 || icode == CODE_FOR_altivec_vsldoi_v2df
15047 || icode == CODE_FOR_altivec_vsldoi_v4si
15048 || icode == CODE_FOR_altivec_vsldoi_v8hi
15049 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15051 /* Only allow 4-bit unsigned literals. */
15052 STRIP_NOPS (arg2);
15053 if (TREE_CODE (arg2) != INTEGER_CST
15054 || TREE_INT_CST_LOW (arg2) & ~0xf)
15056 error ("argument 3 must be a 4-bit unsigned literal");
15057 return CONST0_RTX (tmode);
15060 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15061 || icode == CODE_FOR_vsx_xxpermdi_v2di
15062 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15063 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15064 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15065 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15066 || icode == CODE_FOR_vsx_xxpermdi_v4si
15067 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15068 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15069 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15070 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15071 || icode == CODE_FOR_vsx_xxsldwi_v4si
15072 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15073 || icode == CODE_FOR_vsx_xxsldwi_v2di
15074 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15076 /* Only allow 2-bit unsigned literals. */
15077 STRIP_NOPS (arg2);
15078 if (TREE_CODE (arg2) != INTEGER_CST
15079 || TREE_INT_CST_LOW (arg2) & ~0x3)
15081 error ("argument 3 must be a 2-bit unsigned literal");
15082 return CONST0_RTX (tmode);
15085 else if (icode == CODE_FOR_vsx_set_v2df
15086 || icode == CODE_FOR_vsx_set_v2di
15087 || icode == CODE_FOR_bcdadd
15088 || icode == CODE_FOR_bcdadd_lt
15089 || icode == CODE_FOR_bcdadd_eq
15090 || icode == CODE_FOR_bcdadd_gt
15091 || icode == CODE_FOR_bcdsub
15092 || icode == CODE_FOR_bcdsub_lt
15093 || icode == CODE_FOR_bcdsub_eq
15094 || icode == CODE_FOR_bcdsub_gt)
15096 /* Only allow 1-bit unsigned literals. */
15097 STRIP_NOPS (arg2);
15098 if (TREE_CODE (arg2) != INTEGER_CST
15099 || TREE_INT_CST_LOW (arg2) & ~0x1)
15101 error ("argument 3 must be a 1-bit unsigned literal");
15102 return CONST0_RTX (tmode);
15105 else if (icode == CODE_FOR_dfp_ddedpd_dd
15106 || icode == CODE_FOR_dfp_ddedpd_td)
15108 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15109 STRIP_NOPS (arg0);
15110 if (TREE_CODE (arg0) != INTEGER_CST
15111 || TREE_INT_CST_LOW (arg0) & ~0x3)
15113 error ("argument 1 must be 0 or 2");
15114 return CONST0_RTX (tmode);
15117 else if (icode == CODE_FOR_dfp_denbcd_dd
15118 || icode == CODE_FOR_dfp_denbcd_td)
15120 /* Only allow 1-bit unsigned literals. */
15121 STRIP_NOPS (arg0);
15122 if (TREE_CODE (arg0) != INTEGER_CST
15123 || TREE_INT_CST_LOW (arg0) & ~0x1)
15125 error ("argument 1 must be a 1-bit unsigned literal");
15126 return CONST0_RTX (tmode);
15129 else if (icode == CODE_FOR_dfp_dscli_dd
15130 || icode == CODE_FOR_dfp_dscli_td
15131 || icode == CODE_FOR_dfp_dscri_dd
15132 || icode == CODE_FOR_dfp_dscri_td)
15134 /* Only allow 6-bit unsigned literals. */
15135 STRIP_NOPS (arg1);
15136 if (TREE_CODE (arg1) != INTEGER_CST
15137 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15139 error ("argument 2 must be a 6-bit unsigned literal");
15140 return CONST0_RTX (tmode);
15143 else if (icode == CODE_FOR_crypto_vshasigmaw
15144 || icode == CODE_FOR_crypto_vshasigmad)
15146 /* Check whether the 2nd and 3rd arguments are integer constants and in
15147 range and prepare arguments. */
15148 STRIP_NOPS (arg1);
15149 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15151 error ("argument 2 must be 0 or 1");
15152 return CONST0_RTX (tmode);
15155 STRIP_NOPS (arg2);
15156 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15158 error ("argument 3 must be in the range 0..15");
15159 return CONST0_RTX (tmode);
15163 if (target == 0
15164 || GET_MODE (target) != tmode
15165 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15166 target = gen_reg_rtx (tmode);
15168 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15169 op0 = copy_to_mode_reg (mode0, op0);
15170 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15171 op1 = copy_to_mode_reg (mode1, op1);
15172 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15173 op2 = copy_to_mode_reg (mode2, op2);
15175 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15176 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15177 else
15178 pat = GEN_FCN (icode) (target, op0, op1, op2);
15179 if (! pat)
15180 return 0;
15181 emit_insn (pat);
15183 return target;
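/* These range checks are why, e.g., vec_sld requires a compile-time
   shift count (a sketch assuming <altivec.h>; A and B are
   hypothetical vectors):

     vector int r = vec_sld (a, b, 3);

   Passing a non-constant, or a constant outside 0..15, reaches the
   "4-bit unsigned literal" diagnostic above.  */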
15186 /* Expand the lvx builtins. */
15187 static rtx
15188 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15190 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15191 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15192 tree arg0;
15193 machine_mode tmode, mode0;
15194 rtx pat, op0;
15195 enum insn_code icode;
15197 switch (fcode)
15199 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15200 icode = CODE_FOR_vector_altivec_load_v16qi;
15201 break;
15202 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15203 icode = CODE_FOR_vector_altivec_load_v8hi;
15204 break;
15205 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15206 icode = CODE_FOR_vector_altivec_load_v4si;
15207 break;
15208 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15209 icode = CODE_FOR_vector_altivec_load_v4sf;
15210 break;
15211 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15212 icode = CODE_FOR_vector_altivec_load_v2df;
15213 break;
15214 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15215 icode = CODE_FOR_vector_altivec_load_v2di;
15216 break;
15217 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15218 icode = CODE_FOR_vector_altivec_load_v1ti;
15219 break;
15220 default:
15221 *expandedp = false;
15222 return NULL_RTX;
15225 *expandedp = true;
15227 arg0 = CALL_EXPR_ARG (exp, 0);
15228 op0 = expand_normal (arg0);
15229 tmode = insn_data[icode].operand[0].mode;
15230 mode0 = insn_data[icode].operand[1].mode;
15232 if (target == 0
15233 || GET_MODE (target) != tmode
15234 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15235 target = gen_reg_rtx (tmode);
15237 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15238 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15240 pat = GEN_FCN (icode) (target, op0);
15241 if (! pat)
15242 return 0;
15243 emit_insn (pat);
15244 return target;
15247 /* Expand the stvx builtins. */
15248 static rtx
15249 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15250 bool *expandedp)
15252 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15253 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15254 tree arg0, arg1;
15255 machine_mode mode0, mode1;
15256 rtx pat, op0, op1;
15257 enum insn_code icode;
15259 switch (fcode)
15261 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15262 icode = CODE_FOR_vector_altivec_store_v16qi;
15263 break;
15264 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15265 icode = CODE_FOR_vector_altivec_store_v8hi;
15266 break;
15267 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15268 icode = CODE_FOR_vector_altivec_store_v4si;
15269 break;
15270 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15271 icode = CODE_FOR_vector_altivec_store_v4sf;
15272 break;
15273 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15274 icode = CODE_FOR_vector_altivec_store_v2df;
15275 break;
15276 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15277 icode = CODE_FOR_vector_altivec_store_v2di;
15278 break;
15279 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15280 icode = CODE_FOR_vector_altivec_store_v1ti;
15281 break;
15282 default:
15283 *expandedp = false;
15284 return NULL_RTX;
15287 arg0 = CALL_EXPR_ARG (exp, 0);
15288 arg1 = CALL_EXPR_ARG (exp, 1);
15289 op0 = expand_normal (arg0);
15290 op1 = expand_normal (arg1);
15291 mode0 = insn_data[icode].operand[0].mode;
15292 mode1 = insn_data[icode].operand[1].mode;
15294 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15295 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15296 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15297 op1 = copy_to_mode_reg (mode1, op1);
15299 pat = GEN_FCN (icode) (op0, op1);
15300 if (pat)
15301 emit_insn (pat);
15303 *expandedp = true;
15304 return NULL_RTX;
15307 /* Expand the dst builtins. */
15308 static rtx
15309 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15310 bool *expandedp)
15312 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15313 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15314 tree arg0, arg1, arg2;
15315 machine_mode mode0, mode1;
15316 rtx pat, op0, op1, op2;
15317 const struct builtin_description *d;
15318 size_t i;
15320 *expandedp = false;
15322 /* Handle DST variants. */
15323 d = bdesc_dst;
15324 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15325 if (d->code == fcode)
15327 arg0 = CALL_EXPR_ARG (exp, 0);
15328 arg1 = CALL_EXPR_ARG (exp, 1);
15329 arg2 = CALL_EXPR_ARG (exp, 2);
15330 op0 = expand_normal (arg0);
15331 op1 = expand_normal (arg1);
15332 op2 = expand_normal (arg2);
15333 mode0 = insn_data[d->icode].operand[0].mode;
15334 mode1 = insn_data[d->icode].operand[1].mode;
15336 /* Invalid arguments; bail out before generating bad rtl. */
15337 if (arg0 == error_mark_node
15338 || arg1 == error_mark_node
15339 || arg2 == error_mark_node)
15340 return const0_rtx;
15342 *expandedp = true;
15343 STRIP_NOPS (arg2);
15344 if (TREE_CODE (arg2) != INTEGER_CST
15345 || TREE_INT_CST_LOW (arg2) & ~0x3)
15347 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15348 return const0_rtx;
15351 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15352 op0 = copy_to_mode_reg (Pmode, op0);
15353 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15354 op1 = copy_to_mode_reg (mode1, op1);
15356 pat = GEN_FCN (d->icode) (op0, op1, op2);
15357 if (pat != 0)
15358 emit_insn (pat);
15360 return NULL_RTX;
15363 return NULL_RTX;
15366 /* Expand vec_init builtin. */
15367 static rtx
15368 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15370 machine_mode tmode = TYPE_MODE (type);
15371 machine_mode inner_mode = GET_MODE_INNER (tmode);
15372 int i, n_elt = GET_MODE_NUNITS (tmode);
15374 gcc_assert (VECTOR_MODE_P (tmode));
15375 gcc_assert (n_elt == call_expr_nargs (exp));
15377 if (!target || !register_operand (target, tmode))
15378 target = gen_reg_rtx (tmode);
15380 /* If we have a vector comprising a single element, such as V1TImode, do
15381 the initialization directly. */
15382 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15384 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15385 emit_move_insn (target, gen_lowpart (tmode, x));
15387 else
15389 rtvec v = rtvec_alloc (n_elt);
15391 for (i = 0; i < n_elt; ++i)
15393 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15394 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15397 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15400 return target;
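/* These expansions back the __builtin_vec_init_* builtins, which
   build a vector from scalar arguments, roughly (a sketch with
   hypothetical scalars):

     vector int v = __builtin_vec_init_v4si (a, b, c, d);

   A single-element vector such as V1TImode takes the direct
   gen_lowpart path; anything wider goes through
   rs6000_expand_vector_init.  */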
15403 /* Return the integer constant in ARG. Constrain it to be in the range
15404 of the subparts of VEC_TYPE; issue an error if not. */
15406 static int
15407 get_element_number (tree vec_type, tree arg)
15409 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15411 if (!tree_fits_uhwi_p (arg)
15412 || (elt = tree_to_uhwi (arg), elt > max))
15414 error ("selector must be an integer constant in the range 0..%wi", max);
15415 return 0;
15418 return elt;
15421 /* Expand vec_set builtin. */
15422 static rtx
15423 altivec_expand_vec_set_builtin (tree exp)
15425 machine_mode tmode, mode1;
15426 tree arg0, arg1, arg2;
15427 int elt;
15428 rtx op0, op1;
15430 arg0 = CALL_EXPR_ARG (exp, 0);
15431 arg1 = CALL_EXPR_ARG (exp, 1);
15432 arg2 = CALL_EXPR_ARG (exp, 2);
15434 tmode = TYPE_MODE (TREE_TYPE (arg0));
15435 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15436 gcc_assert (VECTOR_MODE_P (tmode));
15438 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15439 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15440 elt = get_element_number (TREE_TYPE (arg0), arg2);
15442 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15443 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15445 op0 = force_reg (tmode, op0);
15446 op1 = force_reg (mode1, op1);
15448 rs6000_expand_vector_set (op0, op1, elt);
15450 return op0;
15453 /* Expand vec_ext builtin. */
15454 static rtx
15455 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15457 machine_mode tmode, mode0;
15458 tree arg0, arg1;
15459 rtx op0;
15460 rtx op1;
15462 arg0 = CALL_EXPR_ARG (exp, 0);
15463 arg1 = CALL_EXPR_ARG (exp, 1);
15465 op0 = expand_normal (arg0);
15466 op1 = expand_normal (arg1);
15468 /* Call get_element_number to validate arg1 if it is a constant. */
15469 if (TREE_CODE (arg1) == INTEGER_CST)
15470 (void) get_element_number (TREE_TYPE (arg0), arg1);
15472 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15473 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15474 gcc_assert (VECTOR_MODE_P (mode0));
15476 op0 = force_reg (mode0, op0);
15478 if (optimize || !target || !register_operand (target, tmode))
15479 target = gen_reg_rtx (tmode);
15481 rs6000_expand_vector_extract (target, op0, op1);
15483 return target;
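/* For example, extracting one lane (a sketch assuming <altivec.h>;
   V is a hypothetical vector int):

     int x = vec_extract (v, 2);

   A constant selector is validated by get_element_number; a variable
   selector is handled inside rs6000_expand_vector_extract.  */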
15486 /* Expand the builtin in EXP and store the result in TARGET. Store
15487 true in *EXPANDEDP if we found a builtin to expand. */
15488 static rtx
15489 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15491 const struct builtin_description *d;
15492 size_t i;
15493 enum insn_code icode;
15494 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15495 tree arg0, arg1, arg2;
15496 rtx op0, pat;
15497 machine_mode tmode, mode0;
15498 enum rs6000_builtins fcode
15499 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15501 if (rs6000_overloaded_builtin_p (fcode))
15503 *expandedp = true;
15504 error ("unresolved overload for Altivec builtin %qF", fndecl);
15506 /* Given it is invalid, just generate a normal call. */
15507 return expand_call (exp, target, false);
15510 target = altivec_expand_ld_builtin (exp, target, expandedp);
15511 if (*expandedp)
15512 return target;
15514 target = altivec_expand_st_builtin (exp, target, expandedp);
15515 if (*expandedp)
15516 return target;
15518 target = altivec_expand_dst_builtin (exp, target, expandedp);
15519 if (*expandedp)
15520 return target;
15522 *expandedp = true;
15524 switch (fcode)
15526 case ALTIVEC_BUILTIN_STVX_V2DF:
15527 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15528 case ALTIVEC_BUILTIN_STVX_V2DI:
15529 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15530 case ALTIVEC_BUILTIN_STVX_V4SF:
15531 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15532 case ALTIVEC_BUILTIN_STVX:
15533 case ALTIVEC_BUILTIN_STVX_V4SI:
15534 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15535 case ALTIVEC_BUILTIN_STVX_V8HI:
15536 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15537 case ALTIVEC_BUILTIN_STVX_V16QI:
15538 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15539 case ALTIVEC_BUILTIN_STVEBX:
15540 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15541 case ALTIVEC_BUILTIN_STVEHX:
15542 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15543 case ALTIVEC_BUILTIN_STVEWX:
15544 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15545 case ALTIVEC_BUILTIN_STVXL_V2DF:
15546 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15547 case ALTIVEC_BUILTIN_STVXL_V2DI:
15548 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15549 case ALTIVEC_BUILTIN_STVXL_V4SF:
15550 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15551 case ALTIVEC_BUILTIN_STVXL:
15552 case ALTIVEC_BUILTIN_STVXL_V4SI:
15553 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15554 case ALTIVEC_BUILTIN_STVXL_V8HI:
15555 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15556 case ALTIVEC_BUILTIN_STVXL_V16QI:
15557 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15559 case ALTIVEC_BUILTIN_STVLX:
15560 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15561 case ALTIVEC_BUILTIN_STVLXL:
15562 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15563 case ALTIVEC_BUILTIN_STVRX:
15564 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15565 case ALTIVEC_BUILTIN_STVRXL:
15566 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15568 case P9V_BUILTIN_STXVL:
15569 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15571 case VSX_BUILTIN_STXVD2X_V1TI:
15572 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15573 case VSX_BUILTIN_STXVD2X_V2DF:
15574 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15575 case VSX_BUILTIN_STXVD2X_V2DI:
15576 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15577 case VSX_BUILTIN_STXVW4X_V4SF:
15578 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15579 case VSX_BUILTIN_STXVW4X_V4SI:
15580 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15581 case VSX_BUILTIN_STXVW4X_V8HI:
15582 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15583 case VSX_BUILTIN_STXVW4X_V16QI:
15584 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15586 /* For the following on big-endian, it's OK to use any appropriate
15587 unaligned-supporting store, so use a generic expander. For
15588 little-endian, the exact element-reversing instruction must
15589 be used. */
15590 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15592 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15593 : CODE_FOR_vsx_st_elemrev_v2df);
15594 return altivec_expand_stv_builtin (code, exp);
15596 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15598 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15599 : CODE_FOR_vsx_st_elemrev_v2di);
15600 return altivec_expand_stv_builtin (code, exp);
15602 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15604 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15605 : CODE_FOR_vsx_st_elemrev_v4sf);
15606 return altivec_expand_stv_builtin (code, exp);
15608 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15610 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15611 : CODE_FOR_vsx_st_elemrev_v4si);
15612 return altivec_expand_stv_builtin (code, exp);
15614 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15616 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15617 : CODE_FOR_vsx_st_elemrev_v8hi);
15618 return altivec_expand_stv_builtin (code, exp);
15620 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15622 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15623 : CODE_FOR_vsx_st_elemrev_v16qi);
15624 return altivec_expand_stv_builtin (code, exp);
15627 case ALTIVEC_BUILTIN_MFVSCR:
15628 icode = CODE_FOR_altivec_mfvscr;
15629 tmode = insn_data[icode].operand[0].mode;
15631 if (target == 0
15632 || GET_MODE (target) != tmode
15633 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15634 target = gen_reg_rtx (tmode);
15636 pat = GEN_FCN (icode) (target);
15637 if (! pat)
15638 return 0;
15639 emit_insn (pat);
15640 return target;
15642 case ALTIVEC_BUILTIN_MTVSCR:
15643 icode = CODE_FOR_altivec_mtvscr;
15644 arg0 = CALL_EXPR_ARG (exp, 0);
15645 op0 = expand_normal (arg0);
15646 mode0 = insn_data[icode].operand[0].mode;
15648 /* If we got invalid arguments, bail out before generating bad rtl. */
15649 if (arg0 == error_mark_node)
15650 return const0_rtx;
15652 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15653 op0 = copy_to_mode_reg (mode0, op0);
15655 pat = GEN_FCN (icode) (op0);
15656 if (pat)
15657 emit_insn (pat);
15658 return NULL_RTX;
15660 case ALTIVEC_BUILTIN_DSSALL:
15661 emit_insn (gen_altivec_dssall ());
15662 return NULL_RTX;
15664 case ALTIVEC_BUILTIN_DSS:
15665 icode = CODE_FOR_altivec_dss;
15666 arg0 = CALL_EXPR_ARG (exp, 0);
15667 STRIP_NOPS (arg0);
15668 op0 = expand_normal (arg0);
15669 mode0 = insn_data[icode].operand[0].mode;
15671 /* If we got invalid arguments, bail out before generating bad rtl. */
15672 if (arg0 == error_mark_node)
15673 return const0_rtx;
15675 if (TREE_CODE (arg0) != INTEGER_CST
15676 || TREE_INT_CST_LOW (arg0) & ~0x3)
15678 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15679 return const0_rtx;
15682 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15683 op0 = copy_to_mode_reg (mode0, op0);
15685 emit_insn (gen_altivec_dss (op0));
15686 return NULL_RTX;
15688 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15689 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15690 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15691 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15692 case VSX_BUILTIN_VEC_INIT_V2DF:
15693 case VSX_BUILTIN_VEC_INIT_V2DI:
15694 case VSX_BUILTIN_VEC_INIT_V1TI:
15695 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15697 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15698 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15699 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15700 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15701 case VSX_BUILTIN_VEC_SET_V2DF:
15702 case VSX_BUILTIN_VEC_SET_V2DI:
15703 case VSX_BUILTIN_VEC_SET_V1TI:
15704 return altivec_expand_vec_set_builtin (exp);
15706 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15707 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15708 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15709 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15710 case VSX_BUILTIN_VEC_EXT_V2DF:
15711 case VSX_BUILTIN_VEC_EXT_V2DI:
15712 case VSX_BUILTIN_VEC_EXT_V1TI:
15713 return altivec_expand_vec_ext_builtin (exp, target);
15715 case P9V_BUILTIN_VEXTRACT4B:
15716 case P9V_BUILTIN_VEC_VEXTRACT4B:
15717 arg1 = CALL_EXPR_ARG (exp, 1);
15718 STRIP_NOPS (arg1);
15720 /* Generate a normal call if the argument is invalid. */
15721 if (arg1 == error_mark_node)
15722 return expand_call (exp, target, false);
15724 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15726 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15727 return expand_call (exp, target, false);
15729 break;
15731 case P9V_BUILTIN_VINSERT4B:
15732 case P9V_BUILTIN_VINSERT4B_DI:
15733 case P9V_BUILTIN_VEC_VINSERT4B:
15734 arg2 = CALL_EXPR_ARG (exp, 2);
15735 STRIP_NOPS (arg2);
15737 /* Generate a normal call if the argument is invalid. */
15738 if (arg2 == error_mark_node)
15739 return expand_call (exp, target, false);
15741 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15743 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15744 return expand_call (exp, target, false);
15746 break;
15748 default:
15749 break;
15750 /* Fall through. */
15753 /* Expand abs* operations. */
15754 d = bdesc_abs;
15755 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15756 if (d->code == fcode)
15757 return altivec_expand_abs_builtin (d->icode, exp, target);
15759 /* Expand the AltiVec predicates. */
15760 d = bdesc_altivec_preds;
15761 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15762 if (d->code == fcode)
15763 return altivec_expand_predicate_builtin (d->icode, exp, target);
15765 /* The LV* builtins were initialized differently, so they need special handling. */
15766 switch (fcode)
15768 case ALTIVEC_BUILTIN_LVSL:
15769 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15770 exp, target, false);
15771 case ALTIVEC_BUILTIN_LVSR:
15772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15773 exp, target, false);
15774 case ALTIVEC_BUILTIN_LVEBX:
15775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15776 exp, target, false);
15777 case ALTIVEC_BUILTIN_LVEHX:
15778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15779 exp, target, false);
15780 case ALTIVEC_BUILTIN_LVEWX:
15781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15782 exp, target, false);
15783 case ALTIVEC_BUILTIN_LVXL_V2DF:
15784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15785 exp, target, false);
15786 case ALTIVEC_BUILTIN_LVXL_V2DI:
15787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15788 exp, target, false);
15789 case ALTIVEC_BUILTIN_LVXL_V4SF:
15790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15791 exp, target, false);
15792 case ALTIVEC_BUILTIN_LVXL:
15793 case ALTIVEC_BUILTIN_LVXL_V4SI:
15794 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15795 exp, target, false);
15796 case ALTIVEC_BUILTIN_LVXL_V8HI:
15797 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15798 exp, target, false);
15799 case ALTIVEC_BUILTIN_LVXL_V16QI:
15800 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15801 exp, target, false);
15802 case ALTIVEC_BUILTIN_LVX_V2DF:
15803 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15804 exp, target, false);
15805 case ALTIVEC_BUILTIN_LVX_V2DI:
15806 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15807 exp, target, false);
15808 case ALTIVEC_BUILTIN_LVX_V4SF:
15809 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15810 exp, target, false);
15811 case ALTIVEC_BUILTIN_LVX:
15812 case ALTIVEC_BUILTIN_LVX_V4SI:
15813 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15814 exp, target, false);
15815 case ALTIVEC_BUILTIN_LVX_V8HI:
15816 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15817 exp, target, false);
15818 case ALTIVEC_BUILTIN_LVX_V16QI:
15819 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15820 exp, target, false);
15821 case ALTIVEC_BUILTIN_LVLX:
15822 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15823 exp, target, true);
15824 case ALTIVEC_BUILTIN_LVLXL:
15825 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15826 exp, target, true);
15827 case ALTIVEC_BUILTIN_LVRX:
15828 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15829 exp, target, true);
15830 case ALTIVEC_BUILTIN_LVRXL:
15831 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15832 exp, target, true);
15833 case VSX_BUILTIN_LXVD2X_V1TI:
15834 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15835 exp, target, false);
15836 case VSX_BUILTIN_LXVD2X_V2DF:
15837 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15838 exp, target, false);
15839 case VSX_BUILTIN_LXVD2X_V2DI:
15840 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15841 exp, target, false);
15842 case VSX_BUILTIN_LXVW4X_V4SF:
15843 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15844 exp, target, false);
15845 case VSX_BUILTIN_LXVW4X_V4SI:
15846 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15847 exp, target, false);
15848 case VSX_BUILTIN_LXVW4X_V8HI:
15849 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15850 exp, target, false);
15851 case VSX_BUILTIN_LXVW4X_V16QI:
15852 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15853 exp, target, false);
15854 /* For the following on big-endian, it's OK to use any appropriate
15855 unaligned-supporting load, so use a generic expander. For
15856 little-endian, the exact element-reversing instruction must
15857 be used. */
15858 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15860 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15861 : CODE_FOR_vsx_ld_elemrev_v2df);
15862 return altivec_expand_lv_builtin (code, exp, target, false);
15864 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15866 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15867 : CODE_FOR_vsx_ld_elemrev_v2di);
15868 return altivec_expand_lv_builtin (code, exp, target, false);
15870 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15872 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15873 : CODE_FOR_vsx_ld_elemrev_v4sf);
15874 return altivec_expand_lv_builtin (code, exp, target, false);
15876 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15878 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15879 : CODE_FOR_vsx_ld_elemrev_v4si);
15880 return altivec_expand_lv_builtin (code, exp, target, false);
15882 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15884 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15885 : CODE_FOR_vsx_ld_elemrev_v8hi);
15886 return altivec_expand_lv_builtin (code, exp, target, false);
15888 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15890 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15891 : CODE_FOR_vsx_ld_elemrev_v16qi);
15892 return altivec_expand_lv_builtin (code, exp, target, false);
15894 break;
15895 default:
15896 break;
15897 /* Fall through. */
15900 /* The XL_BE builtins are initialized to always load in big-endian element order. */
15901 switch (fcode)
15903 case VSX_BUILTIN_XL_BE_V2DI:
15905 enum insn_code code = CODE_FOR_vsx_load_v2di;
15906 return altivec_expand_xl_be_builtin (code, exp, target, false);
15908 break;
15909 case VSX_BUILTIN_XL_BE_V4SI:
15911 enum insn_code code = CODE_FOR_vsx_load_v4si;
15912 return altivec_expand_xl_be_builtin (code, exp, target, false);
15914 break;
15915 case VSX_BUILTIN_XL_BE_V8HI:
15917 enum insn_code code = CODE_FOR_vsx_load_v8hi;
15918 return altivec_expand_xl_be_builtin (code, exp, target, false);
15920 break;
15921 case VSX_BUILTIN_XL_BE_V16QI:
15923 enum insn_code code = CODE_FOR_vsx_load_v16qi;
15924 return altivec_expand_xl_be_builtin (code, exp, target, false);
15926 break;
15927 case VSX_BUILTIN_XL_BE_V2DF:
15929 enum insn_code code = CODE_FOR_vsx_load_v2df;
15930 return altivec_expand_xl_be_builtin (code, exp, target, false);
15932 break;
15933 case VSX_BUILTIN_XL_BE_V4SF:
15935 enum insn_code code = CODE_FOR_vsx_load_v4sf;
15936 return altivec_expand_xl_be_builtin (code, exp, target, false);
15938 break;
15939 default:
15940 break;
15941 /* Fall through. */
15944 *expandedp = false;
15945 return NULL_RTX;
15948 /* Expand the builtin in EXP and store the result in TARGET. Store
15949 true in *EXPANDEDP if we found a builtin to expand. */
15950 static rtx
15951 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15953 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15954 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15955 const struct builtin_description *d;
15956 size_t i;
15958 *expandedp = true;
15960 switch (fcode)
15962 case PAIRED_BUILTIN_STX:
15963 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15964 case PAIRED_BUILTIN_LX:
15965 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15966 default:
15967 break;
15968 /* Fall through. */
15971 /* Expand the paired predicates. */
15972 d = bdesc_paired_preds;
15973 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15974 if (d->code == fcode)
15975 return paired_expand_predicate_builtin (d->icode, exp, target);
15977 *expandedp = false;
15978 return NULL_RTX;
15981 static rtx
15982 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15984 rtx pat, scratch, tmp;
15985 tree form = CALL_EXPR_ARG (exp, 0);
15986 tree arg0 = CALL_EXPR_ARG (exp, 1);
15987 tree arg1 = CALL_EXPR_ARG (exp, 2);
15988 rtx op0 = expand_normal (arg0);
15989 rtx op1 = expand_normal (arg1);
15990 machine_mode mode0 = insn_data[icode].operand[1].mode;
15991 machine_mode mode1 = insn_data[icode].operand[2].mode;
15992 int form_int;
15993 enum rtx_code code;
15995 if (TREE_CODE (form) != INTEGER_CST)
15997 error ("argument 1 of %qs must be a constant",
15998 "__builtin_paired_predicate");
15999 return const0_rtx;
16001 else
16002 form_int = TREE_INT_CST_LOW (form);
16004 gcc_assert (mode0 == mode1);
16006 if (arg0 == error_mark_node || arg1 == error_mark_node)
16007 return const0_rtx;
16009 if (target == 0
16010 || GET_MODE (target) != SImode
16011 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16012 target = gen_reg_rtx (SImode);
16013 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16014 op0 = copy_to_mode_reg (mode0, op0);
16015 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16016 op1 = copy_to_mode_reg (mode1, op1);
16018 scratch = gen_reg_rtx (CCFPmode);
16020 pat = GEN_FCN (icode) (scratch, op0, op1);
16021 if (!pat)
16022 return const0_rtx;
16024 emit_insn (pat);
16026 switch (form_int)
16028 /* LT bit. */
16029 case 0:
16030 code = LT;
16031 break;
16032 /* GT bit. */
16033 case 1:
16034 code = GT;
16035 break;
16036 /* EQ bit. */
16037 case 2:
16038 code = EQ;
16039 break;
16040 /* UN bit. */
16041 case 3:
16042 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16043 return target;
16044 default:
16045 error ("argument 1 of %qs is out of range",
16046 "__builtin_paired_predicate");
16047 return const0_rtx;
16050 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16051 emit_move_insn (target, tmp);
16052 return target;
16055 /* Raise an error message for a builtin function that is called without the
16056 appropriate target options being set. */
16058 static void
16059 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16061 size_t uns_fncode = (size_t) fncode;
16062 const char *name = rs6000_builtin_info[uns_fncode].name;
16063 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16065 gcc_assert (name != NULL);
16066 if ((fnmask & RS6000_BTM_CELL) != 0)
16067 error ("builtin function %qs is only valid for the cell processor", name);
16068 else if ((fnmask & RS6000_BTM_VSX) != 0)
16069 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16070 else if ((fnmask & RS6000_BTM_HTM) != 0)
16071 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16072 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16073 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16074 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16075 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16076 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16077 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16078 error ("builtin function %qs requires the %qs and %qs options",
16079 name, "-mhard-dfp", "-mpower8-vector");
16080 else if ((fnmask & RS6000_BTM_DFP) != 0)
16081 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16082 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16083 error ("builtin function %qs requires the %qs option", name,
16084 "-mpower8-vector");
16085 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16086 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16087 error ("builtin function %qs requires the %qs and %qs options",
16088 name, "-mcpu=power9", "-m64");
16089 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16090 error ("builtin function %qs requires the %qs option", name,
16091 "-mcpu=power9");
16092 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16093 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16094 error ("builtin function %qs requires the %qs and %qs options",
16095 name, "-mcpu=power9", "-m64");
16096 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16097 error ("builtin function %qs requires the %qs option", name,
16098 "-mcpu=power9");
16099 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16100 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16101 error ("builtin function %qs requires the %qs and %qs options",
16102 name, "-mhard-float", "-mlong-double-128");
16103 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16104 error ("builtin function %qs requires the %qs option", name,
16105 "-mhard-float");
16106 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
16107 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
16108 name);
16109 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16110 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16111 else
16112 error ("builtin function %qs is not supported with the current options",
16113 name);
16116 /* Target hook for early folding of built-ins, shamelessly stolen
16117 from ia64.c. */
16119 static tree
16120 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16121 tree *args, bool ignore ATTRIBUTE_UNUSED)
16123 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16125 enum rs6000_builtins fn_code
16126 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16127 switch (fn_code)
16129 case RS6000_BUILTIN_NANQ:
16130 case RS6000_BUILTIN_NANSQ:
16132 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16133 const char *str = c_getstr (*args);
16134 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16135 REAL_VALUE_TYPE real;
16137 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16138 return build_real (type, real);
16139 return NULL_TREE;
16141 case RS6000_BUILTIN_INFQ:
16142 case RS6000_BUILTIN_HUGE_VALQ:
16144 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16145 REAL_VALUE_TYPE inf;
16146 real_inf (&inf);
16147 return build_real (type, inf);
16149 default:
16150 break;
16153 #ifdef SUBTARGET_FOLD_BUILTIN
16154 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16155 #else
16156 return NULL_TREE;
16157 #endif
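/* So, for example (a sketch; requires __float128 support):

     __float128 q = __builtin_nanq ("");
     __float128 i = __builtin_infq ();

   both fold here to compile-time REAL_CST constants instead of
   surviving as calls.  */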
16160 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16161 a constant, use rs6000_fold_builtin.) */
16163 bool
16164 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16166 gimple *stmt = gsi_stmt (*gsi);
16167 tree fndecl = gimple_call_fndecl (stmt);
16168 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16169 enum rs6000_builtins fn_code
16170 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16171 tree arg0, arg1, lhs;
16173 size_t uns_fncode = (size_t) fn_code;
16174 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16175 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16176 const char *fn_name2 = (icode != CODE_FOR_nothing)
16177 ? get_insn_name ((int) icode)
16178 : "nothing";
16180 if (TARGET_DEBUG_BUILTIN)
16181 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16182 fn_code, fn_name1, fn_name2);
16184 if (!rs6000_fold_gimple)
16185 return false;
16188 /* Generic solution to prevent gimple folding of code without an LHS. */
16188 if (!gimple_call_lhs (stmt))
16189 return false;
16191 switch (fn_code)
16193 /* Flavors of vec_add. We deliberately don't expand
16194 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16195 TImode, resulting in much poorer code generation. */
16196 case ALTIVEC_BUILTIN_VADDUBM:
16197 case ALTIVEC_BUILTIN_VADDUHM:
16198 case ALTIVEC_BUILTIN_VADDUWM:
16199 case P8V_BUILTIN_VADDUDM:
16200 case ALTIVEC_BUILTIN_VADDFP:
16201 case VSX_BUILTIN_XVADDDP:
16203 arg0 = gimple_call_arg (stmt, 0);
16204 arg1 = gimple_call_arg (stmt, 1);
16205 lhs = gimple_call_lhs (stmt);
16206 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16207 gimple_set_location (g, gimple_location (stmt));
16208 gsi_replace (gsi, g, true);
16209 return true;
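/* At the source level this rewrite turns, e.g. (a sketch assuming
   <altivec.h>):

     vector int r = vec_add (x, y);

   into plain GIMPLE "r = x + y", which later passes can optimize
   like any other addition.  */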
16211 /* Flavors of vec_sub. We deliberately don't expand
16212 P8V_BUILTIN_VSUBUQM. */
16213 case ALTIVEC_BUILTIN_VSUBUBM:
16214 case ALTIVEC_BUILTIN_VSUBUHM:
16215 case ALTIVEC_BUILTIN_VSUBUWM:
16216 case P8V_BUILTIN_VSUBUDM:
16217 case ALTIVEC_BUILTIN_VSUBFP:
16218 case VSX_BUILTIN_XVSUBDP:
16220 arg0 = gimple_call_arg (stmt, 0);
16221 arg1 = gimple_call_arg (stmt, 1);
16222 lhs = gimple_call_lhs (stmt);
16223 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16224 gimple_set_location (g, gimple_location (stmt));
16225 gsi_replace (gsi, g, true);
16226 return true;
16228 case VSX_BUILTIN_XVMULSP:
16229 case VSX_BUILTIN_XVMULDP:
16231 arg0 = gimple_call_arg (stmt, 0);
16232 arg1 = gimple_call_arg (stmt, 1);
16233 lhs = gimple_call_lhs (stmt);
16234 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16235 gimple_set_location (g, gimple_location (stmt));
16236 gsi_replace (gsi, g, true);
16237 return true;
16239 /* Even element flavors of vec_mul (signed). */
16240 case ALTIVEC_BUILTIN_VMULESB:
16241 case ALTIVEC_BUILTIN_VMULESH:
16242 case ALTIVEC_BUILTIN_VMULESW:
16243 /* Even element flavors of vec_mul (unsigned). */
16244 case ALTIVEC_BUILTIN_VMULEUB:
16245 case ALTIVEC_BUILTIN_VMULEUH:
16246 case ALTIVEC_BUILTIN_VMULEUW:
16248 arg0 = gimple_call_arg (stmt, 0);
16249 arg1 = gimple_call_arg (stmt, 1);
16250 lhs = gimple_call_lhs (stmt);
16251 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16252 gimple_set_location (g, gimple_location (stmt));
16253 gsi_replace (gsi, g, true);
16254 return true;
16256 /* Odd element flavors of vec_mul (signed). */
16257 case ALTIVEC_BUILTIN_VMULOSB:
16258 case ALTIVEC_BUILTIN_VMULOSH:
16259 case ALTIVEC_BUILTIN_VMULOSW:
16260 /* Odd element flavors of vec_mul (unsigned). */
16261 case ALTIVEC_BUILTIN_VMULOUB:
16262 case ALTIVEC_BUILTIN_VMULOUH:
16263 case ALTIVEC_BUILTIN_VMULOUW:
16265 arg0 = gimple_call_arg (stmt, 0);
16266 arg1 = gimple_call_arg (stmt, 1);
16267 lhs = gimple_call_lhs (stmt);
16268 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16269 gimple_set_location (g, gimple_location (stmt));
16270 gsi_replace (gsi, g, true);
16271 return true;
16273 /* Flavors of vec_div (Integer). */
16274 case VSX_BUILTIN_DIV_V2DI:
16275 case VSX_BUILTIN_UDIV_V2DI:
16277 arg0 = gimple_call_arg (stmt, 0);
16278 arg1 = gimple_call_arg (stmt, 1);
16279 lhs = gimple_call_lhs (stmt);
16280 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16281 gimple_set_location (g, gimple_location (stmt));
16282 gsi_replace (gsi, g, true);
16283 return true;
16285 /* Flavors of vec_div (Float). */
16286 case VSX_BUILTIN_XVDIVSP:
16287 case VSX_BUILTIN_XVDIVDP:
16289 arg0 = gimple_call_arg (stmt, 0);
16290 arg1 = gimple_call_arg (stmt, 1);
16291 lhs = gimple_call_lhs (stmt);
16292 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16293 gimple_set_location (g, gimple_location (stmt));
16294 gsi_replace (gsi, g, true);
16295 return true;
16297 /* Flavors of vec_and. */
16298 case ALTIVEC_BUILTIN_VAND:
16300 arg0 = gimple_call_arg (stmt, 0);
16301 arg1 = gimple_call_arg (stmt, 1);
16302 lhs = gimple_call_lhs (stmt);
16303 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16304 gimple_set_location (g, gimple_location (stmt));
16305 gsi_replace (gsi, g, true);
16306 return true;
16308 /* Flavors of vec_andc. */
16309 case ALTIVEC_BUILTIN_VANDC:
16311 arg0 = gimple_call_arg (stmt, 0);
16312 arg1 = gimple_call_arg (stmt, 1);
16313 lhs = gimple_call_lhs (stmt);
16314 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16315 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16316 gimple_set_location (g, gimple_location (stmt));
16317 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16318 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16319 gimple_set_location (g, gimple_location (stmt));
16320 gsi_replace (gsi, g, true);
16321 return true;
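/* Sketch of the two-statement sequence just built for vec_andc:

     temp = ~arg1;
     lhs = arg0 & temp;

   i.e. a & ~b; GIMPLE has no single and-with-complement operator,
   so the complement goes through a temporary.  */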
16323 /* Flavors of vec_nand. */
16324 case P8V_BUILTIN_VEC_NAND:
16325 case P8V_BUILTIN_NAND_V16QI:
16326 case P8V_BUILTIN_NAND_V8HI:
16327 case P8V_BUILTIN_NAND_V4SI:
16328 case P8V_BUILTIN_NAND_V4SF:
16329 case P8V_BUILTIN_NAND_V2DF:
16330 case P8V_BUILTIN_NAND_V2DI:
16332 arg0 = gimple_call_arg (stmt, 0);
16333 arg1 = gimple_call_arg (stmt, 1);
16334 lhs = gimple_call_lhs (stmt);
16335 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16336 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16337 gimple_set_location (g, gimple_location (stmt));
16338 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16339 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16340 gimple_set_location (g, gimple_location (stmt));
16341 gsi_replace (gsi, g, true);
16342 return true;
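/* Sketch of the sequence just built for vec_nand:

     temp = arg0 & arg1;
     lhs = ~temp;

   i.e. ~(a & b).  */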
16344 /* Flavors of vec_or. */
16345 case ALTIVEC_BUILTIN_VOR:
16347 arg0 = gimple_call_arg (stmt, 0);
16348 arg1 = gimple_call_arg (stmt, 1);
16349 lhs = gimple_call_lhs (stmt);
16350 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16351 gimple_set_location (g, gimple_location (stmt));
16352 gsi_replace (gsi, g, true);
16353 return true;
16355 /* Flavors of vec_orc. */
16356 case P8V_BUILTIN_ORC_V16QI:
16357 case P8V_BUILTIN_ORC_V8HI:
16358 case P8V_BUILTIN_ORC_V4SI:
16359 case P8V_BUILTIN_ORC_V4SF:
16360 case P8V_BUILTIN_ORC_V2DF:
16361 case P8V_BUILTIN_ORC_V2DI:
16363 arg0 = gimple_call_arg (stmt, 0);
16364 arg1 = gimple_call_arg (stmt, 1);
16365 lhs = gimple_call_lhs (stmt);
16366 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16367 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16368 gimple_set_location (g, gimple_location (stmt));
16369 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16370 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16371 gimple_set_location (g, gimple_location (stmt));
16372 gsi_replace (gsi, g, true);
16373 return true;
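/* Sketch of the sequence just built for vec_orc:

     temp = ~arg1;
     lhs = arg0 | temp;

   i.e. a | ~b.  */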
16375 /* Flavors of vec_xor. */
16376 case ALTIVEC_BUILTIN_VXOR:
16378 arg0 = gimple_call_arg (stmt, 0);
16379 arg1 = gimple_call_arg (stmt, 1);
16380 lhs = gimple_call_lhs (stmt);
16381 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16382 gimple_set_location (g, gimple_location (stmt));
16383 gsi_replace (gsi, g, true);
16384 return true;
16386 /* Flavors of vec_nor. */
16387 case ALTIVEC_BUILTIN_VNOR:
16389 arg0 = gimple_call_arg (stmt, 0);
16390 arg1 = gimple_call_arg (stmt, 1);
16391 lhs = gimple_call_lhs (stmt);
16392 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16393 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16394 gimple_set_location (g, gimple_location (stmt));
16395 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16396 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16397 gimple_set_location (g, gimple_location (stmt));
16398 gsi_replace (gsi, g, true);
16399 return true;
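/* Sketch of the sequence just built for vec_nor:

     temp = arg0 | arg1;
     lhs = ~temp;

   i.e. ~(a | b).  */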
16401 /* Flavors of vec_abs. */
16402 case ALTIVEC_BUILTIN_ABS_V16QI:
16403 case ALTIVEC_BUILTIN_ABS_V8HI:
16404 case ALTIVEC_BUILTIN_ABS_V4SI:
16405 case ALTIVEC_BUILTIN_ABS_V4SF:
16406 case P8V_BUILTIN_ABS_V2DI:
16407 case VSX_BUILTIN_XVABSDP:
16409 arg0 = gimple_call_arg (stmt, 0);
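/* Punt for signed integer elements whose overflow is undefined: the
   hardware instruction is well defined for the most negative element,
   but folding to ABS_EXPR there could introduce undefined behavior.  */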
16410 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16411 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16412 return false;
16413 lhs = gimple_call_lhs (stmt);
16414 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16415 gimple_set_location (g, gimple_location (stmt));
16416 gsi_replace (gsi, g, true);
16417 return true;
16419 /* Flavors of vec_min. */
16420 case VSX_BUILTIN_XVMINDP:
16421 case P8V_BUILTIN_VMINSD:
16422 case P8V_BUILTIN_VMINUD:
16423 case ALTIVEC_BUILTIN_VMINSB:
16424 case ALTIVEC_BUILTIN_VMINSH:
16425 case ALTIVEC_BUILTIN_VMINSW:
16426 case ALTIVEC_BUILTIN_VMINUB:
16427 case ALTIVEC_BUILTIN_VMINUH:
16428 case ALTIVEC_BUILTIN_VMINUW:
16429 case ALTIVEC_BUILTIN_VMINFP:
16431 arg0 = gimple_call_arg (stmt, 0);
16432 arg1 = gimple_call_arg (stmt, 1);
16433 lhs = gimple_call_lhs (stmt);
16434 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16435 gimple_set_location (g, gimple_location (stmt));
16436 gsi_replace (gsi, g, true);
16437 return true;
16439 /* Flavors of vec_max. */
16440 case VSX_BUILTIN_XVMAXDP:
16441 case P8V_BUILTIN_VMAXSD:
16442 case P8V_BUILTIN_VMAXUD:
16443 case ALTIVEC_BUILTIN_VMAXSB:
16444 case ALTIVEC_BUILTIN_VMAXSH:
16445 case ALTIVEC_BUILTIN_VMAXSW:
16446 case ALTIVEC_BUILTIN_VMAXUB:
16447 case ALTIVEC_BUILTIN_VMAXUH:
16448 case ALTIVEC_BUILTIN_VMAXUW:
16449 case ALTIVEC_BUILTIN_VMAXFP:
16451 arg0 = gimple_call_arg (stmt, 0);
16452 arg1 = gimple_call_arg (stmt, 1);
16453 lhs = gimple_call_lhs (stmt);
16454 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16455 gimple_set_location (g, gimple_location (stmt));
16456 gsi_replace (gsi, g, true);
16457 return true;
16459 /* Flavors of vec_eqv. */
16460 case P8V_BUILTIN_EQV_V16QI:
16461 case P8V_BUILTIN_EQV_V8HI:
16462 case P8V_BUILTIN_EQV_V4SI:
16463 case P8V_BUILTIN_EQV_V4SF:
16464 case P8V_BUILTIN_EQV_V2DF:
16465 case P8V_BUILTIN_EQV_V2DI:
16467 arg0 = gimple_call_arg (stmt, 0);
16468 arg1 = gimple_call_arg (stmt, 1);
16469 lhs = gimple_call_lhs (stmt);
16470 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16471 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16472 gimple_set_location (g, gimple_location (stmt));
16473 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16474 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16475 gimple_set_location (g, gimple_location (stmt));
16476 gsi_replace (gsi, g, true);
16477 return true;
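/* Sketch of the sequence just built for vec_eqv:

     temp = arg0 ^ arg1;
     lhs = ~temp;

   i.e. ~(a ^ b), bitwise equivalence.  */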
16479 /* Flavors of vec_rotate_left. */
16480 case ALTIVEC_BUILTIN_VRLB:
16481 case ALTIVEC_BUILTIN_VRLH:
16482 case ALTIVEC_BUILTIN_VRLW:
16483 case P8V_BUILTIN_VRLD:
16485 arg0 = gimple_call_arg (stmt, 0);
16486 arg1 = gimple_call_arg (stmt, 1);
16487 lhs = gimple_call_lhs (stmt);
16488 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16489 gimple_set_location (g, gimple_location (stmt));
16490 gsi_replace (gsi, g, true);
16491 return true;
16493 /* Flavors of vector shift right algebraic.
16494 vec_sra{b,h,w} -> vsra{b,h,w}. */
16495 case ALTIVEC_BUILTIN_VSRAB:
16496 case ALTIVEC_BUILTIN_VSRAH:
16497 case ALTIVEC_BUILTIN_VSRAW:
16498 case P8V_BUILTIN_VSRAD:
16500 arg0 = gimple_call_arg (stmt, 0);
16501 arg1 = gimple_call_arg (stmt, 1);
16502 lhs = gimple_call_lhs (stmt);
16503 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16504 gimple_set_location (g, gimple_location (stmt));
16505 gsi_replace (gsi, g, true);
16506 return true;
16508 /* Flavors of vector shift left.
16509 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16510 case ALTIVEC_BUILTIN_VSLB:
16511 case ALTIVEC_BUILTIN_VSLH:
16512 case ALTIVEC_BUILTIN_VSLW:
16513 case P8V_BUILTIN_VSLD:
16515 arg0 = gimple_call_arg (stmt, 0);
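/* As for vec_abs above, punt when signed overflow is undefined, since
   LSHIFT_EXPR on such a type could introduce undefined behavior that
   the hardware shift does not have.  */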
16516 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16517 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16518 return false;
16519 arg1 = gimple_call_arg (stmt, 1);
16520 lhs = gimple_call_lhs (stmt);
16521 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16522 gimple_set_location (g, gimple_location (stmt));
16523 gsi_replace (gsi, g, true);
16524 return true;
16526 /* Flavors of vector shift right. */
16527 case ALTIVEC_BUILTIN_VSRB:
16528 case ALTIVEC_BUILTIN_VSRH:
16529 case ALTIVEC_BUILTIN_VSRW:
16530 case P8V_BUILTIN_VSRD:
16532 arg0 = gimple_call_arg (stmt, 0);
16533 arg1 = gimple_call_arg (stmt, 1);
16534 lhs = gimple_call_lhs (stmt);
16535 gimple_seq stmts = NULL;
16536 /* Convert arg0 to unsigned. */
16537 tree arg0_unsigned
16538 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16539 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16540 tree res
16541 = gimple_build (&stmts, RSHIFT_EXPR,
16542 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16543 /* Convert result back to the lhs type. */
16544 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16545 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16546 update_call_from_tree (gsi, res);
16547 return true;
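/* Sketch of the sequence just built: these built-ins are logical
   (zero-filling) shifts, while RSHIFT_EXPR on a signed type would be
   arithmetic, so the operand is viewed as unsigned first:

     u = VIEW_CONVERT_EXPR <unsigned type> (arg0);
     r = u >> arg1;
     lhs = VIEW_CONVERT_EXPR <type of lhs> (r);  */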
16549 /* Vector loads. */
16550 case ALTIVEC_BUILTIN_LVX_V16QI:
16551 case ALTIVEC_BUILTIN_LVX_V8HI:
16552 case ALTIVEC_BUILTIN_LVX_V4SI:
16553 case ALTIVEC_BUILTIN_LVX_V4SF:
16554 case ALTIVEC_BUILTIN_LVX_V2DI:
16555 case ALTIVEC_BUILTIN_LVX_V2DF:
16557 arg0 = gimple_call_arg (stmt, 0); /* offset */
16558 arg1 = gimple_call_arg (stmt, 1); /* address */
16559 /* Do not fold for -maltivec=be on LE targets. */
16560 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16561 return false;
16562 lhs = gimple_call_lhs (stmt);
16563 location_t loc = gimple_location (stmt);
16564 /* Since arg1 may be cast to a different type, just use ptr_type_node
16565 here instead of trying to enforce TBAA on pointer types. */
16566 tree arg1_type = ptr_type_node;
16567 tree lhs_type = TREE_TYPE (lhs);
16568 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16569 the tree using the value from arg0. The resulting type will match
16570 the type of arg1. */
16571 gimple_seq stmts = NULL;
16572 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16573 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16574 arg1_type, arg1, temp_offset);
16575 /* Mask off any lower bits from the address. */
16576 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16577 arg1_type, temp_addr,
16578 build_int_cst (arg1_type, -16));
16579 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16580 /* Use the build2 helper to set up the MEM_REF. The MEM_REF could also
16581 take an offset, but since we've already incorporated the offset
16582 above, here we just pass in a zero. */
16583 gimple *g;
16584 g = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16585 build_int_cst (arg1_type, 0)));
16586 gimple_set_location (g, loc);
16587 gsi_replace (gsi, g, true);
16588 return true;
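/* Sketch of the replacement just built, in C-like pseudocode
   (names illustrative):

     addr = (char *) arg1 + (sizetype) arg0;   (POINTER_PLUS_EXPR)
     addr = addr & -16;                        (mask off low bits)
     lhs = *(lhs_type *) addr;                 (MEM_REF)

   so the fold preserves lvx's truncation of the low four address
   bits.  */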
16591 default:
16592 if (TARGET_DEBUG_BUILTIN)
16593 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16594 fn_code, fn_name1, fn_name2);
16595 break;
16598 return false;
16601 /* Expand an expression EXP that calls a built-in function,
16602 with result going to TARGET if that's convenient
16603 (and in mode MODE if that's convenient).
16604 SUBTARGET may be used as the target for computing one of EXP's operands.
16605 IGNORE is nonzero if the value is to be ignored. */
16607 static rtx
16608 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16609 machine_mode mode ATTRIBUTE_UNUSED,
16610 int ignore ATTRIBUTE_UNUSED)
16612 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16613 enum rs6000_builtins fcode
16614 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16615 size_t uns_fcode = (size_t)fcode;
16616 const struct builtin_description *d;
16617 size_t i;
16618 rtx ret;
16619 bool success;
16620 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16621 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16623 if (TARGET_DEBUG_BUILTIN)
16625 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16626 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16627 const char *name2 = (icode != CODE_FOR_nothing)
16628 ? get_insn_name ((int) icode)
16629 : "nothing";
16630 const char *name3;
16632 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16634 default: name3 = "unknown"; break;
16635 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16636 case RS6000_BTC_UNARY: name3 = "unary"; break;
16637 case RS6000_BTC_BINARY: name3 = "binary"; break;
16638 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16639 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16640 case RS6000_BTC_ABS: name3 = "abs"; break;
16641 case RS6000_BTC_DST: name3 = "dst"; break;
16645 fprintf (stderr,
16646 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16647 (name1) ? name1 : "---", fcode,
16648 (name2) ? name2 : "---", (int) icode,
16649 name3,
16650 func_valid_p ? "" : ", not valid");
16653 if (!func_valid_p)
16655 rs6000_invalid_builtin (fcode);
16657 /* Given it is invalid, just generate a normal call. */
16658 return expand_call (exp, target, ignore);
16661 switch (fcode)
16663 case RS6000_BUILTIN_RECIP:
16664 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16666 case RS6000_BUILTIN_RECIPF:
16667 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16669 case RS6000_BUILTIN_RSQRTF:
16670 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16672 case RS6000_BUILTIN_RSQRT:
16673 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16675 case POWER7_BUILTIN_BPERMD:
16676 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16677 ? CODE_FOR_bpermd_di
16678 : CODE_FOR_bpermd_si), exp, target);
16680 case RS6000_BUILTIN_GET_TB:
16681 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16682 target);
16684 case RS6000_BUILTIN_MFTB:
16685 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16686 ? CODE_FOR_rs6000_mftb_di
16687 : CODE_FOR_rs6000_mftb_si),
16688 target);
16690 case RS6000_BUILTIN_MFFS:
16691 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16693 case RS6000_BUILTIN_MTFSF:
16694 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16696 case RS6000_BUILTIN_CPU_INIT:
16697 case RS6000_BUILTIN_CPU_IS:
16698 case RS6000_BUILTIN_CPU_SUPPORTS:
16699 return cpu_expand_builtin (fcode, exp, target);
16701 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16702 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16704 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16705 : (int) CODE_FOR_altivec_lvsl_direct);
16706 machine_mode tmode = insn_data[icode].operand[0].mode;
16707 machine_mode mode = insn_data[icode].operand[1].mode;
16708 tree arg;
16709 rtx op, addr, pat;
16711 gcc_assert (TARGET_ALTIVEC);
16713 arg = CALL_EXPR_ARG (exp, 0);
16714 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16715 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16716 addr = memory_address (mode, op);
16717 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16718 op = addr;
16719 else
16721 /* For the load case we need to negate the address. */
16722 op = gen_reg_rtx (GET_MODE (addr));
16723 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16725 op = gen_rtx_MEM (mode, op);
16727 if (target == 0
16728 || GET_MODE (target) != tmode
16729 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16730 target = gen_reg_rtx (tmode);
16732 pat = GEN_FCN (icode) (target, op);
16733 if (!pat)
16734 return 0;
16735 emit_insn (pat);
16737 return target;
16740 case ALTIVEC_BUILTIN_VCFUX:
16741 case ALTIVEC_BUILTIN_VCFSX:
16742 case ALTIVEC_BUILTIN_VCTUXS:
16743 case ALTIVEC_BUILTIN_VCTSXS:
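/* These conversion built-ins take an optional scale operand; a
   one-argument call is canonicalized here into the two-argument form
   with a zero scale, e.g. vec_ctf (v) becomes vec_ctf (v, 0).  */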
16744 /* FIXME: There's got to be a nicer way to handle this case than
16745 constructing a new CALL_EXPR. */
16746 if (call_expr_nargs (exp) == 1)
16748 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16749 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16751 break;
16753 default:
16754 break;
16757 if (TARGET_ALTIVEC)
16759 ret = altivec_expand_builtin (exp, target, &success);
16761 if (success)
16762 return ret;
16764 if (TARGET_PAIRED_FLOAT)
16766 ret = paired_expand_builtin (exp, target, &success);
16768 if (success)
16769 return ret;
16771 if (TARGET_HTM)
16773 ret = htm_expand_builtin (exp, target, &success);
16775 if (success)
16776 return ret;
16779 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16780 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16781 gcc_assert (attr == RS6000_BTC_UNARY
16782 || attr == RS6000_BTC_BINARY
16783 || attr == RS6000_BTC_TERNARY
16784 || attr == RS6000_BTC_SPECIAL);
16786 /* Handle simple unary operations. */
16787 d = bdesc_1arg;
16788 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16789 if (d->code == fcode)
16790 return rs6000_expand_unop_builtin (d->icode, exp, target);
16792 /* Handle simple binary operations. */
16793 d = bdesc_2arg;
16794 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16795 if (d->code == fcode)
16796 return rs6000_expand_binop_builtin (d->icode, exp, target);
16798 /* Handle simple ternary operations. */
16799 d = bdesc_3arg;
16800 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16801 if (d->code == fcode)
16802 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16804 /* Handle simple no-argument operations. */
16805 d = bdesc_0arg;
16806 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16807 if (d->code == fcode)
16808 return rs6000_expand_zeroop_builtin (d->icode, target);
16810 gcc_unreachable ();
16813 /* Create a builtin vector type with a name, taking care not to give
16814 the canonical type a name. */
16816 static tree
16817 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16819 tree result = build_vector_type (elt_type, num_elts);
16821 /* Copy so we don't give the canonical type a name. */
16822 result = build_variant_type_copy (result);
16824 add_builtin_type (name, result);
16826 return result;
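/* Example use (taken from rs6000_init_builtins below):

     V4SI_type_node = rs6000_vector_type ("__vector signed int",
                                          intSI_type_node, 4);

   This registers a named variant while leaving the canonical V4SI
   vector type anonymous, so type-identity checks on the canonical
   type are unaffected by the name.  */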
16829 static void
16830 rs6000_init_builtins (void)
16832 tree tdecl;
16833 tree ftype;
16834 machine_mode mode;
16836 if (TARGET_DEBUG_BUILTIN)
16837 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16838 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16839 (TARGET_ALTIVEC) ? ", altivec" : "",
16840 (TARGET_VSX) ? ", vsx" : "");
16842 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16843 V2SF_type_node = build_vector_type (float_type_node, 2);
16844 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16845 : "__vector long long",
16846 intDI_type_node, 2);
16847 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16848 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16849 intSI_type_node, 4);
16850 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16851 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16852 intHI_type_node, 8);
16853 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16854 intQI_type_node, 16);
16856 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16857 unsigned_intQI_type_node, 16);
16858 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16859 unsigned_intHI_type_node, 8);
16860 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16861 unsigned_intSI_type_node, 4);
16862 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16863 ? "__vector unsigned long"
16864 : "__vector unsigned long long",
16865 unsigned_intDI_type_node, 2);
16867 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16868 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16869 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16870 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16872 const_str_type_node
16873 = build_pointer_type (build_qualified_type (char_type_node,
16874 TYPE_QUAL_CONST));
16876 /* We use V1TI mode as a special container to hold __int128_t items that
16877 must live in VSX registers. */
16878 if (intTI_type_node)
16880 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16881 intTI_type_node, 1);
16882 unsigned_V1TI_type_node
16883 = rs6000_vector_type ("__vector unsigned __int128",
16884 unsigned_intTI_type_node, 1);
16887 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16888 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16889 'vector unsigned short'. */
16891 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16892 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16893 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16894 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16895 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16897 long_integer_type_internal_node = long_integer_type_node;
16898 long_unsigned_type_internal_node = long_unsigned_type_node;
16899 long_long_integer_type_internal_node = long_long_integer_type_node;
16900 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16901 intQI_type_internal_node = intQI_type_node;
16902 uintQI_type_internal_node = unsigned_intQI_type_node;
16903 intHI_type_internal_node = intHI_type_node;
16904 uintHI_type_internal_node = unsigned_intHI_type_node;
16905 intSI_type_internal_node = intSI_type_node;
16906 uintSI_type_internal_node = unsigned_intSI_type_node;
16907 intDI_type_internal_node = intDI_type_node;
16908 uintDI_type_internal_node = unsigned_intDI_type_node;
16909 intTI_type_internal_node = intTI_type_node;
16910 uintTI_type_internal_node = unsigned_intTI_type_node;
16911 float_type_internal_node = float_type_node;
16912 double_type_internal_node = double_type_node;
16913 long_double_type_internal_node = long_double_type_node;
16914 dfloat64_type_internal_node = dfloat64_type_node;
16915 dfloat128_type_internal_node = dfloat128_type_node;
16916 void_type_internal_node = void_type_node;
16918 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16919 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16920 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16921 format that uses a pair of doubles, depending on the switches and
16922 defaults.
16924 If we support neither the 128-bit IBM double-double format nor IEEE
16925 128-bit floating point, we need to make sure the type is non-zero, or
16926 else the self-test fails during bootstrap.
16928 We don't register a built-in type for __ibm128 if the type is the same as
16929 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
16930 __ibm128 to long double.
16932 For IEEE 128-bit floating point, always create the type __ieee128. If the
16933 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16934 __ieee128. */
16935 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16937 ibm128_float_type_node = make_node (REAL_TYPE);
16938 TYPE_PRECISION (ibm128_float_type_node) = 128;
16939 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16940 layout_type (ibm128_float_type_node);
16942 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16943 "__ibm128");
16945 else
16946 ibm128_float_type_node = long_double_type_node;
16948 if (TARGET_FLOAT128_TYPE)
16950 ieee128_float_type_node = float128_type_node;
16951 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16952 "__ieee128");
16955 else
16956 ieee128_float_type_node = long_double_type_node;
16958 /* Initialize the modes for builtin_function_type, mapping each machine
16959 mode to the corresponding tree type node. */
16960 builtin_mode_to_type[QImode][0] = integer_type_node;
16961 builtin_mode_to_type[HImode][0] = integer_type_node;
16962 builtin_mode_to_type[SImode][0] = intSI_type_node;
16963 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16964 builtin_mode_to_type[DImode][0] = intDI_type_node;
16965 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16966 builtin_mode_to_type[TImode][0] = intTI_type_node;
16967 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16968 builtin_mode_to_type[SFmode][0] = float_type_node;
16969 builtin_mode_to_type[DFmode][0] = double_type_node;
16970 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16971 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16972 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16973 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16974 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16975 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16976 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16977 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16978 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16979 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16980 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16981 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16982 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16983 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16984 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16985 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16986 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16987 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16988 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16990 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16991 TYPE_NAME (bool_char_type_node) = tdecl;
16993 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16994 TYPE_NAME (bool_short_type_node) = tdecl;
16996 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16997 TYPE_NAME (bool_int_type_node) = tdecl;
16999 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17000 TYPE_NAME (pixel_type_node) = tdecl;
17002 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17003 bool_char_type_node, 16);
17004 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17005 bool_short_type_node, 8);
17006 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17007 bool_int_type_node, 4);
17008 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17009 ? "__vector __bool long"
17010 : "__vector __bool long long",
17011 bool_long_type_node, 2);
17012 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17013 pixel_type_node, 8);
17015 /* Paired built-in functions are only available if the compiler was
17016 built with the appropriate options, so create them only when those
17017 options are in effect. Create the AltiVec and VSX built-ins on any
17018 machine with at least the general purpose extensions (970 and newer)
17019 so they can be enabled via the target attribute. */
17020 if (TARGET_PAIRED_FLOAT)
17021 paired_init_builtins ();
17022 if (TARGET_EXTRA_BUILTINS)
17023 altivec_init_builtins ();
17024 if (TARGET_HTM)
17025 htm_init_builtins ();
17027 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17028 rs6000_common_init_builtins ();
17030 ftype = build_function_type_list (ieee128_float_type_node,
17031 const_str_type_node, NULL_TREE);
17032 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
17033 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
17035 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
17036 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17037 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17039 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17040 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17041 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17043 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17044 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17045 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17047 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17048 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17049 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17051 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17052 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17053 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17055 mode = (TARGET_64BIT) ? DImode : SImode;
17056 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17057 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17058 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17060 ftype = build_function_type_list (unsigned_intDI_type_node,
17061 NULL_TREE);
17062 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17064 if (TARGET_64BIT)
17065 ftype = build_function_type_list (unsigned_intDI_type_node,
17066 NULL_TREE);
17067 else
17068 ftype = build_function_type_list (unsigned_intSI_type_node,
17069 NULL_TREE);
17070 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17072 ftype = build_function_type_list (double_type_node, NULL_TREE);
17073 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17075 ftype = build_function_type_list (void_type_node,
17076 intSI_type_node, double_type_node,
17077 NULL_TREE);
17078 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17080 ftype = build_function_type_list (void_type_node, NULL_TREE);
17081 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17083 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17084 NULL_TREE);
17085 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17086 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
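/* Illustrative use of the CPU built-ins just defined (the called
   helpers are hypothetical user code):

     __builtin_cpu_init ();
     if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();
     else if (__builtin_cpu_is ("power8"))
       use_power8_path ();  */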
17088 /* AIX libm provides clog as __clog. */
17089 if (TARGET_XCOFF
17090 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17091 set_user_assembler_name (tdecl, "__clog");
17093 #ifdef SUBTARGET_INIT_BUILTINS
17094 SUBTARGET_INIT_BUILTINS;
17095 #endif
17098 /* Returns the rs6000 builtin decl for CODE. */
17100 static tree
17101 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17103 HOST_WIDE_INT fnmask;
17105 if (code >= RS6000_BUILTIN_COUNT)
17106 return error_mark_node;
17108 fnmask = rs6000_builtin_info[code].mask;
17109 if ((fnmask & rs6000_builtin_mask) != fnmask)
17111 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17112 return error_mark_node;
17115 return rs6000_builtin_decls[code];
17118 static void
17119 paired_init_builtins (void)
17121 const struct builtin_description *d;
17122 size_t i;
17123 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17125 tree int_ftype_int_v2sf_v2sf
17126 = build_function_type_list (integer_type_node,
17127 integer_type_node,
17128 V2SF_type_node,
17129 V2SF_type_node,
17130 NULL_TREE);
17131 tree pcfloat_type_node
17132 = build_pointer_type (build_qualified_type (float_type_node,
17133 TYPE_QUAL_CONST));
17135 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17136 long_integer_type_node,
17137 pcfloat_type_node,
17138 NULL_TREE);
17139 tree void_ftype_v2sf_long_pcfloat
17140 = build_function_type_list (void_type_node,
17141 V2SF_type_node,
17142 long_integer_type_node,
17143 pcfloat_type_node,
17144 NULL_TREE);
17147 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17148 PAIRED_BUILTIN_LX);
17151 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17152 PAIRED_BUILTIN_STX);
17154 /* Predicates. */
17155 d = bdesc_paired_preds;
17156 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17158 tree type;
17159 HOST_WIDE_INT mask = d->mask;
17161 if ((mask & builtin_mask) != mask)
17163 if (TARGET_DEBUG_BUILTIN)
17164 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17165 d->name);
17166 continue;
17169 /* Cannot define builtin if the instruction is disabled. */
17170 gcc_assert (d->icode != CODE_FOR_nothing);
17172 if (TARGET_DEBUG_BUILTIN)
17173 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17174 (int)i, get_insn_name (d->icode), (int)d->icode,
17175 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17177 switch (insn_data[d->icode].operand[1].mode)
17179 case E_V2SFmode:
17180 type = int_ftype_int_v2sf_v2sf;
17181 break;
17182 default:
17183 gcc_unreachable ();
17186 def_builtin (d->name, type, d->code);
17190 static void
17191 altivec_init_builtins (void)
17193 const struct builtin_description *d;
17194 size_t i;
17195 tree ftype;
17196 tree decl;
17197 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17199 tree pvoid_type_node = build_pointer_type (void_type_node);
17201 tree pcvoid_type_node
17202 = build_pointer_type (build_qualified_type (void_type_node,
17203 TYPE_QUAL_CONST));
17205 tree int_ftype_opaque
17206 = build_function_type_list (integer_type_node,
17207 opaque_V4SI_type_node, NULL_TREE);
17208 tree opaque_ftype_opaque
17209 = build_function_type_list (integer_type_node, NULL_TREE);
17210 tree opaque_ftype_opaque_int
17211 = build_function_type_list (opaque_V4SI_type_node,
17212 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17213 tree opaque_ftype_opaque_opaque_int
17214 = build_function_type_list (opaque_V4SI_type_node,
17215 opaque_V4SI_type_node, opaque_V4SI_type_node,
17216 integer_type_node, NULL_TREE);
17217 tree opaque_ftype_opaque_opaque_opaque
17218 = build_function_type_list (opaque_V4SI_type_node,
17219 opaque_V4SI_type_node, opaque_V4SI_type_node,
17220 opaque_V4SI_type_node, NULL_TREE);
17221 tree opaque_ftype_opaque_opaque
17222 = build_function_type_list (opaque_V4SI_type_node,
17223 opaque_V4SI_type_node, opaque_V4SI_type_node,
17224 NULL_TREE);
17225 tree int_ftype_int_opaque_opaque
17226 = build_function_type_list (integer_type_node,
17227 integer_type_node, opaque_V4SI_type_node,
17228 opaque_V4SI_type_node, NULL_TREE);
17229 tree int_ftype_int_v4si_v4si
17230 = build_function_type_list (integer_type_node,
17231 integer_type_node, V4SI_type_node,
17232 V4SI_type_node, NULL_TREE);
17233 tree int_ftype_int_v2di_v2di
17234 = build_function_type_list (integer_type_node,
17235 integer_type_node, V2DI_type_node,
17236 V2DI_type_node, NULL_TREE);
17237 tree void_ftype_v4si
17238 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17239 tree v8hi_ftype_void
17240 = build_function_type_list (V8HI_type_node, NULL_TREE);
17241 tree void_ftype_void
17242 = build_function_type_list (void_type_node, NULL_TREE);
17243 tree void_ftype_int
17244 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17246 tree opaque_ftype_long_pcvoid
17247 = build_function_type_list (opaque_V4SI_type_node,
17248 long_integer_type_node, pcvoid_type_node,
17249 NULL_TREE);
17250 tree v16qi_ftype_long_pcvoid
17251 = build_function_type_list (V16QI_type_node,
17252 long_integer_type_node, pcvoid_type_node,
17253 NULL_TREE);
17254 tree v8hi_ftype_long_pcvoid
17255 = build_function_type_list (V8HI_type_node,
17256 long_integer_type_node, pcvoid_type_node,
17257 NULL_TREE);
17258 tree v4si_ftype_long_pcvoid
17259 = build_function_type_list (V4SI_type_node,
17260 long_integer_type_node, pcvoid_type_node,
17261 NULL_TREE);
17262 tree v4sf_ftype_long_pcvoid
17263 = build_function_type_list (V4SF_type_node,
17264 long_integer_type_node, pcvoid_type_node,
17265 NULL_TREE);
17266 tree v2df_ftype_long_pcvoid
17267 = build_function_type_list (V2DF_type_node,
17268 long_integer_type_node, pcvoid_type_node,
17269 NULL_TREE);
17270 tree v2di_ftype_long_pcvoid
17271 = build_function_type_list (V2DI_type_node,
17272 long_integer_type_node, pcvoid_type_node,
17273 NULL_TREE);
17275 tree void_ftype_opaque_long_pvoid
17276 = build_function_type_list (void_type_node,
17277 opaque_V4SI_type_node, long_integer_type_node,
17278 pvoid_type_node, NULL_TREE);
17279 tree void_ftype_v4si_long_pvoid
17280 = build_function_type_list (void_type_node,
17281 V4SI_type_node, long_integer_type_node,
17282 pvoid_type_node, NULL_TREE);
17283 tree void_ftype_v16qi_long_pvoid
17284 = build_function_type_list (void_type_node,
17285 V16QI_type_node, long_integer_type_node,
17286 pvoid_type_node, NULL_TREE);
17288 tree void_ftype_v16qi_pvoid_long
17289 = build_function_type_list (void_type_node,
17290 V16QI_type_node, pvoid_type_node,
17291 long_integer_type_node, NULL_TREE);
17293 tree void_ftype_v8hi_long_pvoid
17294 = build_function_type_list (void_type_node,
17295 V8HI_type_node, long_integer_type_node,
17296 pvoid_type_node, NULL_TREE);
17297 tree void_ftype_v4sf_long_pvoid
17298 = build_function_type_list (void_type_node,
17299 V4SF_type_node, long_integer_type_node,
17300 pvoid_type_node, NULL_TREE);
17301 tree void_ftype_v2df_long_pvoid
17302 = build_function_type_list (void_type_node,
17303 V2DF_type_node, long_integer_type_node,
17304 pvoid_type_node, NULL_TREE);
17305 tree void_ftype_v2di_long_pvoid
17306 = build_function_type_list (void_type_node,
17307 V2DI_type_node, long_integer_type_node,
17308 pvoid_type_node, NULL_TREE);
17309 tree int_ftype_int_v8hi_v8hi
17310 = build_function_type_list (integer_type_node,
17311 integer_type_node, V8HI_type_node,
17312 V8HI_type_node, NULL_TREE);
17313 tree int_ftype_int_v16qi_v16qi
17314 = build_function_type_list (integer_type_node,
17315 integer_type_node, V16QI_type_node,
17316 V16QI_type_node, NULL_TREE);
17317 tree int_ftype_int_v4sf_v4sf
17318 = build_function_type_list (integer_type_node,
17319 integer_type_node, V4SF_type_node,
17320 V4SF_type_node, NULL_TREE);
17321 tree int_ftype_int_v2df_v2df
17322 = build_function_type_list (integer_type_node,
17323 integer_type_node, V2DF_type_node,
17324 V2DF_type_node, NULL_TREE);
17325 tree v2di_ftype_v2di
17326 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17327 tree v4si_ftype_v4si
17328 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17329 tree v8hi_ftype_v8hi
17330 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17331 tree v16qi_ftype_v16qi
17332 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17333 tree v4sf_ftype_v4sf
17334 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17335 tree v2df_ftype_v2df
17336 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17337 tree void_ftype_pcvoid_int_int
17338 = build_function_type_list (void_type_node,
17339 pcvoid_type_node, integer_type_node,
17340 integer_type_node, NULL_TREE);
17342 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17343 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17344 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17345 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17346 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17347 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17348 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17349 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17350 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17351 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17352 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17353 ALTIVEC_BUILTIN_LVXL_V2DF);
17354 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17355 ALTIVEC_BUILTIN_LVXL_V2DI);
17356 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17357 ALTIVEC_BUILTIN_LVXL_V4SF);
17358 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17359 ALTIVEC_BUILTIN_LVXL_V4SI);
17360 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17361 ALTIVEC_BUILTIN_LVXL_V8HI);
17362 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17363 ALTIVEC_BUILTIN_LVXL_V16QI);
17364 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17365 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17366 ALTIVEC_BUILTIN_LVX_V2DF);
17367 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17368 ALTIVEC_BUILTIN_LVX_V2DI);
17369 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17370 ALTIVEC_BUILTIN_LVX_V4SF);
17371 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17372 ALTIVEC_BUILTIN_LVX_V4SI);
17373 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17374 ALTIVEC_BUILTIN_LVX_V8HI);
17375 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17376 ALTIVEC_BUILTIN_LVX_V16QI);
17377 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17378 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17379 ALTIVEC_BUILTIN_STVX_V2DF);
17380 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17381 ALTIVEC_BUILTIN_STVX_V2DI);
17382 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17383 ALTIVEC_BUILTIN_STVX_V4SF);
17384 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17385 ALTIVEC_BUILTIN_STVX_V4SI);
17386 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17387 ALTIVEC_BUILTIN_STVX_V8HI);
17388 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17389 ALTIVEC_BUILTIN_STVX_V16QI);
17390 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17391 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17392 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17393 ALTIVEC_BUILTIN_STVXL_V2DF);
17394 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17395 ALTIVEC_BUILTIN_STVXL_V2DI);
17396 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17397 ALTIVEC_BUILTIN_STVXL_V4SF);
17398 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17399 ALTIVEC_BUILTIN_STVXL_V4SI);
17400 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17401 ALTIVEC_BUILTIN_STVXL_V8HI);
17402 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17403 ALTIVEC_BUILTIN_STVXL_V16QI);
17404 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17405 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17406 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17407 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17408 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17409 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17410 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17411 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17412 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17413 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17414 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17415 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17416 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17417 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17418 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17419 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17421 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17422 VSX_BUILTIN_LXVD2X_V2DF);
17423 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17424 VSX_BUILTIN_LXVD2X_V2DI);
17425 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17426 VSX_BUILTIN_LXVW4X_V4SF);
17427 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17428 VSX_BUILTIN_LXVW4X_V4SI);
17429 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17430 VSX_BUILTIN_LXVW4X_V8HI);
17431 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17432 VSX_BUILTIN_LXVW4X_V16QI);
17433 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17434 VSX_BUILTIN_STXVD2X_V2DF);
17435 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17436 VSX_BUILTIN_STXVD2X_V2DI);
17437 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17438 VSX_BUILTIN_STXVW4X_V4SF);
17439 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17440 VSX_BUILTIN_STXVW4X_V4SI);
17441 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17442 VSX_BUILTIN_STXVW4X_V8HI);
17443 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17444 VSX_BUILTIN_STXVW4X_V16QI);
17446 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17447 VSX_BUILTIN_LD_ELEMREV_V2DF);
17448 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17449 VSX_BUILTIN_LD_ELEMREV_V2DI);
17450 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17451 VSX_BUILTIN_LD_ELEMREV_V4SF);
17452 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17453 VSX_BUILTIN_LD_ELEMREV_V4SI);
17454 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17455 VSX_BUILTIN_ST_ELEMREV_V2DF);
17456 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17457 VSX_BUILTIN_ST_ELEMREV_V2DI);
17458 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17459 VSX_BUILTIN_ST_ELEMREV_V4SF);
17460 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17461 VSX_BUILTIN_ST_ELEMREV_V4SI);
17463 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
17464 VSX_BUILTIN_XL_BE_V8HI);
17465 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
17466 VSX_BUILTIN_XL_BE_V4SI);
17467 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
17468 VSX_BUILTIN_XL_BE_V2DI);
17469 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
17470 VSX_BUILTIN_XL_BE_V4SF);
17471 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
17472 VSX_BUILTIN_XL_BE_V2DF);
17473 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
17474 VSX_BUILTIN_XL_BE_V16QI);
17476 if (TARGET_P9_VECTOR)
17478 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17479 VSX_BUILTIN_LD_ELEMREV_V8HI);
17480 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17481 VSX_BUILTIN_LD_ELEMREV_V16QI);
17482 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17483 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17484 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17485 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17487 else
17489 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17490 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17491 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17492 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17493 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17494 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17495 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17496 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
17499 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17500 VSX_BUILTIN_VEC_LD);
17501 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17502 VSX_BUILTIN_VEC_ST);
17503 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17504 VSX_BUILTIN_VEC_XL);
17505 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17506 VSX_BUILTIN_VEC_XL_BE);
17507 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17508 VSX_BUILTIN_VEC_XST);
17510 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17511 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17512 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17514 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17515 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17516 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17517 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17518 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17519 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17520 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17521 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17522 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17523 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17524 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17525 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17527 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17528 ALTIVEC_BUILTIN_VEC_ADDE);
17529 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17530 ALTIVEC_BUILTIN_VEC_ADDEC);
17531 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17532 ALTIVEC_BUILTIN_VEC_CMPNE);
17533 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17534 ALTIVEC_BUILTIN_VEC_MUL);
17535 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17536 ALTIVEC_BUILTIN_VEC_SUBE);
17537 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17538 ALTIVEC_BUILTIN_VEC_SUBEC);
17540 /* Cell builtins. */
17541 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17542 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17543 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17544 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17546 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17547 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17548 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17549 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17551 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17552 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17553 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17554 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17556 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17557 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17558 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17559 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17561 if (TARGET_P9_VECTOR)
17562 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17563 P9V_BUILTIN_STXVL);
17565 /* Add the DST variants. */
17566 d = bdesc_dst;
17567 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17569 HOST_WIDE_INT mask = d->mask;
17571 /* It is expected that these dst built-in functions may have
17572 d->icode equal to CODE_FOR_nothing. */
17573 if ((mask & builtin_mask) != mask)
17575 if (TARGET_DEBUG_BUILTIN)
17576 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17577 d->name);
17578 continue;
17580 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17583 /* Initialize the predicates. */
17584 d = bdesc_altivec_preds;
17585 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17587 machine_mode mode1;
17588 tree type;
17589 HOST_WIDE_INT mask = d->mask;
17591 if ((mask & builtin_mask) != mask)
17593 if (TARGET_DEBUG_BUILTIN)
17594 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17595 d->name);
17596 continue;
17599 if (rs6000_overloaded_builtin_p (d->code))
17600 mode1 = VOIDmode;
17601 else
17603 /* Cannot define builtin if the instruction is disabled. */
17604 gcc_assert (d->icode != CODE_FOR_nothing);
17605 mode1 = insn_data[d->icode].operand[1].mode;
17608 switch (mode1)
17610 case E_VOIDmode:
17611 type = int_ftype_int_opaque_opaque;
17612 break;
17613 case E_V2DImode:
17614 type = int_ftype_int_v2di_v2di;
17615 break;
17616 case E_V4SImode:
17617 type = int_ftype_int_v4si_v4si;
17618 break;
17619 case E_V8HImode:
17620 type = int_ftype_int_v8hi_v8hi;
17621 break;
17622 case E_V16QImode:
17623 type = int_ftype_int_v16qi_v16qi;
17624 break;
17625 case E_V4SFmode:
17626 type = int_ftype_int_v4sf_v4sf;
17627 break;
17628 case E_V2DFmode:
17629 type = int_ftype_int_v2df_v2df;
17630 break;
17631 default:
17632 gcc_unreachable ();
17635 def_builtin (d->name, type, d->code);
17638 /* Initialize the abs* operators. */
17639 d = bdesc_abs;
17640 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17642 machine_mode mode0;
17643 tree type;
17644 HOST_WIDE_INT mask = d->mask;
17646 if ((mask & builtin_mask) != mask)
17648 if (TARGET_DEBUG_BUILTIN)
17649 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17650 d->name);
17651 continue;
17654 /* Cannot define builtin if the instruction is disabled. */
17655 gcc_assert (d->icode != CODE_FOR_nothing);
17656 mode0 = insn_data[d->icode].operand[0].mode;
17658 switch (mode0)
17660 case E_V2DImode:
17661 type = v2di_ftype_v2di;
17662 break;
17663 case E_V4SImode:
17664 type = v4si_ftype_v4si;
17665 break;
17666 case E_V8HImode:
17667 type = v8hi_ftype_v8hi;
17668 break;
17669 case E_V16QImode:
17670 type = v16qi_ftype_v16qi;
17671 break;
17672 case E_V4SFmode:
17673 type = v4sf_ftype_v4sf;
17674 break;
17675 case E_V2DFmode:
17676 type = v2df_ftype_v2df;
17677 break;
17678 default:
17679 gcc_unreachable ();
17682 def_builtin (d->name, type, d->code);
17685 /* Initialize the target builtin that implements
17686 targetm.vectorize.builtin_mask_for_load. */
17688 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17689 v16qi_ftype_long_pcvoid,
17690 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17691 BUILT_IN_MD, NULL, NULL_TREE);
17692 TREE_READONLY (decl) = 1;
17693 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17694 altivec_builtin_mask_for_load = decl;
17696 /* Access to the vec_init patterns. */
17697 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17698 integer_type_node, integer_type_node,
17699 integer_type_node, NULL_TREE);
17700 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17702 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17703 short_integer_type_node,
17704 short_integer_type_node,
17705 short_integer_type_node,
17706 short_integer_type_node,
17707 short_integer_type_node,
17708 short_integer_type_node,
17709 short_integer_type_node, NULL_TREE);
17710 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17712 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17713 char_type_node, char_type_node,
17714 char_type_node, char_type_node,
17715 char_type_node, char_type_node,
17716 char_type_node, char_type_node,
17717 char_type_node, char_type_node,
17718 char_type_node, char_type_node,
17719 char_type_node, char_type_node,
17720 char_type_node, NULL_TREE);
17721 def_builtin ("__builtin_vec_init_v16qi", ftype,
17722 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17724 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17725 float_type_node, float_type_node,
17726 float_type_node, NULL_TREE);
17727 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17729 /* VSX builtins. */
17730 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17731 double_type_node, NULL_TREE);
17732 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17734 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17735 intDI_type_node, NULL_TREE);
17736 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17738 /* Access to the vec_set patterns. */
17739 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17740 intSI_type_node,
17741 integer_type_node, NULL_TREE);
17742 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17744 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17745 intHI_type_node,
17746 integer_type_node, NULL_TREE);
17747 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17749 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17750 intQI_type_node,
17751 integer_type_node, NULL_TREE);
17752 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17754 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17755 float_type_node,
17756 integer_type_node, NULL_TREE);
17757 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17759 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17760 double_type_node,
17761 integer_type_node, NULL_TREE);
17762 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17764 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17765 intDI_type_node,
17766 integer_type_node, NULL_TREE);
17767 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17769 /* Access to the vec_extract patterns. */
17770 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17771 integer_type_node, NULL_TREE);
17772 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17774 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17775 integer_type_node, NULL_TREE);
17776 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17778 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17779 integer_type_node, NULL_TREE);
17780 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17782 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17783 integer_type_node, NULL_TREE);
17784 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17786 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17787 integer_type_node, NULL_TREE);
17788 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17790 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17791 integer_type_node, NULL_TREE);
17792 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17795 if (V1TI_type_node)
17797 tree v1ti_ftype_long_pcvoid
17798 = build_function_type_list (V1TI_type_node,
17799 long_integer_type_node, pcvoid_type_node,
17800 NULL_TREE);
17801 tree void_ftype_v1ti_long_pvoid
17802 = build_function_type_list (void_type_node,
17803 V1TI_type_node, long_integer_type_node,
17804 pvoid_type_node, NULL_TREE);
17805 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17806 VSX_BUILTIN_LXVD2X_V1TI);
17807 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17808 VSX_BUILTIN_STXVD2X_V1TI);
17809 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17810 NULL_TREE, NULL_TREE);
17811 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17812 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17813 intTI_type_node,
17814 integer_type_node, NULL_TREE);
17815 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17816 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17817 integer_type_node, NULL_TREE);
17818 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17823 static void
17824 htm_init_builtins (void)
17826 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17827 const struct builtin_description *d;
17828 size_t i;
17830 d = bdesc_htm;
17831 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17833 tree op[MAX_HTM_OPERANDS], type;
17834 HOST_WIDE_INT mask = d->mask;
17835 unsigned attr = rs6000_builtin_info[d->code].attr;
17836 bool void_func = (attr & RS6000_BTC_VOID);
17837 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17838 int nopnds = 0;
17839 tree gpr_type_node;
17840 tree rettype;
17841 tree argtype;
17843 /* It is expected that these htm built-in functions may have
17844 d->icode equal to CODE_FOR_nothing. */
17846 if (TARGET_32BIT && TARGET_POWERPC64)
17847 gpr_type_node = long_long_unsigned_type_node;
17848 else
17849 gpr_type_node = long_unsigned_type_node;
17851 if (attr & RS6000_BTC_SPR)
17853 rettype = gpr_type_node;
17854 argtype = gpr_type_node;
17856 else if (d->code == HTM_BUILTIN_TABORTDC
17857 || d->code == HTM_BUILTIN_TABORTDCI)
17859 rettype = unsigned_type_node;
17860 argtype = gpr_type_node;
17862 else
17864 rettype = unsigned_type_node;
17865 argtype = unsigned_type_node;
17868 if ((mask & builtin_mask) != mask)
17870 if (TARGET_DEBUG_BUILTIN)
17871 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17872 continue;
17875 if (d->name == 0)
17877 if (TARGET_DEBUG_BUILTIN)
17878 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17879 (long unsigned) i);
17880 continue;
17883 op[nopnds++] = (void_func) ? void_type_node : rettype;
17885 if (attr_args == RS6000_BTC_UNARY)
17886 op[nopnds++] = argtype;
17887 else if (attr_args == RS6000_BTC_BINARY)
17889 op[nopnds++] = argtype;
17890 op[nopnds++] = argtype;
17892 else if (attr_args == RS6000_BTC_TERNARY)
17894 op[nopnds++] = argtype;
17895 op[nopnds++] = argtype;
17896 op[nopnds++] = argtype;
17899 switch (nopnds)
17901 case 1:
17902 type = build_function_type_list (op[0], NULL_TREE);
17903 break;
17904 case 2:
17905 type = build_function_type_list (op[0], op[1], NULL_TREE);
17906 break;
17907 case 3:
17908 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17909 break;
17910 case 4:
17911 type = build_function_type_list (op[0], op[1], op[2], op[3],
17912 NULL_TREE);
17913 break;
17914 default:
17915 gcc_unreachable ();
17918 def_builtin (d->name, type, d->code);
17922 /* Hash function for builtin functions with up to 3 arguments and a return
17923 type. */
17924 hashval_t
17925 builtin_hasher::hash (builtin_hash_struct *bh)
17927 unsigned ret = 0;
17928 int i;
17930 for (i = 0; i < 4; i++)
17932 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17933 ret = (ret * 2) + bh->uns_p[i];
17936 return ret;
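/* Schematically (illustrative), with R = MAX_MACHINE_MODE and the four
   (mode, uns_p) pairs written M0..M3 and U0..U3, the loop above computes
   the mixed-radix value

     ret = ((((((M0 * 2 + U0) * R + M1) * 2 + U1) * R + M2) * 2 + U2)
	    * R + M3) * 2 + U3

   so each pair contributes one digit of the hash.  */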
17939 /* Compare builtin hash entries P1 and P2 for equivalence. */
17940 bool
17941 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17943 return ((p1->mode[0] == p2->mode[0])
17944 && (p1->mode[1] == p2->mode[1])
17945 && (p1->mode[2] == p2->mode[2])
17946 && (p1->mode[3] == p2->mode[3])
17947 && (p1->uns_p[0] == p2->uns_p[0])
17948 && (p1->uns_p[1] == p2->uns_p[1])
17949 && (p1->uns_p[2] == p2->uns_p[2])
17950 && (p1->uns_p[3] == p2->uns_p[3]));
17953 /* Map types for builtin functions with an explicit return type and up to 3
17954 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
17955 mode of each unused argument. */
17956 static tree
17957 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17958 machine_mode mode_arg1, machine_mode mode_arg2,
17959 enum rs6000_builtins builtin, const char *name)
17961 struct builtin_hash_struct h;
17962 struct builtin_hash_struct *h2;
17963 int num_args = 3;
17964 int i;
17965 tree ret_type = NULL_TREE;
17966 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17968 /* Create builtin_hash_table. */
17969 if (builtin_hash_table == NULL)
17970 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17972 h.type = NULL_TREE;
17973 h.mode[0] = mode_ret;
17974 h.mode[1] = mode_arg0;
17975 h.mode[2] = mode_arg1;
17976 h.mode[3] = mode_arg2;
17977 h.uns_p[0] = 0;
17978 h.uns_p[1] = 0;
17979 h.uns_p[2] = 0;
17980 h.uns_p[3] = 0;
17982 /* If the builtin produces unsigned results or takes unsigned
17983 arguments, and its decl is returned for the vectorizer (such as for
17984 widening multiplies or permute), make sure the arguments and return value
17985 are correctly typed. */
17986 switch (builtin)
17988 /* unsigned 1 argument functions. */
17989 case CRYPTO_BUILTIN_VSBOX:
17990 case P8V_BUILTIN_VGBBD:
17991 case MISC_BUILTIN_CDTBCD:
17992 case MISC_BUILTIN_CBCDTD:
17993 h.uns_p[0] = 1;
17994 h.uns_p[1] = 1;
17995 break;
17997 /* unsigned 2 argument functions. */
17998 case ALTIVEC_BUILTIN_VMULEUB:
17999 case ALTIVEC_BUILTIN_VMULEUH:
18000 case ALTIVEC_BUILTIN_VMULEUW:
18001 case ALTIVEC_BUILTIN_VMULOUB:
18002 case ALTIVEC_BUILTIN_VMULOUH:
18003 case ALTIVEC_BUILTIN_VMULOUW:
18004 case CRYPTO_BUILTIN_VCIPHER:
18005 case CRYPTO_BUILTIN_VCIPHERLAST:
18006 case CRYPTO_BUILTIN_VNCIPHER:
18007 case CRYPTO_BUILTIN_VNCIPHERLAST:
18008 case CRYPTO_BUILTIN_VPMSUMB:
18009 case CRYPTO_BUILTIN_VPMSUMH:
18010 case CRYPTO_BUILTIN_VPMSUMW:
18011 case CRYPTO_BUILTIN_VPMSUMD:
18012 case CRYPTO_BUILTIN_VPMSUM:
18013 case MISC_BUILTIN_ADDG6S:
18014 case MISC_BUILTIN_DIVWEU:
18015 case MISC_BUILTIN_DIVWEUO:
18016 case MISC_BUILTIN_DIVDEU:
18017 case MISC_BUILTIN_DIVDEUO:
18018 case VSX_BUILTIN_UDIV_V2DI:
18019 case ALTIVEC_BUILTIN_VMAXUB:
18020 case ALTIVEC_BUILTIN_VMINUB:
18021 case ALTIVEC_BUILTIN_VMAXUH:
18022 case ALTIVEC_BUILTIN_VMINUH:
18023 case ALTIVEC_BUILTIN_VMAXUW:
18024 case ALTIVEC_BUILTIN_VMINUW:
18025 case P8V_BUILTIN_VMAXUD:
18026 case P8V_BUILTIN_VMINUD:
18027 h.uns_p[0] = 1;
18028 h.uns_p[1] = 1;
18029 h.uns_p[2] = 1;
18030 break;
18032 /* unsigned 3 argument functions. */
18033 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18034 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18035 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18036 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18037 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18038 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18039 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18040 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18041 case VSX_BUILTIN_VPERM_16QI_UNS:
18042 case VSX_BUILTIN_VPERM_8HI_UNS:
18043 case VSX_BUILTIN_VPERM_4SI_UNS:
18044 case VSX_BUILTIN_VPERM_2DI_UNS:
18045 case VSX_BUILTIN_XXSEL_16QI_UNS:
18046 case VSX_BUILTIN_XXSEL_8HI_UNS:
18047 case VSX_BUILTIN_XXSEL_4SI_UNS:
18048 case VSX_BUILTIN_XXSEL_2DI_UNS:
18049 case CRYPTO_BUILTIN_VPERMXOR:
18050 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18051 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18052 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18053 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18054 case CRYPTO_BUILTIN_VSHASIGMAW:
18055 case CRYPTO_BUILTIN_VSHASIGMAD:
18056 case CRYPTO_BUILTIN_VSHASIGMA:
18057 h.uns_p[0] = 1;
18058 h.uns_p[1] = 1;
18059 h.uns_p[2] = 1;
18060 h.uns_p[3] = 1;
18061 break;
18063 /* signed permute functions with unsigned char mask. */
18064 case ALTIVEC_BUILTIN_VPERM_16QI:
18065 case ALTIVEC_BUILTIN_VPERM_8HI:
18066 case ALTIVEC_BUILTIN_VPERM_4SI:
18067 case ALTIVEC_BUILTIN_VPERM_4SF:
18068 case ALTIVEC_BUILTIN_VPERM_2DI:
18069 case ALTIVEC_BUILTIN_VPERM_2DF:
18070 case VSX_BUILTIN_VPERM_16QI:
18071 case VSX_BUILTIN_VPERM_8HI:
18072 case VSX_BUILTIN_VPERM_4SI:
18073 case VSX_BUILTIN_VPERM_4SF:
18074 case VSX_BUILTIN_VPERM_2DI:
18075 case VSX_BUILTIN_VPERM_2DF:
18076 h.uns_p[3] = 1;
18077 break;
18079 /* unsigned args, signed return. */
18080 case VSX_BUILTIN_XVCVUXDSP:
18081 case VSX_BUILTIN_XVCVUXDDP_UNS:
18082 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18083 h.uns_p[1] = 1;
18084 break;
18086 /* signed args, unsigned return. */
18087 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18088 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18089 case MISC_BUILTIN_UNPACK_TD:
18090 case MISC_BUILTIN_UNPACK_V1TI:
18091 h.uns_p[0] = 1;
18092 break;
18094 /* unsigned arguments for 128-bit pack instructions. */
18095 case MISC_BUILTIN_PACK_TD:
18096 case MISC_BUILTIN_PACK_V1TI:
18097 h.uns_p[1] = 1;
18098 h.uns_p[2] = 1;
18099 break;
18101 /* unsigned second arguments (vector shift right). */
18102 case ALTIVEC_BUILTIN_VSRB:
18103 case ALTIVEC_BUILTIN_VSRH:
18104 case ALTIVEC_BUILTIN_VSRW:
18105 case P8V_BUILTIN_VSRD:
18106 h.uns_p[2] = 1;
18107 break;
18109 default:
18110 break;
18113 /* Figure out how many args are present. */
18114 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18115 num_args--;
18117 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18118 if (!ret_type && h.uns_p[0])
18119 ret_type = builtin_mode_to_type[h.mode[0]][0];
18121 if (!ret_type)
18122 fatal_error (input_location,
18123 "internal error: builtin function %qs had an unexpected "
18124 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18126 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18127 arg_type[i] = NULL_TREE;
18129 for (i = 0; i < num_args; i++)
18131 int m = (int) h.mode[i+1];
18132 int uns_p = h.uns_p[i+1];
18134 arg_type[i] = builtin_mode_to_type[m][uns_p];
18135 if (!arg_type[i] && uns_p)
18136 arg_type[i] = builtin_mode_to_type[m][0];
18138 if (!arg_type[i])
18139 fatal_error (input_location,
18140 "internal error: builtin function %qs, argument %d "
18141 "had unexpected argument type %qs", name, i,
18142 GET_MODE_NAME (m));
18145 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18146 if (*found == NULL)
18148 h2 = ggc_alloc<builtin_hash_struct> ();
18149 *h2 = h;
18150 *found = h2;
18152 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18153 arg_type[2], NULL_TREE);
18156 return (*found)->type;
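/* Worked example (illustrative): the operands of vmaxuw are all V4SImode,
   and ALTIVEC_BUILTIN_VMAXUW is listed above among the unsigned
   two-argument functions, so a call such as

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			    ALTIVEC_BUILTIN_VMAXUW, "__builtin_altivec_vmaxuw")

   produces "vector unsigned int (vector unsigned int, vector unsigned int)"
   rather than the signed variant.  */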
18159 static void
18160 rs6000_common_init_builtins (void)
18162 const struct builtin_description *d;
18163 size_t i;
18165 tree opaque_ftype_opaque = NULL_TREE;
18166 tree opaque_ftype_opaque_opaque = NULL_TREE;
18167 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18168 tree v2si_ftype = NULL_TREE;
18169 tree v2si_ftype_qi = NULL_TREE;
18170 tree v2si_ftype_v2si_qi = NULL_TREE;
18171 tree v2si_ftype_int_qi = NULL_TREE;
18172 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18174 if (!TARGET_PAIRED_FLOAT)
18176 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18177 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18180 /* Paired builtins are only available if you build a compiler with the
18181 appropriate options, so only create those builtins when that option is
18182 enabled. Create Altivec and VSX builtins on machines with at least
18183 the general purpose extensions (970 and newer) to allow the use of
18184 the target attribute. */
18186 if (TARGET_EXTRA_BUILTINS)
18187 builtin_mask |= RS6000_BTM_COMMON;
18189 /* Add the ternary operators. */
18190 d = bdesc_3arg;
18191 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18193 tree type;
18194 HOST_WIDE_INT mask = d->mask;
18196 if ((mask & builtin_mask) != mask)
18198 if (TARGET_DEBUG_BUILTIN)
18199 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18200 continue;
18203 if (rs6000_overloaded_builtin_p (d->code))
18205 if (! (type = opaque_ftype_opaque_opaque_opaque))
18206 type = opaque_ftype_opaque_opaque_opaque
18207 = build_function_type_list (opaque_V4SI_type_node,
18208 opaque_V4SI_type_node,
18209 opaque_V4SI_type_node,
18210 opaque_V4SI_type_node,
18211 NULL_TREE);
18213 else
18215 enum insn_code icode = d->icode;
18216 if (d->name == 0)
18218 if (TARGET_DEBUG_BUILTIN)
18219 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18220 (long unsigned)i);
18222 continue;
18225 if (icode == CODE_FOR_nothing)
18227 if (TARGET_DEBUG_BUILTIN)
18228 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18229 d->name);
18231 continue;
18234 type = builtin_function_type (insn_data[icode].operand[0].mode,
18235 insn_data[icode].operand[1].mode,
18236 insn_data[icode].operand[2].mode,
18237 insn_data[icode].operand[3].mode,
18238 d->code, d->name);
18241 def_builtin (d->name, type, d->code);
18244 /* Add the binary operators. */
18245 d = bdesc_2arg;
18246 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18248 machine_mode mode0, mode1, mode2;
18249 tree type;
18250 HOST_WIDE_INT mask = d->mask;
18252 if ((mask & builtin_mask) != mask)
18254 if (TARGET_DEBUG_BUILTIN)
18255 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18256 continue;
18259 if (rs6000_overloaded_builtin_p (d->code))
18261 if (! (type = opaque_ftype_opaque_opaque))
18262 type = opaque_ftype_opaque_opaque
18263 = build_function_type_list (opaque_V4SI_type_node,
18264 opaque_V4SI_type_node,
18265 opaque_V4SI_type_node,
18266 NULL_TREE);
18268 else
18270 enum insn_code icode = d->icode;
18271 if (d->name == 0)
18273 if (TARGET_DEBUG_BUILTIN)
18274 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18275 (long unsigned)i);
18277 continue;
18280 if (icode == CODE_FOR_nothing)
18282 if (TARGET_DEBUG_BUILTIN)
18283 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18284 d->name);
18286 continue;
18289 mode0 = insn_data[icode].operand[0].mode;
18290 mode1 = insn_data[icode].operand[1].mode;
18291 mode2 = insn_data[icode].operand[2].mode;
18293 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18295 if (! (type = v2si_ftype_v2si_qi))
18296 type = v2si_ftype_v2si_qi
18297 = build_function_type_list (opaque_V2SI_type_node,
18298 opaque_V2SI_type_node,
18299 char_type_node,
18300 NULL_TREE);
18303 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18304 && mode2 == QImode)
18306 if (! (type = v2si_ftype_int_qi))
18307 type = v2si_ftype_int_qi
18308 = build_function_type_list (opaque_V2SI_type_node,
18309 integer_type_node,
18310 char_type_node,
18311 NULL_TREE);
18314 else
18315 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18316 d->code, d->name);
18319 def_builtin (d->name, type, d->code);
18322 /* Add the simple unary operators. */
18323 d = bdesc_1arg;
18324 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18326 machine_mode mode0, mode1;
18327 tree type;
18328 HOST_WIDE_INT mask = d->mask;
18330 if ((mask & builtin_mask) != mask)
18332 if (TARGET_DEBUG_BUILTIN)
18333 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18334 continue;
18337 if (rs6000_overloaded_builtin_p (d->code))
18339 if (! (type = opaque_ftype_opaque))
18340 type = opaque_ftype_opaque
18341 = build_function_type_list (opaque_V4SI_type_node,
18342 opaque_V4SI_type_node,
18343 NULL_TREE);
18345 else
18347 enum insn_code icode = d->icode;
18348 if (d->name == 0)
18350 if (TARGET_DEBUG_BUILTIN)
18351 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18352 (long unsigned)i);
18354 continue;
18357 if (icode == CODE_FOR_nothing)
18359 if (TARGET_DEBUG_BUILTIN)
18360 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18361 d->name);
18363 continue;
18366 mode0 = insn_data[icode].operand[0].mode;
18367 mode1 = insn_data[icode].operand[1].mode;
18369 if (mode0 == V2SImode && mode1 == QImode)
18371 if (! (type = v2si_ftype_qi))
18372 type = v2si_ftype_qi
18373 = build_function_type_list (opaque_V2SI_type_node,
18374 char_type_node,
18375 NULL_TREE);
18378 else
18379 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18380 d->code, d->name);
18383 def_builtin (d->name, type, d->code);
18386 /* Add the simple no-argument operators. */
18387 d = bdesc_0arg;
18388 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18390 machine_mode mode0;
18391 tree type;
18392 HOST_WIDE_INT mask = d->mask;
18394 if ((mask & builtin_mask) != mask)
18396 if (TARGET_DEBUG_BUILTIN)
18397 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18398 continue;
18400 if (rs6000_overloaded_builtin_p (d->code))
18402 if (!opaque_ftype_opaque)
18403 opaque_ftype_opaque
18404 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18405 type = opaque_ftype_opaque;
18407 else
18409 enum insn_code icode = d->icode;
18410 if (d->name == 0)
18412 if (TARGET_DEBUG_BUILTIN)
18413 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18414 (long unsigned) i);
18415 continue;
18417 if (icode == CODE_FOR_nothing)
18419 if (TARGET_DEBUG_BUILTIN)
18420 fprintf (stderr,
18421 "rs6000_builtin, skip no-argument %s (no code)\n",
18422 d->name);
18423 continue;
18425 mode0 = insn_data[icode].operand[0].mode;
18426 if (mode0 == V2SImode)
18428 /* Code for paired single. */
18429 if (! (type = v2si_ftype))
18431 v2si_ftype
18432 = build_function_type_list (opaque_V2SI_type_node,
18433 NULL_TREE);
18434 type = v2si_ftype;
18437 else
18438 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18439 d->code, d->name);
18441 def_builtin (d->name, type, d->code);
18445 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18446 static void
18447 init_float128_ibm (machine_mode mode)
18449 if (!TARGET_XL_COMPAT)
18451 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18452 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18453 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18454 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18456 if (!TARGET_HARD_FLOAT)
18458 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18459 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18460 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18461 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18462 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18463 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18464 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18465 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18467 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18468 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18469 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18470 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18471 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18472 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18473 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18474 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18477 else
18479 set_optab_libfunc (add_optab, mode, "_xlqadd");
18480 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18481 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18482 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18485 /* Add various conversions for IFmode to use the traditional TFmode
18486 names. */
18487 if (mode == IFmode)
18489 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18490 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18491 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18492 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18493 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18494 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18496 if (TARGET_POWERPC64)
18498 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18499 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18500 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18501 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
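/* Net effect (illustrative): with IBM extended long double and the default
   -mno-xl-compat, a source-level addition such as

     long double c = a + b;

   lowers to a call to __gcc_qadd; under -mxl-compat the _xlqadd entry
   point is used instead.  */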
18506 /* Set up IEEE 128-bit floating point routines. Use different names if the
18507 arguments can be passed in a vector register. The historical PowerPC
18508 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18509 continue to use that if we aren't using vector registers to pass IEEE
18510 128-bit floating point. */
18512 static void
18513 init_float128_ieee (machine_mode mode)
18515 if (FLOAT128_VECTOR_P (mode))
18517 set_optab_libfunc (add_optab, mode, "__addkf3");
18518 set_optab_libfunc (sub_optab, mode, "__subkf3");
18519 set_optab_libfunc (neg_optab, mode, "__negkf2");
18520 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18521 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18522 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18523 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18525 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18526 set_optab_libfunc (ne_optab, mode, "__nekf2");
18527 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18528 set_optab_libfunc (ge_optab, mode, "__gekf2");
18529 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18530 set_optab_libfunc (le_optab, mode, "__lekf2");
18531 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18533 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18534 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18535 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18536 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18538 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18539 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18540 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18542 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18543 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18544 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18546 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18547 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18548 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18549 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18550 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18551 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18553 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18554 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18555 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18556 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18558 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18559 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18560 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18561 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18563 if (TARGET_POWERPC64)
18565 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18566 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18567 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18568 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18572 else
18574 set_optab_libfunc (add_optab, mode, "_q_add");
18575 set_optab_libfunc (sub_optab, mode, "_q_sub");
18576 set_optab_libfunc (neg_optab, mode, "_q_neg");
18577 set_optab_libfunc (smul_optab, mode, "_q_mul");
18578 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18579 if (TARGET_PPC_GPOPT)
18580 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18582 set_optab_libfunc (eq_optab, mode, "_q_feq");
18583 set_optab_libfunc (ne_optab, mode, "_q_fne");
18584 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18585 set_optab_libfunc (ge_optab, mode, "_q_fge");
18586 set_optab_libfunc (lt_optab, mode, "_q_flt");
18587 set_optab_libfunc (le_optab, mode, "_q_fle");
18589 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18590 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18591 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18592 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18593 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18594 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18595 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18596 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18600 static void
18601 rs6000_init_libfuncs (void)
18603 /* __float128 support. */
18604 if (TARGET_FLOAT128_TYPE)
18606 init_float128_ibm (IFmode);
18607 init_float128_ieee (KFmode);
18610 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18611 if (TARGET_LONG_DOUBLE_128)
18613 if (!TARGET_IEEEQUAD)
18614 init_float128_ibm (TFmode);
18616 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18617 else
18618 init_float128_ieee (TFmode);
18622 /* Emit a potentially record-form instruction, setting DST from SRC.
18623 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18624 signed comparison of DST with zero. If DOT is 1, the generated RTL
18625 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18626 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18627 a separate COMPARE. */
18629 void
18630 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18632 if (dot == 0)
18634 emit_move_insn (dst, src);
18635 return;
18638 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18640 emit_move_insn (dst, src);
18641 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18642 return;
18645 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18646 if (dot == 1)
18648 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18649 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18651 else
18653 rtx set = gen_rtx_SET (dst, src);
18654 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
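/* For example (illustrative), rs6000_emit_dot_insn (dst, src, 1, ccreg)
   with CCREG being CR0 emits a single PARALLEL of the form

     (parallel [(set (reg:CC cr0) (compare:CC src (const_int 0)))
		(clobber dst)])

   which matches the record-form ("dot") patterns in rs6000.md.  */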
18659 /* A validation routine: say whether CODE, a condition code, and MODE
18660 match. The other alternatives either don't make sense or should
18661 never be generated. */
18663 void
18664 validate_condition_mode (enum rtx_code code, machine_mode mode)
18666 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18667 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18668 && GET_MODE_CLASS (mode) == MODE_CC);
18670 /* These don't make sense. */
18671 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18672 || mode != CCUNSmode);
18674 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18675 || mode == CCUNSmode);
18677 gcc_assert (mode == CCFPmode
18678 || (code != ORDERED && code != UNORDERED
18679 && code != UNEQ && code != LTGT
18680 && code != UNGT && code != UNLT
18681 && code != UNGE && code != UNLE));
18683 /* These should never be generated except for
18684 flag_finite_math_only. */
18685 gcc_assert (mode != CCFPmode
18686 || flag_finite_math_only
18687 || (code != LE && code != GE
18688 && code != UNEQ && code != LTGT
18689 && code != UNGT && code != UNLT));
18691 /* These are invalid; the information is not there. */
18692 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
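/* Examples (illustrative): (EQ, CCEQmode) and (GTU, CCUNSmode) pass all of
   the assertions above, while (GT, CCUNSmode), (GTU, CCmode) and
   (LTGT, CCmode) would each trip one of them.  */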
18696 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18697 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18698 non-null, store there the bit offset (counted from the right) where
18699 the single stretch of 1 bits begins; and similarly for B, the bit
18700 offset where it ends. */
18702 bool
18703 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18705 unsigned HOST_WIDE_INT val = INTVAL (mask);
18706 unsigned HOST_WIDE_INT bit;
18707 int nb, ne;
18708 int n = GET_MODE_PRECISION (mode);
18710 if (mode != DImode && mode != SImode)
18711 return false;
18713 if (INTVAL (mask) >= 0)
18715 bit = val & -val;
18716 ne = exact_log2 (bit);
18717 nb = exact_log2 (val + bit);
18719 else if (val + 1 == 0)
18721 nb = n;
18722 ne = 0;
18724 else if (val & 1)
18726 val = ~val;
18727 bit = val & -val;
18728 nb = exact_log2 (bit);
18729 ne = exact_log2 (val + bit);
18731 else
18733 bit = val & -val;
18734 ne = exact_log2 (bit);
18735 if (val + bit == 0)
18736 nb = n;
18737 else
18738 nb = 0;
18741 nb--;
18743 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18744 return false;
18746 if (b)
18747 *b = nb;
18748 if (e)
18749 *e = ne;
18751 return true;
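/* Worked examples (illustrative, DImode): 0xff0 is a single run of ones,
   giving *B = 11 and *E = 4; 0x8000000000000001 is a wrap-around run,
   giving *B = 0 and *E = 63; 0xff0ff contains two runs and is rejected.  */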
18754 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18755 or rldicr instruction, to implement an AND with it in mode MODE. */
18757 bool
18758 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18760 int nb, ne;
18762 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18763 return false;
18765 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18766 does not wrap. */
18767 if (mode == DImode)
18768 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18770 /* For SImode, rlwinm can do everything. */
18771 if (mode == SImode)
18772 return (nb < 32 && ne < 32);
18774 return false;
18777 /* Return the instruction template for an AND with mask in mode MODE, with
18778 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18780 const char *
18781 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18783 int nb, ne;
18785 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18786 gcc_unreachable ();
18788 if (mode == DImode && ne == 0)
18790 operands[3] = GEN_INT (63 - nb);
18791 if (dot)
18792 return "rldicl. %0,%1,0,%3";
18793 return "rldicl %0,%1,0,%3";
18796 if (mode == DImode && nb == 63)
18798 operands[3] = GEN_INT (63 - ne);
18799 if (dot)
18800 return "rldicr. %0,%1,0,%3";
18801 return "rldicr %0,%1,0,%3";
18804 if (nb < 32 && ne < 32)
18806 operands[3] = GEN_INT (31 - nb);
18807 operands[4] = GEN_INT (31 - ne);
18808 if (dot)
18809 return "rlwinm. %0,%1,0,%3,%4";
18810 return "rlwinm %0,%1,0,%3,%4";
18813 gcc_unreachable ();
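/* For instance (illustrative): the mask 0xffffffff in DImode has NE = 0
   and NB = 31, so the first arm above yields "rldicl %0,%1,0,32", which
   clears the upper 32 bits.  */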
18816 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18817 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18818 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18820 bool
18821 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18823 int nb, ne;
18825 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18826 return false;
18828 int n = GET_MODE_PRECISION (mode);
18829 int sh = -1;
18831 if (CONST_INT_P (XEXP (shift, 1)))
18833 sh = INTVAL (XEXP (shift, 1));
18834 if (sh < 0 || sh >= n)
18835 return false;
18838 rtx_code code = GET_CODE (shift);
18840 /* Convert any shift by 0 to a rotate, to simplify below code. */
18841 if (sh == 0)
18842 code = ROTATE;
18844 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18845 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18846 code = ASHIFT;
18847 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18849 code = LSHIFTRT;
18850 sh = n - sh;
18853 /* DImode rotates need rld*. */
18854 if (mode == DImode && code == ROTATE)
18855 return (nb == 63 || ne == 0 || ne == sh);
18857 /* SImode rotates need rlw*. */
18858 if (mode == SImode && code == ROTATE)
18859 return (nb < 32 && ne < 32 && sh < 32);
18861 /* Wrap-around masks are only okay for rotates. */
18862 if (ne > nb)
18863 return false;
18865 /* Variable shifts are only okay for rotates. */
18866 if (sh < 0)
18867 return false;
18869 /* Don't allow ASHIFT if the mask is wrong for that. */
18870 if (code == ASHIFT && ne < sh)
18871 return false;
18873 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18874 if the mask is wrong for that. */
18875 if (nb < 32 && ne < 32 && sh < 32
18876 && !(code == LSHIFTRT && nb >= 32 - sh))
18877 return true;
18879 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18880 if the mask is wrong for that. */
18881 if (code == LSHIFTRT)
18882 sh = 64 - sh;
18883 if (nb == 63 || ne == 0 || ne == sh)
18884 return !(code == LSHIFTRT && nb >= sh);
18886 return false;
18889 /* Return the instruction template for a shift with mask in mode MODE, with
18890 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18892 const char *
18893 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18895 int nb, ne;
18897 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18898 gcc_unreachable ();
18900 if (mode == DImode && ne == 0)
18902 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18903 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18904 operands[3] = GEN_INT (63 - nb);
18905 if (dot)
18906 return "rld%I2cl. %0,%1,%2,%3";
18907 return "rld%I2cl %0,%1,%2,%3";
18910 if (mode == DImode && nb == 63)
18912 operands[3] = GEN_INT (63 - ne);
18913 if (dot)
18914 return "rld%I2cr. %0,%1,%2,%3";
18915 return "rld%I2cr %0,%1,%2,%3";
18918 if (mode == DImode
18919 && GET_CODE (operands[4]) != LSHIFTRT
18920 && CONST_INT_P (operands[2])
18921 && ne == INTVAL (operands[2]))
18923 operands[3] = GEN_INT (63 - nb);
18924 if (dot)
18925 return "rld%I2c. %0,%1,%2,%3";
18926 return "rld%I2c %0,%1,%2,%3";
18929 if (nb < 32 && ne < 32)
18931 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18932 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18933 operands[3] = GEN_INT (31 - nb);
18934 operands[4] = GEN_INT (31 - ne);
18935 /* This insn can also be a 64-bit rotate with a mask that really makes
18936 it just a shift right (with mask); the %h below adjusts for that
18937 situation (the shift count is >= 32 in that case). */
18938 if (dot)
18939 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18940 return "rlw%I2nm %0,%1,%h2,%3,%4";
18943 gcc_unreachable ();
18946 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18947 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18948 ASHIFT, or LSHIFTRT) in mode MODE. */
18950 bool
18951 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18953 int nb, ne;
18955 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18956 return false;
18958 int n = GET_MODE_PRECISION (mode);
18960 int sh = INTVAL (XEXP (shift, 1));
18961 if (sh < 0 || sh >= n)
18962 return false;
18964 rtx_code code = GET_CODE (shift);
18966 /* Convert any shift by 0 to a rotate, to simplify below code. */
18967 if (sh == 0)
18968 code = ROTATE;
18970 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18971 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18972 code = ASHIFT;
18973 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18975 code = LSHIFTRT;
18976 sh = n - sh;
18979 /* DImode rotates need rldimi. */
18980 if (mode == DImode && code == ROTATE)
18981 return (ne == sh);
18983 /* SImode rotates need rlwimi. */
18984 if (mode == SImode && code == ROTATE)
18985 return (nb < 32 && ne < 32 && sh < 32);
18987 /* Wrap-around masks are only okay for rotates. */
18988 if (ne > nb)
18989 return false;
18991 /* Don't allow ASHIFT if the mask is wrong for that. */
18992 if (code == ASHIFT && ne < sh)
18993 return false;
18995 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18996 if the mask is wrong for that. */
18997 if (nb < 32 && ne < 32 && sh < 32
18998 && !(code == LSHIFTRT && nb >= 32 - sh))
18999 return true;
19001 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19002 if the mask is wrong for that. */
19003 if (code == LSHIFTRT)
19004 sh = 64 - sh;
19005 if (ne == sh)
19006 return !(code == LSHIFTRT && nb >= sh);
19008 return false;
19011 /* Return the instruction template for an insert with mask in mode MODE, with
19012 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19014 const char *
19015 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19017 int nb, ne;
19019 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19020 gcc_unreachable ();
19022 /* Prefer rldimi because rlwimi is cracked. */
19023 if (TARGET_POWERPC64
19024 && (!dot || mode == DImode)
19025 && GET_CODE (operands[4]) != LSHIFTRT
19026 && ne == INTVAL (operands[2]))
19028 operands[3] = GEN_INT (63 - nb);
19029 if (dot)
19030 return "rldimi. %0,%1,%2,%3";
19031 return "rldimi %0,%1,%2,%3";
19034 if (nb < 32 && ne < 32)
19036 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19037 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19038 operands[3] = GEN_INT (31 - nb);
19039 operands[4] = GEN_INT (31 - ne);
19040 if (dot)
19041 return "rlwimi. %0,%1,%2,%3,%4";
19042 return "rlwimi %0,%1,%2,%3,%4";
19045 gcc_unreachable ();
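/* For instance (illustrative): inserting the low 16 bits of %1 into bits
   16..31 of %0 in DImode has NB = 31, NE = 16 and a shift count of 16,
   so the rldimi arm above yields "rldimi %0,%1,16,32".  */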
19048 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19049 using two machine instructions. */
19051 bool
19052 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19054 /* There are two kinds of AND we can handle with two insns:
19055 1) those we can do with two rl* insns;
19056 2) ori[s];xori[s].
19058 We do not handle that last case yet. */
19060 /* If there is just one stretch of ones, we can do it. */
19061 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19062 return true;
19064 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19065 one insn, we can do the whole thing with two. */
19066 unsigned HOST_WIDE_INT val = INTVAL (c);
19067 unsigned HOST_WIDE_INT bit1 = val & -val;
19068 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19069 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19070 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19071 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
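/* Worked example (illustrative): for C = 0xff0ff, BIT1 = 0x1, BIT2 = 0x100
   (bottom of the lowest hole) and BIT3 = 0x1000 (first bit above it), so
   VAL + BIT3 - BIT2 = 0xfffff is a single run of ones -- a valid one-insn
   mask -- and the whole AND can be done in two insns.  */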
19074 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19075 If EXPAND is true, split rotate-and-mask instructions we generate to
19076 their constituent parts as well (this is used during expand); if DOT
19077 is 1, make the last insn a record-form instruction clobbering the
19078 destination GPR and setting the CC reg (from operands[3]); if 2, set
19079 that GPR as well as the CC reg. */
19081 void
19082 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19084 gcc_assert (!(expand && dot));
19086 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19088 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19089 shift right. This generates better code than doing the masks without
19090 shifts, or shifting first right and then left. */
19091 int nb, ne;
19092 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19094 gcc_assert (mode == DImode);
19096 int shift = 63 - nb;
19097 if (expand)
19099 rtx tmp1 = gen_reg_rtx (DImode);
19100 rtx tmp2 = gen_reg_rtx (DImode);
19101 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19102 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19103 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19105 else
19107 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19108 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19109 emit_move_insn (operands[0], tmp);
19110 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19111 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19113 return;
19116 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19117 that does the rest. */
19118 unsigned HOST_WIDE_INT bit1 = val & -val;
19119 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19120 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19121 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19123 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19124 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19126 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19128 /* Two "no-rotate"-and-mask instructions, for SImode. */
19129 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19131 gcc_assert (mode == SImode);
19133 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19134 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19135 emit_move_insn (reg, tmp);
19136 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19137 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19138 return;
19141 gcc_assert (mode == DImode);
19143 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19144 insns; we have to do the first in SImode, because it wraps. */
19145 if (mask2 <= 0xffffffff
19146 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19148 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19149 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19150 GEN_INT (mask1));
19151 rtx reg_low = gen_lowpart (SImode, reg);
19152 emit_move_insn (reg_low, tmp);
19153 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19154 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19155 return;
19158 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19159 at the top end), rotate back and clear the other hole. */
19160 int right = exact_log2 (bit3);
19161 int left = 64 - right;
19163 /* Rotate the mask too. */
19164 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19166 if (expand)
19168 rtx tmp1 = gen_reg_rtx (DImode);
19169 rtx tmp2 = gen_reg_rtx (DImode);
19170 rtx tmp3 = gen_reg_rtx (DImode);
19171 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19172 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19173 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19174 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19176 else
19178 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19179 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19180 emit_move_insn (operands[0], tmp);
19181 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19182 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19183 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19187 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
19188 for lfq and stfq insns iff the registers are hard registers. */
19190 int
19191 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19193 /* We might have been passed a SUBREG. */
19194 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19195 return 0;
19197 /* We might have been passed non floating point registers. */
19198 if (!FP_REGNO_P (REGNO (reg1))
19199 || !FP_REGNO_P (REGNO (reg2)))
19200 return 0;
19202 return (REGNO (reg1) == REGNO (reg2) - 1);
19205 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19206 addr1 and addr2 must be in consecutive memory locations
19207 (addr2 == addr1 + 8). */
19209 int
19210 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19212 rtx addr1, addr2;
19213 unsigned int reg1, reg2;
19214 int offset1, offset2;
19216 /* The mems cannot be volatile. */
19217 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19218 return 0;
19220 addr1 = XEXP (mem1, 0);
19221 addr2 = XEXP (mem2, 0);
19223 /* Extract an offset (if used) from the first addr. */
19224 if (GET_CODE (addr1) == PLUS)
19226 /* If not a REG, return zero. */
19227 if (GET_CODE (XEXP (addr1, 0)) != REG)
19228 return 0;
19229 else
19231 reg1 = REGNO (XEXP (addr1, 0));
19232 /* The offset must be constant! */
19233 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19234 return 0;
19235 offset1 = INTVAL (XEXP (addr1, 1));
19238 else if (GET_CODE (addr1) != REG)
19239 return 0;
19240 else
19242 reg1 = REGNO (addr1);
19243 /* This was a simple (mem (reg)) expression. Offset is 0. */
19244 offset1 = 0;
19247 /* And now for the second addr. */
19248 if (GET_CODE (addr2) == PLUS)
19250 /* If not a REG, return zero. */
19251 if (GET_CODE (XEXP (addr2, 0)) != REG)
19252 return 0;
19253 else
19255 reg2 = REGNO (XEXP (addr2, 0));
19256 /* The offset must be constant. */
19257 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19258 return 0;
19259 offset2 = INTVAL (XEXP (addr2, 1));
19262 else if (GET_CODE (addr2) != REG)
19263 return 0;
19264 else
19266 reg2 = REGNO (addr2);
19267 /* This was a simple (mem (reg)) expression. Offset is 0. */
19268 offset2 = 0;
19271 /* Both of these must have the same base register. */
19272 if (reg1 != reg2)
19273 return 0;
19275 /* The offset for the second addr must be 8 more than the first addr. */
19276 if (offset2 != offset1 + 8)
19277 return 0;
19279 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19280 instructions. */
19281 return 1;
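/* Example (illustrative): mem1 = (mem (plus (reg 3) (const_int 8))) and
   mem2 = (mem (plus (reg 3) (const_int 16))) share base register 3 and
   their offsets differ by 8, so the pair is accepted.  */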
19284 /* Implement TARGET_SECONDARY_RELOAD_NEEDED_MODE. For SDmode values we
19285 need to use DDmode, in all other cases we can use the same mode. */
19286 static machine_mode
19287 rs6000_secondary_memory_needed_mode (machine_mode mode)
19289 if (lra_in_progress && mode == SDmode)
19290 return DDmode;
19291 return mode;
19294 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19295 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19296 only work on the traditional altivec registers, note if an altivec register
19297 was chosen. */
19299 static enum rs6000_reg_type
19300 register_to_reg_type (rtx reg, bool *is_altivec)
19302 HOST_WIDE_INT regno;
19303 enum reg_class rclass;
19305 if (GET_CODE (reg) == SUBREG)
19306 reg = SUBREG_REG (reg);
19308 if (!REG_P (reg))
19309 return NO_REG_TYPE;
19311 regno = REGNO (reg);
19312 if (regno >= FIRST_PSEUDO_REGISTER)
19314 if (!lra_in_progress && !reload_completed)
19315 return PSEUDO_REG_TYPE;
19317 regno = true_regnum (reg);
19318 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19319 return PSEUDO_REG_TYPE;
19322 gcc_assert (regno >= 0);
19324 if (is_altivec && ALTIVEC_REGNO_P (regno))
19325 *is_altivec = true;
19327 rclass = rs6000_regno_regclass[regno];
19328 return reg_class_to_reg_type[(int)rclass];
19331 /* Helper function to return the cost of adding a TOC entry address. */
19333 static inline int
19334 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19336 int ret;
19338 if (TARGET_CMODEL != CMODEL_SMALL)
19339 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19341 else
19342 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19344 return ret;
19347 /* Helper function for rs6000_secondary_reload to determine whether the memory
19348 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19349 needs reloading. Return negative if the memory is not handled by the memory
19350 helper functions, so that a different reload method should be tried, 0 if
19351 no additional instructions are needed, and positive to give the extra
19352 cost of the memory access. */
19354 static int
19355 rs6000_secondary_reload_memory (rtx addr,
19356 enum reg_class rclass,
19357 machine_mode mode)
19359 int extra_cost = 0;
19360 rtx reg, and_arg, plus_arg0, plus_arg1;
19361 addr_mask_type addr_mask;
19362 const char *type = NULL;
19363 const char *fail_msg = NULL;
19365 if (GPR_REG_CLASS_P (rclass))
19366 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19368 else if (rclass == FLOAT_REGS)
19369 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19371 else if (rclass == ALTIVEC_REGS)
19372 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19374 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19375 else if (rclass == VSX_REGS)
19376 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19377 & ~RELOAD_REG_AND_M16);
19379 /* If the register allocator hasn't made up its mind yet on the register
19380 class to use, settle on sensible defaults. */
19381 else if (rclass == NO_REGS)
19383 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19384 & ~RELOAD_REG_AND_M16);
19386 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19387 addr_mask &= ~(RELOAD_REG_INDEXED
19388 | RELOAD_REG_PRE_INCDEC
19389 | RELOAD_REG_PRE_MODIFY);
19392 else
19393 addr_mask = 0;
19395 /* If the register isn't valid in this register class, just return now. */
19396 if ((addr_mask & RELOAD_REG_VALID) == 0)
19398 if (TARGET_DEBUG_ADDR)
19400 fprintf (stderr,
19401 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19402 "not valid in class\n",
19403 GET_MODE_NAME (mode), reg_class_names[rclass]);
19404 debug_rtx (addr);
19407 return -1;
19410 switch (GET_CODE (addr))
19412 /* Does the register class support auto update forms for this mode? We
19413 don't need a scratch register, since the powerpc only supports
19414 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19415 case PRE_INC:
19416 case PRE_DEC:
19417 reg = XEXP (addr, 0);
19418 if (!base_reg_operand (addr, GET_MODE (reg)))
19420 fail_msg = "no base register #1";
19421 extra_cost = -1;
19424 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19426 extra_cost = 1;
19427 type = "update";
19429 break;
19431 case PRE_MODIFY:
19432 reg = XEXP (addr, 0);
19433 plus_arg1 = XEXP (addr, 1);
19434 if (!base_reg_operand (reg, GET_MODE (reg))
19435 || GET_CODE (plus_arg1) != PLUS
19436 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19438 fail_msg = "bad PRE_MODIFY";
19439 extra_cost = -1;
19442 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19444 extra_cost = 1;
19445 type = "update";
19447 break;
19449 /* Do we need to simulate AND -16 to clear the bottom address bits used
19450 in VMX load/stores? Only allow the AND for vector sizes. */
19451 case AND:
19452 and_arg = XEXP (addr, 0);
19453 if (GET_MODE_SIZE (mode) != 16
19454 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19455 || INTVAL (XEXP (addr, 1)) != -16)
19457 fail_msg = "bad Altivec AND #1";
19458 extra_cost = -1;
19461 if (rclass != ALTIVEC_REGS)
19463 if (legitimate_indirect_address_p (and_arg, false))
19464 extra_cost = 1;
19466 else if (legitimate_indexed_address_p (and_arg, false))
19467 extra_cost = 2;
19469 else
19471 fail_msg = "bad Altivec AND #2";
19472 extra_cost = -1;
19475 type = "and";
19477 break;
19479 /* If this is an indirect address, make sure it is a base register. */
19480 case REG:
19481 case SUBREG:
19482 if (!legitimate_indirect_address_p (addr, false))
19484 extra_cost = 1;
19485 type = "move";
19487 break;
19489 /* If this is an indexed address, make sure the register class can handle
19490 indexed addresses for this mode. */
19491 case PLUS:
19492 plus_arg0 = XEXP (addr, 0);
19493 plus_arg1 = XEXP (addr, 1);
19495 /* (plus (plus (reg) (constant)) (constant)) is generated during
19496 push_reload processing, so handle it now. */
19497 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19499 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19501 extra_cost = 1;
19502 type = "offset";
19506 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19507 push_reload processing, so handle it now. */
19508 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19510 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19512 extra_cost = 1;
19513 type = "indexed #2";
19517 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19519 fail_msg = "no base register #2";
19520 extra_cost = -1;
19523 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19525 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19526 || !legitimate_indexed_address_p (addr, false))
19528 extra_cost = 1;
19529 type = "indexed";
19533 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19534 && CONST_INT_P (plus_arg1))
19536 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19538 extra_cost = 1;
19539 type = "vector d-form offset";
19543 /* Make sure the register class can handle offset addresses. */
19544 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19546 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19548 extra_cost = 1;
19549 type = "offset #2";
19553 else
19555 fail_msg = "bad PLUS";
19556 extra_cost = -1;
19559 break;
19561 case LO_SUM:
19562 /* Quad offsets are restricted and can't handle normal addresses. */
19563 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19565 extra_cost = -1;
19566 type = "vector d-form lo_sum";
19569 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19571 fail_msg = "bad LO_SUM";
19572 extra_cost = -1;
19575 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19577 extra_cost = 1;
19578 type = "lo_sum";
19580 break;
19582 /* Static addresses need to create a TOC entry. */
19583 case CONST:
19584 case SYMBOL_REF:
19585 case LABEL_REF:
19586 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19588 extra_cost = -1;
19589 type = "vector d-form lo_sum #2";
19592 else
19594 type = "address";
19595 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19597 break;
19599 /* TOC references look like offsetable memory. */
19600 case UNSPEC:
19601 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19603 fail_msg = "bad UNSPEC";
19604 extra_cost = -1;
19607 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19609 extra_cost = -1;
19610 type = "vector d-form lo_sum #3";
19613 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19615 extra_cost = 1;
19616 type = "toc reference";
19618 break;
19620 default:
19622 fail_msg = "bad address";
19623 extra_cost = -1;
19627 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19629 if (extra_cost < 0)
19630 fprintf (stderr,
19631 "rs6000_secondary_reload_memory error: mode = %s, "
19632 "class = %s, addr_mask = '%s', %s\n",
19633 GET_MODE_NAME (mode),
19634 reg_class_names[rclass],
19635 rs6000_debug_addr_mask (addr_mask, false),
19636 (fail_msg != NULL) ? fail_msg : "<bad address>");
19638 else
19639 fprintf (stderr,
19640 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19641 "addr_mask = '%s', extra cost = %d, %s\n",
19642 GET_MODE_NAME (mode),
19643 reg_class_names[rclass],
19644 rs6000_debug_addr_mask (addr_mask, false),
19645 extra_cost,
19646 (type) ? type : "<none>");
19648 debug_rtx (addr);
19651 return extra_cost;
19654 /* Helper function for rs6000_secondary_reload to return true if a move to a
19655 different register class is really a simple move. */
19657 static bool
19658 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19659 enum rs6000_reg_type from_type,
19660 machine_mode mode)
19662 int size = GET_MODE_SIZE (mode);
19664 /* Add support for various direct moves available. In this function, we only
19665 look at cases where we don't need any extra registers, and one or more
19666 simple move insns are issued. Originally small integers are not allowed
19667 in FPR/VSX registers. Single precision binary floating point is not a simple
19668 move because we need to convert to the single precision memory layout.
19669 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19670 need special direct move handling, which we do not support yet. */
19671 if (TARGET_DIRECT_MOVE
19672 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19673 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19675 if (TARGET_POWERPC64)
19677 /* ISA 2.07: MTVSRD or MFVSRD. */
19678 if (size == 8)
19679 return true;
19681 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19682 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19683 return true;
19686 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19687 if (TARGET_P8_VECTOR)
19689 if (mode == SImode)
19690 return true;
19692 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19693 return true;
19696 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19697 if (mode == SDmode)
19698 return true;
19701 /* Power6+: MFTGPR or MFFGPR. */
19702 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19703 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19704 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19705 return true;
19707 /* Move to/from SPR. */
19708 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19709 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19710 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19711 return true;
19713 return false;
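/* As a minimal illustration of the simple-move cases above (a sketch,
   assuming a -mcpu=power8 -O2 compile), a 64-bit bit-cast between the
   integer and floating-point domains needs no scratch register or memory
   round trip:

       double
       bits_to_double (long x)
       {
         union { long l; double d; } u;
         u.l = x;
         return u.d;
       }

   The move of U.L into the return value is a single mtvsrd; the reverse
   direction is a single mfvsrd.  */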
19716 /* Direct move helper function for rs6000_secondary_reload.  Handle all of the
19717 special direct moves that involve allocating an extra register.  Return true
19718 if such a helper insn exists, filling in SRI with its insn code and extra
19719 cost; return false if not. */
19721 static bool
19722 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19723 enum rs6000_reg_type from_type,
19724 machine_mode mode,
19725 secondary_reload_info *sri,
19726 bool altivec_p)
19728 bool ret = false;
19729 enum insn_code icode = CODE_FOR_nothing;
19730 int cost = 0;
19731 int size = GET_MODE_SIZE (mode);
19733 if (TARGET_POWERPC64 && size == 16)
19735 /* Handle moving 128-bit values from GPRs to VSX registers on
19736 ISA 2.07 (power8, power9) when running in 64-bit mode using
19737 XXPERMDI to glue the two 64-bit values back together. */
19738 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19740 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19741 icode = reg_addr[mode].reload_vsx_gpr;
19744 /* Handle moving 128-bit values from VSX registers to GPRs on
19745 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19746 bottom 64-bit value. */
19747 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19749 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19750 icode = reg_addr[mode].reload_gpr_vsx;
19754 else if (TARGET_POWERPC64 && mode == SFmode)
19756 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19758 cost = 3; /* xscvdpspn, mfvsrd, and. */
19759 icode = reg_addr[mode].reload_gpr_vsx;
19762 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19764 cost = 2; /* mtvsrwz, xscvspdpn. */
19765 icode = reg_addr[mode].reload_vsx_gpr;
19769 else if (!TARGET_POWERPC64 && size == 8)
19771 /* Handle moving 64-bit values from GPRs to floating point registers on
19772 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19773 32-bit values back together. Altivec register classes must be handled
19774 specially since a different instruction is used, and the secondary
19775 reload support requires a single instruction class in the scratch
19776 register constraint. However, right now TFmode is not allowed in
19777 Altivec registers, so the pattern will never match. */
19778 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19780 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19781 icode = reg_addr[mode].reload_fpr_gpr;
19785 if (icode != CODE_FOR_nothing)
19787 ret = true;
19788 if (sri)
19790 sri->icode = icode;
19791 sri->extra_cost = cost;
19795 return ret;
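/* For instance (illustrative register numbers), moving a 128-bit vector
   from a GPR pair into a VSX register on a 64-bit ISA 3.0 target can use
   mtvsrdd directly, but on ISA 2.07 the reload helper emits the
   three-insn sequence costed above:

       mtvsrd   vs32,r3
       mtvsrd   vs33,r4
       xxpermdi vs32,vs32,vs33,0

   gluing the two 64-bit halves back together via the scratch register.  */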
19798 /* Return whether a move between two register classes can be done either
19799 directly (simple move) or via a pattern that uses a single extra temporary
19800 (using ISA 2.07's direct move in this case). */
19802 static bool
19803 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19804 enum rs6000_reg_type from_type,
19805 machine_mode mode,
19806 secondary_reload_info *sri,
19807 bool altivec_p)
19809 /* Fall back to load/store reloads if either type is not a register. */
19810 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19811 return false;
19813 /* If we haven't allocated registers yet, assume the move can be done for the
19814 standard register types. */
19815 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19816 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19817 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19818 return true;
19820 /* A move within the same set of registers is a simple move for
19821 non-specialized registers. */
19822 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19823 return true;
19825 /* Check whether a simple move can be done directly. */
19826 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19828 if (sri)
19830 sri->icode = CODE_FOR_nothing;
19831 sri->extra_cost = 0;
19833 return true;
19836 /* Now check if we can do it in a few steps. */
19837 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19838 altivec_p);
19841 /* Inform reload about cases where moving X with a mode MODE to a register in
19842 RCLASS requires an extra scratch or immediate register. Return the class
19843 needed for the immediate register.
19845 For VSX and Altivec, we may need a register to convert sp+offset into
19846 reg+sp.
19848 For misaligned 64-bit gpr loads and stores we need a register to
19849 convert an offset address to indirect. */
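/* For example (a sketch; register numbers and offset are arbitrary), an
   Altivec vector load only supports indexed reg+reg addressing, so
   reloading a vector from sp+offset needs a scratch GPR for the offset:

       li   r11,offset
       lvx  v2,r11,r1

   and a misaligned 64-bit GPR access similarly moves the whole address
   into a scratch register and uses the indexed ldx/stdx forms.  */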
19851 static reg_class_t
19852 rs6000_secondary_reload (bool in_p,
19853 rtx x,
19854 reg_class_t rclass_i,
19855 machine_mode mode,
19856 secondary_reload_info *sri)
19858 enum reg_class rclass = (enum reg_class) rclass_i;
19859 reg_class_t ret = ALL_REGS;
19860 enum insn_code icode;
19861 bool default_p = false;
19862 bool done_p = false;
19864 /* Allow subreg of memory before/during reload. */
19865 bool memory_p = (MEM_P (x)
19866 || (!reload_completed && GET_CODE (x) == SUBREG
19867 && MEM_P (SUBREG_REG (x))));
19869 sri->icode = CODE_FOR_nothing;
19870 sri->t_icode = CODE_FOR_nothing;
19871 sri->extra_cost = 0;
19872 icode = ((in_p)
19873 ? reg_addr[mode].reload_load
19874 : reg_addr[mode].reload_store);
19876 if (REG_P (x) || register_operand (x, mode))
19878 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19879 bool altivec_p = (rclass == ALTIVEC_REGS);
19880 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19882 if (!in_p)
19883 std::swap (to_type, from_type);
19885 /* Can we do a direct move of some sort? */
19886 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19887 altivec_p))
19889 icode = (enum insn_code)sri->icode;
19890 default_p = false;
19891 done_p = true;
19892 ret = NO_REGS;
19896 /* Make sure 0.0 is not reloaded or forced into memory. */
19897 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19899 ret = NO_REGS;
19900 default_p = false;
19901 done_p = true;
19904 /* If this is a scalar floating point value and we want to load it into the
19905 traditional Altivec registers, route it through a traditional floating
19906 point register, unless we have D-form addressing. Also make sure that
19907 non-zero constants use an FPR. */
19908 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19909 && !mode_supports_vmx_dform (mode)
19910 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19911 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19913 ret = FLOAT_REGS;
19914 default_p = false;
19915 done_p = true;
19918 /* Handle reload of load/stores if we have reload helper functions. */
19919 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19921 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19922 mode);
19924 if (extra_cost >= 0)
19926 done_p = true;
19927 ret = NO_REGS;
19928 if (extra_cost > 0)
19930 sri->extra_cost = extra_cost;
19931 sri->icode = icode;
19936 /* Handle unaligned loads and stores of integer registers. */
19937 if (!done_p && TARGET_POWERPC64
19938 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19939 && memory_p
19940 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19942 rtx addr = XEXP (x, 0);
19943 rtx off = address_offset (addr);
19945 if (off != NULL_RTX)
19947 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19948 unsigned HOST_WIDE_INT offset = INTVAL (off);
19950 /* We need a secondary reload when our legitimate_address_p
19951 says the address is good (as otherwise the entire address
19952 will be reloaded), and the offset is not a multiple of
19953 four or we have an address wrap. Address wrap will only
19954 occur for LO_SUMs since legitimate_offset_address_p
19955 rejects addresses for 16-byte mems that will wrap. */
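/* Worked example (hypothetical): a DImode access at sp+6 satisfies
   legitimate_offset_address_p (extra = 0 and 6 + 0x8000 < 0x10000), but
   the DS-form ld/std instructions require the low two bits of the
   displacement to be zero, so (6 & 3) != 0 selects the secondary reload
   below.  */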
19956 if (GET_CODE (addr) == LO_SUM
19957 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19958 && ((offset & 3) != 0
19959 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19960 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19961 && (offset & 3) != 0))
19963 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19964 if (in_p)
19965 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19966 : CODE_FOR_reload_di_load);
19967 else
19968 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19969 : CODE_FOR_reload_di_store);
19970 sri->extra_cost = 2;
19971 ret = NO_REGS;
19972 done_p = true;
19974 else
19975 default_p = true;
19977 else
19978 default_p = true;
19981 if (!done_p && !TARGET_POWERPC64
19982 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19983 && memory_p
19984 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19986 rtx addr = XEXP (x, 0);
19987 rtx off = address_offset (addr);
19989 if (off != NULL_RTX)
19991 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19992 unsigned HOST_WIDE_INT offset = INTVAL (off);
19994 /* We need a secondary reload when our legitimate_address_p
19995 says the address is good (as otherwise the entire address
19996 will be reloaded), and we have a wrap.
19998 legitimate_lo_sum_address_p allows LO_SUM addresses to
19999 have any offset so test for wrap in the low 16 bits.
20001 legitimate_offset_address_p checks for the range
20002 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20003 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20004 [0x7ff4,0x7fff] respectively, so test for the
20005 intersection of these ranges, [0x7ffc,0x7fff] and
20006 [0x7ff4,0x7ff7] respectively.
20008 Note that the address we see here may have been
20009 manipulated by legitimize_reload_address. */
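/* Worked example (hypothetical): with 32-bit GPRs and an 8-byte mode,
   extra = 8 - 4 = 4, and the unsigned test below reduces to
   offset - 0x7ffc < 4, i.e. offset in [0x7ffc,0x7fff]; at any such
   offset the second word would need displacement 0x8000 or more, which
   no longer fits the 16-bit signed field.  */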
20010 if (GET_CODE (addr) == LO_SUM
20011 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20012 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20014 if (in_p)
20015 sri->icode = CODE_FOR_reload_si_load;
20016 else
20017 sri->icode = CODE_FOR_reload_si_store;
20018 sri->extra_cost = 2;
20019 ret = NO_REGS;
20020 done_p = true;
20022 else
20023 default_p = true;
20025 else
20026 default_p = true;
20029 if (!done_p)
20030 default_p = true;
20032 if (default_p)
20033 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20035 gcc_assert (ret != ALL_REGS);
20037 if (TARGET_DEBUG_ADDR)
20039 fprintf (stderr,
20040 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20041 "mode = %s",
20042 reg_class_names[ret],
20043 in_p ? "true" : "false",
20044 reg_class_names[rclass],
20045 GET_MODE_NAME (mode));
20047 if (reload_completed)
20048 fputs (", after reload", stderr);
20050 if (!done_p)
20051 fputs (", done_p not set", stderr);
20053 if (default_p)
20054 fputs (", default secondary reload", stderr);
20056 if (sri->icode != CODE_FOR_nothing)
20057 fprintf (stderr, ", reload func = %s, extra cost = %d",
20058 insn_data[sri->icode].name, sri->extra_cost);
20060 else if (sri->extra_cost > 0)
20061 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20063 fputs ("\n", stderr);
20064 debug_rtx (x);
20067 return ret;
20070 /* Better tracing for rs6000_secondary_reload_inner. */
20072 static void
20073 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20074 bool store_p)
20076 rtx set, clobber;
20078 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20080 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20081 store_p ? "store" : "load");
20083 if (store_p)
20084 set = gen_rtx_SET (mem, reg);
20085 else
20086 set = gen_rtx_SET (reg, mem);
20088 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20089 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20092 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20093 ATTRIBUTE_NORETURN;
20095 static void
20096 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20097 bool store_p)
20099 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20100 gcc_unreachable ();
20103 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20104 reload helper functions. These were identified in
20105 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20106 reload, it calls the insns:
20107 reload_<RELOAD:mode>_<P:mptrsize>_store
20108 reload_<RELOAD:mode>_<P:mptrsize>_load
20110 which in turn calls this function, to do whatever is necessary to create
20111 valid addresses. */
20113 void
20114 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20116 int regno = true_regnum (reg);
20117 machine_mode mode = GET_MODE (reg);
20118 addr_mask_type addr_mask;
20119 rtx addr;
20120 rtx new_addr;
20121 rtx op_reg, op0, op1;
20122 rtx and_op;
20123 rtx cc_clobber;
20124 rtvec rv;
20126 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20127 || !base_reg_operand (scratch, GET_MODE (scratch)))
20128 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20130 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20131 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20133 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20134 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20136 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20137 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20139 else
20140 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20142 /* Make sure the mode is valid in this register class. */
20143 if ((addr_mask & RELOAD_REG_VALID) == 0)
20144 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20146 if (TARGET_DEBUG_ADDR)
20147 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20149 new_addr = addr = XEXP (mem, 0);
20150 switch (GET_CODE (addr))
20152 /* Does the register class support auto update forms for this mode? If
20153 not, do the update now. We don't need a scratch register, since the
20154 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20155 case PRE_INC:
20156 case PRE_DEC:
20157 op_reg = XEXP (addr, 0);
20158 if (!base_reg_operand (op_reg, Pmode))
20159 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20161 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20163 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20164 new_addr = op_reg;
20166 break;
20168 case PRE_MODIFY:
20169 op0 = XEXP (addr, 0);
20170 op1 = XEXP (addr, 1);
20171 if (!base_reg_operand (op0, Pmode)
20172 || GET_CODE (op1) != PLUS
20173 || !rtx_equal_p (op0, XEXP (op1, 0)))
20174 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20176 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20178 emit_insn (gen_rtx_SET (op0, op1));
20179 new_addr = op0;
20181 break;
20183 /* Do we need to simulate AND -16 to clear the bottom address bits used
20184 in VMX load/stores? */
20185 case AND:
20186 op0 = XEXP (addr, 0);
20187 op1 = XEXP (addr, 1);
20188 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20190 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20191 op_reg = op0;
20193 else if (GET_CODE (op1) == PLUS)
20195 emit_insn (gen_rtx_SET (scratch, op1));
20196 op_reg = scratch;
20199 else
20200 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20202 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20203 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20204 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20205 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20206 new_addr = scratch;
20208 break;
20210 /* If this is an indirect address, make sure it is a base register. */
20211 case REG:
20212 case SUBREG:
20213 if (!base_reg_operand (addr, GET_MODE (addr)))
20215 emit_insn (gen_rtx_SET (scratch, addr));
20216 new_addr = scratch;
20218 break;
20220 /* If this is an indexed address, make sure the register class can handle
20221 indexed addresses for this mode. */
20222 case PLUS:
20223 op0 = XEXP (addr, 0);
20224 op1 = XEXP (addr, 1);
20225 if (!base_reg_operand (op0, Pmode))
20226 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20228 else if (int_reg_operand (op1, Pmode))
20230 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20232 emit_insn (gen_rtx_SET (scratch, addr));
20233 new_addr = scratch;
20237 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20239 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20240 || !quad_address_p (addr, mode, false))
20242 emit_insn (gen_rtx_SET (scratch, addr));
20243 new_addr = scratch;
20247 /* Make sure the register class can handle offset addresses. */
20248 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20250 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20252 emit_insn (gen_rtx_SET (scratch, addr));
20253 new_addr = scratch;
20257 else
20258 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20260 break;
20262 case LO_SUM:
20263 op0 = XEXP (addr, 0);
20264 op1 = XEXP (addr, 1);
20265 if (!base_reg_operand (op0, Pmode))
20266 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20268 else if (int_reg_operand (op1, Pmode))
20270 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20272 emit_insn (gen_rtx_SET (scratch, addr));
20273 new_addr = scratch;
20277 /* Quad offsets are restricted and can't handle normal addresses. */
20278 else if (mode_supports_vsx_dform_quad (mode))
20280 emit_insn (gen_rtx_SET (scratch, addr));
20281 new_addr = scratch;
20284 /* Make sure the register class can handle offset addresses. */
20285 else if (legitimate_lo_sum_address_p (mode, addr, false))
20287 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20289 emit_insn (gen_rtx_SET (scratch, addr));
20290 new_addr = scratch;
20294 else
20295 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20297 break;
20299 case SYMBOL_REF:
20300 case CONST:
20301 case LABEL_REF:
20302 rs6000_emit_move (scratch, addr, Pmode);
20303 new_addr = scratch;
20304 break;
20306 default:
20307 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20310 /* Adjust the address if it changed. */
20311 if (addr != new_addr)
20313 mem = replace_equiv_address_nv (mem, new_addr);
20314 if (TARGET_DEBUG_ADDR)
20315 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20318 /* Now create the move. */
20319 if (store_p)
20320 emit_insn (gen_rtx_SET (mem, reg));
20321 else
20322 emit_insn (gen_rtx_SET (reg, mem));
20324 return;
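/* For example (illustrative), when MEM has the Altivec-style address
   (and (plus (reg r9) (reg r10)) -16) but the register class lacks
   RELOAD_REG_AND_M16, the AND case above materializes the address into
   the scratch register, roughly:

       add    r11,r9,r10
       rldicr r11,r11,0,59

   (the rldicr clears the low four bits, simulating the AND with -16),
   and the load or store then uses (mem (reg r11)).  */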
20327 /* Convert reloads involving 64-bit gprs and misaligned offset
20328 addressing, or multiple 32-bit gprs and offsets that are too large,
20329 to use indirect addressing. */
20331 void
20332 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20334 int regno = true_regnum (reg);
20335 enum reg_class rclass;
20336 rtx addr;
20337 rtx scratch_or_premodify = scratch;
20339 if (TARGET_DEBUG_ADDR)
20341 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20342 store_p ? "store" : "load");
20343 fprintf (stderr, "reg:\n");
20344 debug_rtx (reg);
20345 fprintf (stderr, "mem:\n");
20346 debug_rtx (mem);
20347 fprintf (stderr, "scratch:\n");
20348 debug_rtx (scratch);
20351 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20352 gcc_assert (GET_CODE (mem) == MEM);
20353 rclass = REGNO_REG_CLASS (regno);
20354 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20355 addr = XEXP (mem, 0);
20357 if (GET_CODE (addr) == PRE_MODIFY)
20359 gcc_assert (REG_P (XEXP (addr, 0))
20360 && GET_CODE (XEXP (addr, 1)) == PLUS
20361 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20362 scratch_or_premodify = XEXP (addr, 0);
20363 if (!HARD_REGISTER_P (scratch_or_premodify))
20364 /* If we have a pseudo here then reload will have arranged
20365 to have it replaced, but only in the original insn.
20366 Use the replacement here too. */
20367 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20369 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20370 expressions from the original insn, without unsharing them.
20371 Any RTL that points into the original insn will of course
20372 have register replacements applied. That is why we don't
20373 need to look for replacements under the PLUS. */
20374 addr = XEXP (addr, 1);
20376 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20378 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20380 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20382 /* Now create the move. */
20383 if (store_p)
20384 emit_insn (gen_rtx_SET (mem, reg));
20385 else
20386 emit_insn (gen_rtx_SET (reg, mem));
20388 return;
20391 /* Given an rtx X being reloaded into a reg required to be
20392 in class CLASS, return the class of reg to actually use.
20393 In general this is just CLASS; but on some machines
20394 in some cases it is preferable to use a more restrictive class.
20396 On the RS/6000, we have to return NO_REGS when we want to reload a
20397 floating-point CONST_DOUBLE to force it to be copied to memory.
20399 We also don't want to reload integer values into floating-point
20400 registers if we can at all help it. In fact, this can
20401 cause reload to die, if it tries to generate a reload of CTR
20402 into a FP register and discovers it doesn't have the memory location
20403 required.
20405 ??? Would it be a good idea to have reload do the converse, that is
20406 try to reload floating modes into FP registers if possible? */
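/* Example (illustrative): reloading (const_int -1) in a vector mode into
   VSX_REGS keeps VSX_REGS on ISA 2.07 (xxlorc can form -1 in any VSX
   register), narrows to ALTIVEC_REGS on ISA 2.06 (only vspltisw can),
   and otherwise returns NO_REGS, forcing the constant to memory.  */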
20409 static enum reg_class
20410 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20412 machine_mode mode = GET_MODE (x);
20413 bool is_constant = CONSTANT_P (x);
20415 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20416 reload class for it. */
20417 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20418 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20419 return NO_REGS;
20421 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20422 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20423 return NO_REGS;
20425 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20426 the reloading of address expressions using PLUS into floating point
20427 registers. */
20428 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20430 if (is_constant)
20432 /* Zero is always allowed in all VSX registers. */
20433 if (x == CONST0_RTX (mode))
20434 return rclass;
20436 /* If this is a vector constant that can be formed with a few Altivec
20437 instructions, we want altivec registers. */
20438 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20439 return ALTIVEC_REGS;
20441 /* If this is an integer constant that can easily be loaded into
20442 vector registers, allow it. */
20443 if (CONST_INT_P (x))
20445 HOST_WIDE_INT value = INTVAL (x);
20447 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20448 2.06 can generate it in the Altivec registers with
20449 VSPLTI<x>. */
20450 if (value == -1)
20452 if (TARGET_P8_VECTOR)
20453 return rclass;
20454 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20455 return ALTIVEC_REGS;
20456 else
20457 return NO_REGS;
20460 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20461 a sign extend in the Altivec registers. */
20462 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20463 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20464 return ALTIVEC_REGS;
20467 /* Force constant to memory. */
20468 return NO_REGS;
20471 /* D-form addressing can easily reload the value. */
20472 if (mode_supports_vmx_dform (mode)
20473 || mode_supports_vsx_dform_quad (mode))
20474 return rclass;
20476 /* If this is a scalar floating point value and we don't have D-form
20477 addressing, prefer the traditional floating point registers so that we
20478 can use D-form (register+offset) addressing. */
20479 if (rclass == VSX_REGS
20480 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20481 return FLOAT_REGS;
20483 /* Prefer the Altivec registers if Altivec is handling the vector
20484 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20485 loads. */
20486 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20487 || mode == V1TImode)
20488 return ALTIVEC_REGS;
20490 return rclass;
20493 if (is_constant || GET_CODE (x) == PLUS)
20495 if (reg_class_subset_p (GENERAL_REGS, rclass))
20496 return GENERAL_REGS;
20497 if (reg_class_subset_p (BASE_REGS, rclass))
20498 return BASE_REGS;
20499 return NO_REGS;
20502 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20503 return GENERAL_REGS;
20505 return rclass;
20508 /* Debug version of rs6000_preferred_reload_class. */
20509 static enum reg_class
20510 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20512 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20514 fprintf (stderr,
20515 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20516 "mode = %s, x:\n",
20517 reg_class_names[ret], reg_class_names[rclass],
20518 GET_MODE_NAME (GET_MODE (x)));
20519 debug_rtx (x);
20521 return ret;
20524 /* If we are copying between FP or AltiVec registers and anything else, we need
20525 a memory location. The exception is when we are targeting ppc64 and the
20526 fpr-to-gpr and gpr-to-fpr move instructions are available. Also, under VSX, you
20527 can copy vector registers from the FP register set to the Altivec register
20528 set and vice versa. */
20530 static bool
20531 rs6000_secondary_memory_needed (machine_mode mode,
20532 reg_class_t from_class,
20533 reg_class_t to_class)
20535 enum rs6000_reg_type from_type, to_type;
20536 bool altivec_p = ((from_class == ALTIVEC_REGS)
20537 || (to_class == ALTIVEC_REGS));
20539 /* If a simple/direct move is available, we don't need secondary memory. */
20540 from_type = reg_class_to_reg_type[(int)from_class];
20541 to_type = reg_class_to_reg_type[(int)to_class];
20543 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20544 (secondary_reload_info *)0, altivec_p))
20545 return false;
20547 /* If we have a floating point or vector register class, we need to use
20548 memory to transfer the data. */
20549 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20550 return true;
20552 return false;
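/* Example (illustrative): copying DFmode between FLOAT_REGS and
   GENERAL_REGS without direct-move support (say, 32-bit power7) has no
   register-to-register path, so this returns true and the copy goes
   through a stack slot; on a 64-bit ISA 2.07 target the same copy is a
   single mtvsrd/mfvsrd and no secondary memory is needed.  */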
20555 /* Debug version of rs6000_secondary_memory_needed. */
20556 static bool
20557 rs6000_debug_secondary_memory_needed (machine_mode mode,
20558 reg_class_t from_class,
20559 reg_class_t to_class)
20561 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20563 fprintf (stderr,
20564 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20565 "to_class = %s, mode = %s\n",
20566 ret ? "true" : "false",
20567 reg_class_names[from_class],
20568 reg_class_names[to_class],
20569 GET_MODE_NAME (mode));
20571 return ret;
20574 /* Return the register class of a scratch register needed to copy IN into
20575 or out of a register in RCLASS in MODE. If it can be done directly,
20576 NO_REGS is returned. */
20578 static enum reg_class
20579 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20580 rtx in)
20582 int regno;
20584 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20585 #if TARGET_MACHO
20586 && MACHOPIC_INDIRECT
20587 #endif
20590 /* We cannot copy a symbolic operand directly into anything
20591 other than BASE_REGS for TARGET_ELF. So indicate that a
20592 register from BASE_REGS is needed as an intermediate
20593 register.
20595 On Darwin, pic addresses require a load from memory, which
20596 needs a base register. */
20597 if (rclass != BASE_REGS
20598 && (GET_CODE (in) == SYMBOL_REF
20599 || GET_CODE (in) == HIGH
20600 || GET_CODE (in) == LABEL_REF
20601 || GET_CODE (in) == CONST))
20602 return BASE_REGS;
20605 if (GET_CODE (in) == REG)
20607 regno = REGNO (in);
20608 if (regno >= FIRST_PSEUDO_REGISTER)
20610 regno = true_regnum (in);
20611 if (regno >= FIRST_PSEUDO_REGISTER)
20612 regno = -1;
20615 else if (GET_CODE (in) == SUBREG)
20617 regno = true_regnum (in);
20618 if (regno >= FIRST_PSEUDO_REGISTER)
20619 regno = -1;
20621 else
20622 regno = -1;
20624 /* If we have VSX register moves, prefer moving scalar values between
20625 Altivec registers and GPRs by going via an FPR (and then via memory)
20626 instead of reloading the secondary memory address for Altivec moves. */
20627 if (TARGET_VSX
20628 && GET_MODE_SIZE (mode) < 16
20629 && !mode_supports_vmx_dform (mode)
20630 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20631 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20632 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20633 && (regno >= 0 && INT_REGNO_P (regno)))))
20634 return FLOAT_REGS;
20636 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20637 into anything. */
20638 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20639 || (regno >= 0 && INT_REGNO_P (regno)))
20640 return NO_REGS;
20642 /* Constants, memory, and VSX registers can go into VSX registers (both the
20643 traditional floating point and the altivec registers). */
20644 if (rclass == VSX_REGS
20645 && (regno == -1 || VSX_REGNO_P (regno)))
20646 return NO_REGS;
20648 /* Constants, memory, and FP registers can go into FP registers. */
20649 if ((regno == -1 || FP_REGNO_P (regno))
20650 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20651 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20653 /* Memory and AltiVec registers can go into AltiVec registers. */
20654 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20655 && rclass == ALTIVEC_REGS)
20656 return NO_REGS;
20658 /* We can copy among the CR registers. */
20659 if ((rclass == CR_REGS || rclass == CR0_REGS)
20660 && regno >= 0 && CR_REGNO_P (regno))
20661 return NO_REGS;
20663 /* Otherwise, we need GENERAL_REGS. */
20664 return GENERAL_REGS;
20667 /* Debug version of rs6000_secondary_reload_class. */
20668 static enum reg_class
20669 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20670 machine_mode mode, rtx in)
20672 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20673 fprintf (stderr,
20674 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20675 "mode = %s, input rtx:\n",
20676 reg_class_names[ret], reg_class_names[rclass],
20677 GET_MODE_NAME (mode));
20678 debug_rtx (in);
20680 return ret;
20683 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20685 static bool
20686 rs6000_can_change_mode_class (machine_mode from,
20687 machine_mode to,
20688 reg_class_t rclass)
20690 unsigned from_size = GET_MODE_SIZE (from);
20691 unsigned to_size = GET_MODE_SIZE (to);
20693 if (from_size != to_size)
20695 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20697 if (reg_classes_intersect_p (xclass, rclass))
20699 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20700 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20701 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20702 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20704 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20705 single register under VSX because the scalar part of the register
20706 is in the upper 64-bits, and not the lower 64-bits. Types like
20707 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20708 IEEE floating point can't overlap, and neither can small
20709 values. */
20711 if (to_float128_vector_p && from_float128_vector_p)
20712 return true;
20714 else if (to_float128_vector_p || from_float128_vector_p)
20715 return false;
20717 /* TDmode in floating-point registers must always go into a register
20718 pair with the most significant word in the even-numbered register
20719 to match ISA requirements. In little-endian mode, this does not
20720 match subreg numbering, so we cannot allow subregs. */
20721 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20722 return false;
20724 if (from_size < 8 || to_size < 8)
20725 return false;
20727 if (from_size == 8 && (8 * to_nregs) != to_size)
20728 return false;
20730 if (to_size == 8 && (8 * from_nregs) != from_size)
20731 return false;
20733 return true;
20735 else
20736 return true;
20739 /* Since the VSX register set includes traditional floating point registers
20740 and altivec registers, just check for the size being different instead of
20741 trying to check whether the modes are vector modes. Otherwise it won't
20742 allow say DF and DI to change classes. For types like TFmode and TDmode
20743 that take 2 64-bit registers, rather than a single 128-bit register, don't
20744 allow subregs of those types to other 128-bit types. */
20745 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20747 unsigned num_regs = (from_size + 15) / 16;
20748 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20749 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20750 return false;
20752 return (from_size == 8 || from_size == 16);
20755 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20756 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20757 return false;
20759 return true;
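/* Example (illustrative): (subreg:DI (reg:V2DF vsx) 0) is rejected for
   the VSX classes by the size checks above, because the 64-bit scalar
   occupies the upper half of the 128-bit register and so does not match
   subreg byte numbering; a DFmode/DImode change, both 8 bytes, remains
   allowed.  */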
20762 /* Debug version of rs6000_can_change_mode_class. */
20763 static bool
20764 rs6000_debug_can_change_mode_class (machine_mode from,
20765 machine_mode to,
20766 reg_class_t rclass)
20768 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20770 fprintf (stderr,
20771 "rs6000_can_change_mode_class, return %s, from = %s, "
20772 "to = %s, rclass = %s\n",
20773 ret ? "true" : "false",
20774 GET_MODE_NAME (from), GET_MODE_NAME (to),
20775 reg_class_names[rclass]);
20777 return ret;
20780 /* Return a string to do a move operation of 128 bits of data. */
20782 const char *
20783 rs6000_output_move_128bit (rtx operands[])
20785 rtx dest = operands[0];
20786 rtx src = operands[1];
20787 machine_mode mode = GET_MODE (dest);
20788 int dest_regno;
20789 int src_regno;
20790 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20791 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20793 if (REG_P (dest))
20795 dest_regno = REGNO (dest);
20796 dest_gpr_p = INT_REGNO_P (dest_regno);
20797 dest_fp_p = FP_REGNO_P (dest_regno);
20798 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20799 dest_vsx_p = dest_fp_p | dest_vmx_p;
20801 else
20803 dest_regno = -1;
20804 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20807 if (REG_P (src))
20809 src_regno = REGNO (src);
20810 src_gpr_p = INT_REGNO_P (src_regno);
20811 src_fp_p = FP_REGNO_P (src_regno);
20812 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20813 src_vsx_p = src_fp_p | src_vmx_p;
20815 else
20817 src_regno = -1;
20818 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20821 /* Register moves. */
20822 if (dest_regno >= 0 && src_regno >= 0)
20824 if (dest_gpr_p)
20826 if (src_gpr_p)
20827 return "#";
20829 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20830 return (WORDS_BIG_ENDIAN
20831 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20832 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20834 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20835 return "#";
20838 else if (TARGET_VSX && dest_vsx_p)
20840 if (src_vsx_p)
20841 return "xxlor %x0,%x1,%x1";
20843 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20844 return (WORDS_BIG_ENDIAN
20845 ? "mtvsrdd %x0,%1,%L1"
20846 : "mtvsrdd %x0,%L1,%1");
20848 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20849 return "#";
20852 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20853 return "vor %0,%1,%1";
20855 else if (dest_fp_p && src_fp_p)
20856 return "#";
20859 /* Loads. */
20860 else if (dest_regno >= 0 && MEM_P (src))
20862 if (dest_gpr_p)
20864 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20865 return "lq %0,%1";
20866 else
20867 return "#";
20870 else if (TARGET_ALTIVEC && dest_vmx_p
20871 && altivec_indexed_or_indirect_operand (src, mode))
20872 return "lvx %0,%y1";
20874 else if (TARGET_VSX && dest_vsx_p)
20876 if (mode_supports_vsx_dform_quad (mode)
20877 && quad_address_p (XEXP (src, 0), mode, true))
20878 return "lxv %x0,%1";
20880 else if (TARGET_P9_VECTOR)
20881 return "lxvx %x0,%y1";
20883 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20884 return "lxvw4x %x0,%y1";
20886 else
20887 return "lxvd2x %x0,%y1";
20890 else if (TARGET_ALTIVEC && dest_vmx_p)
20891 return "lvx %0,%y1";
20893 else if (dest_fp_p)
20894 return "#";
20897 /* Stores. */
20898 else if (src_regno >= 0 && MEM_P (dest))
20900 if (src_gpr_p)
20902 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20903 return "stq %1,%0";
20904 else
20905 return "#";
20908 else if (TARGET_ALTIVEC && src_vmx_p
20909 && altivec_indexed_or_indirect_operand (dest, mode))
20910 return "stvx %1,%y0";
20912 else if (TARGET_VSX && src_vsx_p)
20914 if (mode_supports_vsx_dform_quad (mode)
20915 && quad_address_p (XEXP (dest, 0), mode, true))
20916 return "stxv %x1,%0";
20918 else if (TARGET_P9_VECTOR)
20919 return "stxvx %x1,%y0";
20921 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20922 return "stxvw4x %x1,%y0";
20924 else
20925 return "stxvd2x %x1,%y0";
20928 else if (TARGET_ALTIVEC && src_vmx_p)
20929 return "stvx %1,%y0";
20931 else if (src_fp_p)
20932 return "#";
20935 /* Constants. */
20936 else if (dest_regno >= 0
20937 && (GET_CODE (src) == CONST_INT
20938 || GET_CODE (src) == CONST_WIDE_INT
20939 || GET_CODE (src) == CONST_DOUBLE
20940 || GET_CODE (src) == CONST_VECTOR))
20942 if (dest_gpr_p)
20943 return "#";
20945 else if ((dest_vmx_p && TARGET_ALTIVEC)
20946 || (dest_vsx_p && TARGET_VSX))
20947 return output_vec_const_move (operands);
20950 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
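/* For instance (illustrative), a V2DImode VSX-to-VSX register move
   returns the template "xxlor %x0,%x1,%x1", while a GPR-to-GPR move
   returns "#" so that the post-reload splitter breaks it into
   word-sized moves.  */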
20953 /* Validate a 128-bit move. */
20954 bool
20955 rs6000_move_128bit_ok_p (rtx operands[])
20957 machine_mode mode = GET_MODE (operands[0]);
20958 return (gpc_reg_operand (operands[0], mode)
20959 || gpc_reg_operand (operands[1], mode));
20962 /* Return true if a 128-bit move needs to be split. */
20963 bool
20964 rs6000_split_128bit_ok_p (rtx operands[])
20966 if (!reload_completed)
20967 return false;
20969 if (!gpr_or_gpr_p (operands[0], operands[1]))
20970 return false;
20972 if (quad_load_store_p (operands[0], operands[1]))
20973 return false;
20975 return true;
20979 /* Given a comparison operation, return the bit number in CCR to test. We
20980 know this is a valid comparison.
20982 SCC_P is 1 if this is for an scc. That means that %D will have been
20983 used instead of %C, so the bits will be in different places.
20985 Return -1 if OP isn't a valid comparison for some reason. */
20987 int
20988 ccr_bit (rtx op, int scc_p)
20990 enum rtx_code code = GET_CODE (op);
20991 machine_mode cc_mode;
20992 int cc_regnum;
20993 int base_bit;
20994 rtx reg;
20996 if (!COMPARISON_P (op))
20997 return -1;
20999 reg = XEXP (op, 0);
21001 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21003 cc_mode = GET_MODE (reg);
21004 cc_regnum = REGNO (reg);
21005 base_bit = 4 * (cc_regnum - CR0_REGNO);
21007 validate_condition_mode (code, cc_mode);
21009 /* When generating a sCOND operation, only positive conditions are
21010 allowed. */
21011 gcc_assert (!scc_p
21012 || code == EQ || code == GT || code == LT || code == UNORDERED
21013 || code == GTU || code == LTU);
21015 switch (code)
21017 case NE:
21018 return scc_p ? base_bit + 3 : base_bit + 2;
21019 case EQ:
21020 return base_bit + 2;
21021 case GT: case GTU: case UNLE:
21022 return base_bit + 1;
21023 case LT: case LTU: case UNGE:
21024 return base_bit;
21025 case ORDERED: case UNORDERED:
21026 return base_bit + 3;
21028 case GE: case GEU:
21029 /* If scc, we will have done a cror to put the bit in the
21030 unordered position. So test that bit. For integer, this is ! LT
21031 unless this is an scc insn. */
21032 return scc_p ? base_bit + 3 : base_bit;
21034 case LE: case LEU:
21035 return scc_p ? base_bit + 3 : base_bit + 1;
21037 default:
21038 gcc_unreachable ();
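/* Worked example (hypothetical): an EQ test of CR field 2 has
   base_bit = 4 * 2 = 8 and returns bit 10, since EQ is the third bit of
   the field; for an scc-style NE the result is base_bit + 3 = 11,
   because the preceding cror left the answer in the unordered
   position.  */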
21042 /* Return the GOT register. */
21044 rtx
21045 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21047 /* The second flow pass currently (June 1999) can't update
21048 regs_ever_live without disturbing other parts of the compiler, so
21049 update it here to make the prolog/epilogue code happy. */
21050 if (!can_create_pseudo_p ()
21051 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21052 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21054 crtl->uses_pic_offset_table = 1;
21056 return pic_offset_table_rtx;
21059 static rs6000_stack_t stack_info;
21061 /* Function to init struct machine_function.
21062 This will be called, via a pointer variable,
21063 from push_function_context. */
21065 static struct machine_function *
21066 rs6000_init_machine_status (void)
21068 stack_info.reload_completed = 0;
21069 return ggc_cleared_alloc<machine_function> ();
21072 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21074 /* Write out a function code label. */
21076 void
21077 rs6000_output_function_entry (FILE *file, const char *fname)
21079 if (fname[0] != '.')
21081 switch (DEFAULT_ABI)
21083 default:
21084 gcc_unreachable ();
21086 case ABI_AIX:
21087 if (DOT_SYMBOLS)
21088 putc ('.', file);
21089 else
21090 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21091 break;
21093 case ABI_ELFv2:
21094 case ABI_V4:
21095 case ABI_DARWIN:
21096 break;
21100 RS6000_OUTPUT_BASENAME (file, fname);
21103 /* Print an operand. Recognize special options, documented below. */
21105 #if TARGET_ELF
21106 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21107 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21108 #else
21109 #define SMALL_DATA_RELOC "sda21"
21110 #define SMALL_DATA_REG 0
21111 #endif
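/* For example (illustrative), in a machine-description output template
   such as "add %0,%1,%2" the default case below prints plain register
   names, "%L1" prints the register holding the second word of a
   multi-word operand, and "%u2" prints whichever 16-bit half of a
   constant is non-zero; these codes only appear inside output
   templates.  */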
21113 void
21114 print_operand (FILE *file, rtx x, int code)
21116 int i;
21117 unsigned HOST_WIDE_INT uval;
21119 switch (code)
21121 /* %a is output_address. */
21123 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21124 output_operand. */
21126 case 'D':
21127 /* Like 'J' but get to the GT bit only. */
21128 gcc_assert (REG_P (x));
21130 /* Bit 1 is GT bit. */
21131 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21133 /* Add one for shift count in rlinm for scc. */
21134 fprintf (file, "%d", i + 1);
21135 return;
21137 case 'e':
21138 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21139 if (! INT_P (x))
21141 output_operand_lossage ("invalid %%e value");
21142 return;
21145 uval = INTVAL (x);
21146 if ((uval & 0xffff) == 0 && uval != 0)
21147 putc ('s', file);
21148 return;
21150 case 'E':
21151 /* X is a CR register. Print the number of the EQ bit of the CR. */
21152 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21153 output_operand_lossage ("invalid %%E value");
21154 else
21155 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21156 return;
21158 case 'f':
21159 /* X is a CR register. Print the shift count needed to move it
21160 to the high-order four bits. */
21161 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21162 output_operand_lossage ("invalid %%f value");
21163 else
21164 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21165 return;
21167 case 'F':
21168 /* Similar, but print the count for the rotate in the opposite
21169 direction. */
21170 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21171 output_operand_lossage ("invalid %%F value");
21172 else
21173 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21174 return;
21176 case 'G':
21177 /* X is a constant integer. If it is negative, print "m",
21178 otherwise print "z". This is to make an aze or ame insn. */
21179 if (GET_CODE (x) != CONST_INT)
21180 output_operand_lossage ("invalid %%G value");
21181 else if (INTVAL (x) >= 0)
21182 putc ('z', file);
21183 else
21184 putc ('m', file);
21185 return;
21187 case 'h':
21188 /* If constant, output low-order five bits. Otherwise, write
21189 normally. */
21190 if (INT_P (x))
21191 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21192 else
21193 print_operand (file, x, 0);
21194 return;
21196 case 'H':
21197 /* If constant, output low-order six bits. Otherwise, write
21198 normally. */
21199 if (INT_P (x))
21200 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21201 else
21202 print_operand (file, x, 0);
21203 return;
21205 case 'I':
21206 /* Print `i' if this is a constant, else nothing. */
21207 if (INT_P (x))
21208 putc ('i', file);
21209 return;
21211 case 'j':
21212 /* Write the bit number in CCR for jump. */
21213 i = ccr_bit (x, 0);
21214 if (i == -1)
21215 output_operand_lossage ("invalid %%j code");
21216 else
21217 fprintf (file, "%d", i);
21218 return;
21220 case 'J':
21221 /* Similar, but add one for shift count in rlinm for scc and pass
21222 scc flag to `ccr_bit'. */
21223 i = ccr_bit (x, 1);
21224 if (i == -1)
21225 output_operand_lossage ("invalid %%J code");
21226 else
21227 /* If we want bit 31, write a shift count of zero, not 32. */
21228 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21229 return;
21231 case 'k':
21232 /* X must be a constant. Write the 1's complement of the
21233 constant. */
21234 if (! INT_P (x))
21235 output_operand_lossage ("invalid %%k value");
21236 else
21237 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21238 return;
21240 case 'K':
21241 /* X must be a symbolic constant on ELF. Write an
21242 expression suitable for an 'addi' that adds in the low 16
21243 bits of the MEM. */
21244 if (GET_CODE (x) == CONST)
21246 if (GET_CODE (XEXP (x, 0)) != PLUS
21247 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21248 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21249 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21250 output_operand_lossage ("invalid %%K value");
21252 print_operand_address (file, x);
21253 fputs ("@l", file);
21254 return;
21256 /* %l is output_asm_label. */
21258 case 'L':
21259 /* Write second word of DImode or DFmode reference. Works on register
21260 or non-indexed memory only. */
21261 if (REG_P (x))
21262 fputs (reg_names[REGNO (x) + 1], file);
21263 else if (MEM_P (x))
21265 machine_mode mode = GET_MODE (x);
21266 /* Handle possible auto-increment. Since it is pre-increment and
21267 we have already done it, we can just use an offset of one word. */
21268 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21269 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21270 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21271 UNITS_PER_WORD));
21272 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21273 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21274 UNITS_PER_WORD));
21275 else
21276 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21277 UNITS_PER_WORD),
21278 0));
21280 if (small_data_operand (x, GET_MODE (x)))
21281 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21282 reg_names[SMALL_DATA_REG]);
21284 return;
21286 case 'N':
21287 /* Write the number of elements in the vector times 4. */
21288 if (GET_CODE (x) != PARALLEL)
21289 output_operand_lossage ("invalid %%N value");
21290 else
21291 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21292 return;
21294 case 'O':
21295 /* Similar, but subtract 1 first. */
21296 if (GET_CODE (x) != PARALLEL)
21297 output_operand_lossage ("invalid %%O value");
21298 else
21299 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21300 return;
21302 case 'p':
21303 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21304 if (! INT_P (x)
21305 || INTVAL (x) < 0
21306 || (i = exact_log2 (INTVAL (x))) < 0)
21307 output_operand_lossage ("invalid %%p value");
21308 else
21309 fprintf (file, "%d", i);
21310 return;
21312 case 'P':
21313 /* The operand must be an indirect memory reference. The result
21314 is the register name. */
21315 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21316 || REGNO (XEXP (x, 0)) >= 32)
21317 output_operand_lossage ("invalid %%P value");
21318 else
21319 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21320 return;
21322 case 'q':
21323 /* This outputs the logical code corresponding to a boolean
21324 expression. The expression may have one or both operands
21325 negated (if one, only the first one). For condition register
21326 logical operations, it will also treat the negated
21327 CR codes as NOTs, but not handle NOTs of them. */
21329 const char *const *t = 0;
21330 const char *s;
21331 enum rtx_code code = GET_CODE (x);
21332 static const char * const tbl[3][3] = {
21333 { "and", "andc", "nor" },
21334 { "or", "orc", "nand" },
21335 { "xor", "eqv", "xor" } };
21337 if (code == AND)
21338 t = tbl[0];
21339 else if (code == IOR)
21340 t = tbl[1];
21341 else if (code == XOR)
21342 t = tbl[2];
21343 else
21344 output_operand_lossage ("invalid %%q value");
21346 if (GET_CODE (XEXP (x, 0)) != NOT)
21347 s = t[0];
21348 else
21350 if (GET_CODE (XEXP (x, 1)) == NOT)
21351 s = t[2];
21352 else
21353 s = t[1];
21356 fputs (s, file);
21358 return;
21360 case 'Q':
21361 if (! TARGET_MFCRF)
21362 return;
21363 fputc (',', file);
21364 /* FALLTHRU */
21366 case 'R':
21367 /* X is a CR register. Print the mask for `mtcrf'. */
21368 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21369 output_operand_lossage ("invalid %%R value");
21370 else
21371 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21372 return;
21374 case 's':
21375 /* Low 5 bits of 32 - value. */
21376 if (! INT_P (x))
21377 output_operand_lossage ("invalid %%s value");
21378 else
21379 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21380 return;
21382 case 't':
21383 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21384 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21386 /* Bit 3 is OV bit. */
21387 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21389 /* If we want bit 31, write a shift count of zero, not 32. */
21390 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21391 return;
21393 case 'T':
21394 /* Print the symbolic name of a branch target register. */
21395 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21396 && REGNO (x) != CTR_REGNO))
21397 output_operand_lossage ("invalid %%T value");
21398 else if (REGNO (x) == LR_REGNO)
21399 fputs ("lr", file);
21400 else
21401 fputs ("ctr", file);
21402 return;
21404 case 'u':
21405 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21406 for use in unsigned operand. */
21407 if (! INT_P (x))
21409 output_operand_lossage ("invalid %%u value");
21410 return;
21413 uval = INTVAL (x);
21414 if ((uval & 0xffff) == 0)
21415 uval >>= 16;
21417 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21418 return;
21420 case 'v':
21421 /* High-order 16 bits of constant for use in signed operand. */
21422 if (! INT_P (x))
21423 output_operand_lossage ("invalid %%v value");
21424 else
21425 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21426 (INTVAL (x) >> 16) & 0xffff);
21427 return;
21429 case 'U':
21430 /* Print `u' if this has an auto-increment or auto-decrement. */
21431 if (MEM_P (x)
21432 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21433 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21434 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21435 putc ('u', file);
21436 return;
21438 case 'V':
21439 /* Print the trap code for this operand. */
21440 switch (GET_CODE (x))
21442 case EQ:
21443 fputs ("eq", file); /* 4 */
21444 break;
21445 case NE:
21446 fputs ("ne", file); /* 24 */
21447 break;
21448 case LT:
21449 fputs ("lt", file); /* 16 */
21450 break;
21451 case LE:
21452 fputs ("le", file); /* 20 */
21453 break;
21454 case GT:
21455 fputs ("gt", file); /* 8 */
21456 break;
21457 case GE:
21458 fputs ("ge", file); /* 12 */
21459 break;
21460 case LTU:
21461 fputs ("llt", file); /* 2 */
21462 break;
21463 case LEU:
21464 fputs ("lle", file); /* 6 */
21465 break;
21466 case GTU:
21467 fputs ("lgt", file); /* 1 */
21468 break;
21469 case GEU:
21470 fputs ("lge", file); /* 5 */
21471 break;
21472 default:
21473 gcc_unreachable ();
21475 break;
21477 case 'w':
21478 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21479 normally. */
21480 if (INT_P (x))
21481 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21482 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21483 else
21484 print_operand (file, x, 0);
21485 return;
21487 case 'x':
21488 /* X is a FPR or Altivec register used in a VSX context. */
21489 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21490 output_operand_lossage ("invalid %%x value");
21491 else
21493 int reg = REGNO (x);
21494 int vsx_reg = (FP_REGNO_P (reg)
21495 ? reg - 32
21496 : reg - FIRST_ALTIVEC_REGNO + 32);
21498 #ifdef TARGET_REGNAMES
21499 if (TARGET_REGNAMES)
21500 fprintf (file, "%%vs%d", vsx_reg);
21501 else
21502 #endif
21503 fprintf (file, "%d", vsx_reg);
21505 return;
21507 case 'X':
21508 if (MEM_P (x)
21509 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21510 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21511 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21512 putc ('x', file);
21513 return;
21515 case 'Y':
21516 /* Like 'L', for third word of TImode/PTImode. */
21517 if (REG_P (x))
21518 fputs (reg_names[REGNO (x) + 2], file);
21519 else if (MEM_P (x))
21521 machine_mode mode = GET_MODE (x);
21522 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21523 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21524 output_address (mode, plus_constant (Pmode,
21525 XEXP (XEXP (x, 0), 0), 8));
21526 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21527 output_address (mode, plus_constant (Pmode,
21528 XEXP (XEXP (x, 0), 0), 8));
21529 else
21530 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21531 if (small_data_operand (x, GET_MODE (x)))
21532 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21533 reg_names[SMALL_DATA_REG]);
21535 return;
21537 case 'z':
21538 /* X is a SYMBOL_REF. Write out the name preceded by a
21539 period and without any trailing data in brackets. Used for function
21540 names. If we are configured for System V (or the embedded ABI) on
21541 the PowerPC, do not emit the period, since those systems do not use
21542 TOCs and the like. */
21543 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21545 /* For macho, check to see if we need a stub. */
21546 if (TARGET_MACHO)
21548 const char *name = XSTR (x, 0);
21549 #if TARGET_MACHO
21550 if (darwin_emit_branch_islands
21551 && MACHOPIC_INDIRECT
21552 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21553 name = machopic_indirection_name (x, /*stub_p=*/true);
21554 #endif
21555 assemble_name (file, name);
21557 else if (!DOT_SYMBOLS)
21558 assemble_name (file, XSTR (x, 0));
21559 else
21560 rs6000_output_function_entry (file, XSTR (x, 0));
21561 return;
21563 case 'Z':
21564 /* Like 'L', for last word of TImode/PTImode. */
21565 if (REG_P (x))
21566 fputs (reg_names[REGNO (x) + 3], file);
21567 else if (MEM_P (x))
21569 machine_mode mode = GET_MODE (x);
21570 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21571 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21572 output_address (mode, plus_constant (Pmode,
21573 XEXP (XEXP (x, 0), 0), 12));
21574 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21575 output_address (mode, plus_constant (Pmode,
21576 XEXP (XEXP (x, 0), 0), 12));
21577 else
21578 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21579 if (small_data_operand (x, GET_MODE (x)))
21580 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21581 reg_names[SMALL_DATA_REG]);
21583 return;
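/* Together with 'L', which selects the second word, the 'Y' and 'Z'
   codes above give patterns access to the third and fourth words of a
   TImode/PTImode operand.  */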
21585 /* Print AltiVec memory operand. */
21586 case 'y':
21588 rtx tmp;
21590 gcc_assert (MEM_P (x));
21592 tmp = XEXP (x, 0);
21594 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21595 && GET_CODE (tmp) == AND
21596 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21597 && INTVAL (XEXP (tmp, 1)) == -16)
21598 tmp = XEXP (tmp, 0);
21599 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21600 && GET_CODE (tmp) == PRE_MODIFY)
21601 tmp = XEXP (tmp, 1);
21602 if (REG_P (tmp))
21603 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21604 else
21606 if (GET_CODE (tmp) != PLUS
21607 || !REG_P (XEXP (tmp, 0))
21608 || !REG_P (XEXP (tmp, 1)))
21610 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21611 break;
21614 if (REGNO (XEXP (tmp, 0)) == 0)
21615 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21616 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21617 else
21618 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21619 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21621 break;
21624 case 0:
21625 if (REG_P (x))
21626 fprintf (file, "%s", reg_names[REGNO (x)]);
21627 else if (MEM_P (x))
21629 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21630 know the width from the mode. */
21631 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21632 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21633 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21634 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21635 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21636 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21637 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21638 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21639 else
21640 output_address (GET_MODE (x), XEXP (x, 0));
21642 else
21644 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21645 /* This hack along with a corresponding hack in
21646 rs6000_output_addr_const_extra arranges to output addends
21647 where the assembler expects to find them. eg.
21648 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21649 without this hack would be output as "x@toc+4". We
21650 want "x+4@toc". */
21651 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21652 else
21653 output_addr_const (file, x);
21655 return;
21657 case '&':
21658 if (const char *name = get_some_local_dynamic_name ())
21659 assemble_name (file, name);
21660 else
21661 output_operand_lossage ("'%%&' used without any "
21662 "local dynamic TLS references");
21663 return;
21665 default:
21666 output_operand_lossage ("invalid %%xn code");
21670 /* Print the address of an operand. */
21672 void
21673 print_operand_address (FILE *file, rtx x)
21675 if (REG_P (x))
21676 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21677 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21678 || GET_CODE (x) == LABEL_REF)
21680 output_addr_const (file, x);
21681 if (small_data_operand (x, GET_MODE (x)))
21682 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21683 reg_names[SMALL_DATA_REG]);
21684 else
21685 gcc_assert (!TARGET_TOC);
21687 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21688 && REG_P (XEXP (x, 1)))
21690 if (REGNO (XEXP (x, 0)) == 0)
21691 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21692 reg_names[ REGNO (XEXP (x, 0)) ]);
21693 else
21694 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21695 reg_names[ REGNO (XEXP (x, 1)) ]);
21697 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21698 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21699 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21700 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21701 #if TARGET_MACHO
21702 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21703 && CONSTANT_P (XEXP (x, 1)))
21705 fprintf (file, "lo16(");
21706 output_addr_const (file, XEXP (x, 1));
21707 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21709 #endif
21710 #if TARGET_ELF
21711 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21712 && CONSTANT_P (XEXP (x, 1)))
21714 output_addr_const (file, XEXP (x, 1));
21715 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21717 #endif
21718 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21720 /* This hack along with a corresponding hack in
21721 rs6000_output_addr_const_extra arranges to output addends
21722 where the assembler expects to find them. eg.
21723 (lo_sum (reg 9)
21724 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21725 without this hack would be output as "x@toc+8@l(9)". We
21726 want "x+8@toc@l(9)". */
21727 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21728 if (GET_CODE (x) == LO_SUM)
21729 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21730 else
21731 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21733 else
21734 gcc_unreachable ();
21737 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21739 static bool
21740 rs6000_output_addr_const_extra (FILE *file, rtx x)
21742 if (GET_CODE (x) == UNSPEC)
21743 switch (XINT (x, 1))
21745 case UNSPEC_TOCREL:
21746 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21747 && REG_P (XVECEXP (x, 0, 1))
21748 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21749 output_addr_const (file, XVECEXP (x, 0, 0));
21750 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21752 if (INTVAL (tocrel_offset_oac) >= 0)
21753 fprintf (file, "+");
21754 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21756 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21758 putc ('-', file);
21759 assemble_name (file, toc_label_name);
21760 need_toc_init = 1;
21762 else if (TARGET_ELF)
21763 fputs ("@toc", file);
21764 return true;
21766 #if TARGET_MACHO
21767 case UNSPEC_MACHOPIC_OFFSET:
21768 output_addr_const (file, XVECEXP (x, 0, 0));
21769 putc ('-', file);
21770 machopic_output_function_base_name (file);
21771 return true;
21772 #endif
21774 return false;
21777 /* Target hook for assembling integer objects. The PowerPC version has
21778 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21779 is defined. It also needs to handle DI-mode objects on 64-bit
21780 targets. */
21782 static bool
21783 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21785 #ifdef RELOCATABLE_NEEDS_FIXUP
21786 /* Special handling for SI values. */
21787 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21789 static int recurse = 0;
21791 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21792 the .fixup section. Since the TOC section is already relocated, we
21793 don't need to mark it here. We used to skip the text section, but it
21794 should never be valid for relocated addresses to be placed in the text
21795 section. */
21796 if (DEFAULT_ABI == ABI_V4
21797 && (TARGET_RELOCATABLE || flag_pic > 1)
21798 && in_section != toc_section
21799 && !recurse
21800 && !CONST_SCALAR_INT_P (x)
21801 && CONSTANT_P (x))
21803 char buf[256];
21805 recurse = 1;
21806 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21807 fixuplabelno++;
21808 ASM_OUTPUT_LABEL (asm_out_file, buf);
21809 fprintf (asm_out_file, "\t.long\t(");
21810 output_addr_const (asm_out_file, x);
21811 fprintf (asm_out_file, ")@fixup\n");
21812 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21813 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21814 fprintf (asm_out_file, "\t.long\t");
21815 assemble_name (asm_out_file, buf);
21816 fprintf (asm_out_file, "\n\t.previous\n");
21817 recurse = 0;
21818 return true;
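/* For a relocatable address X the code above emits something like

       .LCP0:  .long (X)@fixup
               .section ".fixup","aw"
               .align 2
               .long .LCP0
               .previous

   (label name illustrative), so the startup code can locate and
   relocate the word.  */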
21820 /* Remove initial .'s to turn a -mcall-aixdesc function
21821 address into the address of the descriptor, not the function
21822 itself. */
21823 else if (GET_CODE (x) == SYMBOL_REF
21824 && XSTR (x, 0)[0] == '.'
21825 && DEFAULT_ABI == ABI_AIX)
21827 const char *name = XSTR (x, 0);
21828 while (*name == '.')
21829 name++;
21831 fprintf (asm_out_file, "\t.long\t%s\n", name);
21832 return true;
21835 #endif /* RELOCATABLE_NEEDS_FIXUP */
21836 return default_assemble_integer (x, size, aligned_p);
21839 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21840 /* Emit an assembler directive to set symbol visibility for DECL to
21841 VISIBILITY_TYPE. */
21843 static void
21844 rs6000_assemble_visibility (tree decl, int vis)
21846 if (TARGET_XCOFF)
21847 return;
21849 /* Functions need to have their entry point symbol visibility set as
21850 well as their descriptor symbol visibility. */
21851 if (DEFAULT_ABI == ABI_AIX
21852 && DOT_SYMBOLS
21853 && TREE_CODE (decl) == FUNCTION_DECL)
21855 static const char * const visibility_types[] = {
21856 NULL, "protected", "hidden", "internal"
21859 const char *name, *type;
21861 name = ((* targetm.strip_name_encoding)
21862 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21863 type = visibility_types[vis];
21865 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21866 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21868 else
21869 default_assemble_visibility (decl, vis);
21871 #endif
21873 enum rtx_code
21874 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21876 /* Reversal of FP compares needs care -- an ordered compare
21877 becomes an unordered compare and vice versa. */
21878 if (mode == CCFPmode
21879 && (!flag_finite_math_only
21880 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21881 || code == UNEQ || code == LTGT))
21882 return reverse_condition_maybe_unordered (code);
21883 else
21884 return reverse_condition (code);
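/* For example, reversing GE in CCFPmode yields UNLT rather than LT, so
   that a NaN operand still sends control down the correct arm.  */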
21887 /* Generate a compare for CODE. Return a brand-new rtx that
21888 represents the result of the compare. */
21890 static rtx
21891 rs6000_generate_compare (rtx cmp, machine_mode mode)
21893 machine_mode comp_mode;
21894 rtx compare_result;
21895 enum rtx_code code = GET_CODE (cmp);
21896 rtx op0 = XEXP (cmp, 0);
21897 rtx op1 = XEXP (cmp, 1);
21899 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21900 comp_mode = CCmode;
21901 else if (FLOAT_MODE_P (mode))
21902 comp_mode = CCFPmode;
21903 else if (code == GTU || code == LTU
21904 || code == GEU || code == LEU)
21905 comp_mode = CCUNSmode;
21906 else if ((code == EQ || code == NE)
21907 && unsigned_reg_p (op0)
21908 && (unsigned_reg_p (op1)
21909 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21910 /* These are unsigned values, perhaps there will be a later
21911 ordering compare that can be shared with this one. */
21912 comp_mode = CCUNSmode;
21913 else
21914 comp_mode = CCmode;
21916 /* If we have an unsigned compare, make sure we don't have a signed value as
21917 an immediate. */
21918 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21919 && INTVAL (op1) < 0)
21921 op0 = copy_rtx_if_shared (op0);
21922 op1 = force_reg (GET_MODE (op0), op1);
21923 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21926 /* First, the compare. */
21927 compare_result = gen_reg_rtx (comp_mode);
21929 /* IEEE 128-bit support in VSX registers when we do not have hardware
21930 support. */
21931 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21933 rtx libfunc = NULL_RTX;
21934 bool check_nan = false;
21935 rtx dest;
21937 switch (code)
21939 case EQ:
21940 case NE:
21941 libfunc = optab_libfunc (eq_optab, mode);
21942 break;
21944 case GT:
21945 case GE:
21946 libfunc = optab_libfunc (ge_optab, mode);
21947 break;
21949 case LT:
21950 case LE:
21951 libfunc = optab_libfunc (le_optab, mode);
21952 break;
21954 case UNORDERED:
21955 case ORDERED:
21956 libfunc = optab_libfunc (unord_optab, mode);
21957 code = (code == UNORDERED) ? NE : EQ;
21958 break;
21960 case UNGE:
21961 case UNGT:
21962 check_nan = true;
21963 libfunc = optab_libfunc (ge_optab, mode);
21964 code = (code == UNGE) ? GE : GT;
21965 break;
21967 case UNLE:
21968 case UNLT:
21969 check_nan = true;
21970 libfunc = optab_libfunc (le_optab, mode);
21971 code = (code == UNLE) ? LE : LT;
21972 break;
21974 case UNEQ:
21975 case LTGT:
21976 check_nan = true;
21977 libfunc = optab_libfunc (eq_optab, mode);
21978 code = (code == UNEQ) ? EQ : NE;
21979 break;
21981 default:
21982 gcc_unreachable ();
21985 gcc_assert (libfunc);
21987 if (!check_nan)
21988 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21989 SImode, op0, mode, op1, mode);
21991 /* The library signals an exception for signalling NaNs, so we need to
21992 handle isgreater, etc. by first checking isordered. */
21993 else
21995 rtx ne_rtx, normal_dest, unord_dest;
21996 rtx unord_func = optab_libfunc (unord_optab, mode);
21997 rtx join_label = gen_label_rtx ();
21998 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21999 rtx unord_cmp = gen_reg_rtx (comp_mode);
22002 /* Test for either value being a NaN. */
22003 gcc_assert (unord_func);
22004 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22005 SImode, op0, mode, op1, mode);
22007 /* Preload the result with 1, the answer when either value is a NaN,
22008 and jump to the join label if the values are in fact unordered. */
22009 dest = gen_reg_rtx (SImode);
22010 emit_move_insn (dest, const1_rtx);
22011 emit_insn (gen_rtx_SET (unord_cmp,
22012 gen_rtx_COMPARE (comp_mode, unord_dest,
22013 const0_rtx)));
22015 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22016 emit_jump_insn (gen_rtx_SET (pc_rtx,
22017 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22018 join_ref,
22019 pc_rtx)));
22021 /* Do the normal comparison, knowing that the values are not
22022 NaNs. */
22023 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22024 SImode, op0, mode, op1, mode);
22026 emit_insn (gen_cstoresi4 (dest,
22027 gen_rtx_fmt_ee (code, SImode, normal_dest,
22028 const0_rtx),
22029 normal_dest, const0_rtx));
22031 /* Join the NaN and non-NaN paths. Compare dest against 0. */
22032 emit_label (join_label);
22033 code = NE;
22036 emit_insn (gen_rtx_SET (compare_result,
22037 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
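/* A sketch of the NaN-checking path above, for an UNGT comparison on
   KFmode (the libcall names are taken from the optabs and shown only
   for illustration):

       dest = 1;
       if (__unordkf2 (a, b) != 0)
         goto join;
       dest = (__gekf2 (a, b) > 0);
     join:
       result = (dest != 0);

   so unordered operands yield true, as UNGT requires, without ever
   feeding a signalling NaN to the ordering libcall.  */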
22040 else
22042 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22043 CLOBBERs to match cmptf_internal2 pattern. */
22044 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22045 && FLOAT128_IBM_P (GET_MODE (op0))
22046 && TARGET_HARD_FLOAT)
22047 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22048 gen_rtvec (10,
22049 gen_rtx_SET (compare_result,
22050 gen_rtx_COMPARE (comp_mode, op0, op1)),
22051 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22052 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22053 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22054 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22055 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22056 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22057 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22058 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22059 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22060 else if (GET_CODE (op1) == UNSPEC
22061 && XINT (op1, 1) == UNSPEC_SP_TEST)
22063 rtx op1b = XVECEXP (op1, 0, 0);
22064 comp_mode = CCEQmode;
22065 compare_result = gen_reg_rtx (CCEQmode);
22066 if (TARGET_64BIT)
22067 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22068 else
22069 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22071 else
22072 emit_insn (gen_rtx_SET (compare_result,
22073 gen_rtx_COMPARE (comp_mode, op0, op1)));
22076 /* Some kinds of FP comparisons need an OR operation;
22077 under flag_finite_math_only we don't bother. */
22078 if (FLOAT_MODE_P (mode)
22079 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22080 && !flag_finite_math_only
22081 && (code == LE || code == GE
22082 || code == UNEQ || code == LTGT
22083 || code == UNGT || code == UNLT))
22085 enum rtx_code or1, or2;
22086 rtx or1_rtx, or2_rtx, compare2_rtx;
22087 rtx or_result = gen_reg_rtx (CCEQmode);
22089 switch (code)
22091 case LE: or1 = LT; or2 = EQ; break;
22092 case GE: or1 = GT; or2 = EQ; break;
22093 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22094 case LTGT: or1 = LT; or2 = GT; break;
22095 case UNGT: or1 = UNORDERED; or2 = GT; break;
22096 case UNLT: or1 = UNORDERED; or2 = LT; break;
22097 default: gcc_unreachable ();
22099 validate_condition_mode (or1, comp_mode);
22100 validate_condition_mode (or2, comp_mode);
22101 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22102 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22103 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22104 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22105 const_true_rtx);
22106 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22108 compare_result = or_result;
22109 code = EQ;
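/* On the machine side this IOR of two CR bits is what the cror-based
   patterns match; e.g. an FP LE folds to (LT || EQ) in a CCEQ bit.  */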
22112 validate_condition_mode (code, GET_MODE (compare_result));
22114 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22118 /* Return the diagnostic message string if the binary operation OP is
22119 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22121 static const char*
22122 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22123 const_tree type1,
22124 const_tree type2)
22126 machine_mode mode1 = TYPE_MODE (type1);
22127 machine_mode mode2 = TYPE_MODE (type2);
22129 /* For complex modes, use the inner type. */
22130 if (COMPLEX_MODE_P (mode1))
22131 mode1 = GET_MODE_INNER (mode1);
22133 if (COMPLEX_MODE_P (mode2))
22134 mode2 = GET_MODE_INNER (mode2);
22136 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22137 double to intermix unless -mfloat128-convert. */
22138 if (mode1 == mode2)
22139 return NULL;
22141 if (!TARGET_FLOAT128_CVT)
22143 if ((mode1 == KFmode && mode2 == IFmode)
22144 || (mode1 == IFmode && mode2 == KFmode))
22145 return N_("__float128 and __ibm128 cannot be used in the same "
22146 "expression");
22148 if (TARGET_IEEEQUAD
22149 && ((mode1 == IFmode && mode2 == TFmode)
22150 || (mode1 == TFmode && mode2 == IFmode)))
22151 return N_("__ibm128 and long double cannot be used in the same "
22152 "expression");
22154 if (!TARGET_IEEEQUAD
22155 && ((mode1 == KFmode && mode2 == TFmode)
22156 || (mode1 == TFmode && mode2 == KFmode)))
22157 return N_("__float128 and long double cannot be used in the same "
22158 "expression");
22161 return NULL;
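/* So, for instance, adding a __float128 value to an __ibm128 value
   without -mfloat128-convert draws the first message above, while
   mixing __float128 with long double draws the last one on targets
   where long double is IBM extended double.  */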
22165 /* Expand floating point conversion to/from __float128 and __ibm128. */
22167 void
22168 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22170 machine_mode dest_mode = GET_MODE (dest);
22171 machine_mode src_mode = GET_MODE (src);
22172 convert_optab cvt = unknown_optab;
22173 bool do_move = false;
22174 rtx libfunc = NULL_RTX;
22175 rtx dest2;
22176 typedef rtx (*rtx_2func_t) (rtx, rtx);
22177 rtx_2func_t hw_convert = (rtx_2func_t)0;
22178 size_t kf_or_tf;
22180 struct hw_conv_t {
22181 rtx_2func_t from_df;
22182 rtx_2func_t from_sf;
22183 rtx_2func_t from_si_sign;
22184 rtx_2func_t from_si_uns;
22185 rtx_2func_t from_di_sign;
22186 rtx_2func_t from_di_uns;
22187 rtx_2func_t to_df;
22188 rtx_2func_t to_sf;
22189 rtx_2func_t to_si_sign;
22190 rtx_2func_t to_si_uns;
22191 rtx_2func_t to_di_sign;
22192 rtx_2func_t to_di_uns;
22193 } hw_conversions[2] = {
22194 /* conversions to/from KFmode */
22196 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22197 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22198 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22199 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22200 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22201 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22202 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22203 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22204 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22205 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22206 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22207 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22210 /* conversions to/from TFmode */
22212 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22213 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22214 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22215 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22216 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22217 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22218 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22219 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22220 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22221 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22222 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22223 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
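/* Row 0 of hw_conversions covers KFmode and row 1 covers TFmode;
   kf_or_tf below selects the row and the mode switches pick the
   column, so each hardware conversion is a single table lookup.  */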
22227 if (dest_mode == src_mode)
22228 gcc_unreachable ();
22230 /* Eliminate memory operations. */
22231 if (MEM_P (src))
22232 src = force_reg (src_mode, src);
22234 if (MEM_P (dest))
22236 rtx tmp = gen_reg_rtx (dest_mode);
22237 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22238 rs6000_emit_move (dest, tmp, dest_mode);
22239 return;
22242 /* Convert to IEEE 128-bit floating point. */
22243 if (FLOAT128_IEEE_P (dest_mode))
22245 if (dest_mode == KFmode)
22246 kf_or_tf = 0;
22247 else if (dest_mode == TFmode)
22248 kf_or_tf = 1;
22249 else
22250 gcc_unreachable ();
22252 switch (src_mode)
22254 case E_DFmode:
22255 cvt = sext_optab;
22256 hw_convert = hw_conversions[kf_or_tf].from_df;
22257 break;
22259 case E_SFmode:
22260 cvt = sext_optab;
22261 hw_convert = hw_conversions[kf_or_tf].from_sf;
22262 break;
22264 case E_KFmode:
22265 case E_IFmode:
22266 case E_TFmode:
22267 if (FLOAT128_IBM_P (src_mode))
22268 cvt = sext_optab;
22269 else
22270 do_move = true;
22271 break;
22273 case E_SImode:
22274 if (unsigned_p)
22276 cvt = ufloat_optab;
22277 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22279 else
22281 cvt = sfloat_optab;
22282 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22284 break;
22286 case E_DImode:
22287 if (unsigned_p)
22289 cvt = ufloat_optab;
22290 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22292 else
22294 cvt = sfloat_optab;
22295 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22297 break;
22299 default:
22300 gcc_unreachable ();
22304 /* Convert from IEEE 128-bit floating point. */
22305 else if (FLOAT128_IEEE_P (src_mode))
22307 if (src_mode == KFmode)
22308 kf_or_tf = 0;
22309 else if (src_mode == TFmode)
22310 kf_or_tf = 1;
22311 else
22312 gcc_unreachable ();
22314 switch (dest_mode)
22316 case E_DFmode:
22317 cvt = trunc_optab;
22318 hw_convert = hw_conversions[kf_or_tf].to_df;
22319 break;
22321 case E_SFmode:
22322 cvt = trunc_optab;
22323 hw_convert = hw_conversions[kf_or_tf].to_sf;
22324 break;
22326 case E_KFmode:
22327 case E_IFmode:
22328 case E_TFmode:
22329 if (FLOAT128_IBM_P (dest_mode))
22330 cvt = trunc_optab;
22331 else
22332 do_move = true;
22333 break;
22335 case E_SImode:
22336 if (unsigned_p)
22338 cvt = ufix_optab;
22339 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22341 else
22343 cvt = sfix_optab;
22344 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22346 break;
22348 case E_DImode:
22349 if (unsigned_p)
22351 cvt = ufix_optab;
22352 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22354 else
22356 cvt = sfix_optab;
22357 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22359 break;
22361 default:
22362 gcc_unreachable ();
22366 /* Both IBM format. */
22367 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22368 do_move = true;
22370 else
22371 gcc_unreachable ();
22373 /* Handle conversion between TFmode/KFmode. */
22374 if (do_move)
22375 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22377 /* Handle conversion if we have hardware support. */
22378 else if (TARGET_FLOAT128_HW && hw_convert)
22379 emit_insn ((hw_convert) (dest, src));
22381 /* Call an external function to do the conversion. */
22382 else if (cvt != unknown_optab)
22384 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22385 gcc_assert (libfunc != NULL_RTX);
22387 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22388 src, src_mode);
22390 gcc_assert (dest2 != NULL_RTX);
22391 if (!rtx_equal_p (dest, dest2))
22392 emit_move_insn (dest, dest2);
22395 else
22396 gcc_unreachable ();
22398 return;
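/* For example, a signed DImode to KFmode conversion uses
   gen_float_kfdi2_hw when ISA 3.0 hardware is available, and otherwise
   falls back to the sfloat_optab libcall (presumably __floatdikf in
   libgcc; the exact name comes from the optab machinery).  */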
22402 /* Emit the RTL for an sISEL pattern. */
22404 void
22405 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22407 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22410 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22411 can be used as that dest register. Return the dest register. */
22414 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22416 if (op2 == const0_rtx)
22417 return op1;
22419 if (GET_CODE (scratch) == SCRATCH)
22420 scratch = gen_reg_rtx (mode);
22422 if (logical_operand (op2, mode))
22423 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22424 else
22425 emit_insn (gen_rtx_SET (scratch,
22426 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22428 return scratch;
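/* For example, (a == 0x1234) comes out as scratch = a ^ 0x1234, which
   is zero exactly when the operands are equal; constants that do not
   satisfy logical_operand use the subtract form instead.  */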
22431 void
22432 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22434 rtx condition_rtx;
22435 machine_mode op_mode;
22436 enum rtx_code cond_code;
22437 rtx result = operands[0];
22439 condition_rtx = rs6000_generate_compare (operands[1], mode);
22440 cond_code = GET_CODE (condition_rtx);
22442 if (cond_code == NE
22443 || cond_code == GE || cond_code == LE
22444 || cond_code == GEU || cond_code == LEU
22445 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22447 rtx not_result = gen_reg_rtx (CCEQmode);
22448 rtx not_op, rev_cond_rtx;
22449 machine_mode cc_mode;
22451 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22453 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22454 SImode, XEXP (condition_rtx, 0), const0_rtx);
22455 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22456 emit_insn (gen_rtx_SET (not_result, not_op));
22457 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22460 op_mode = GET_MODE (XEXP (operands[1], 0));
22461 if (op_mode == VOIDmode)
22462 op_mode = GET_MODE (XEXP (operands[1], 1));
22464 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22466 PUT_MODE (condition_rtx, DImode);
22467 convert_move (result, condition_rtx, 0);
22469 else
22471 PUT_MODE (condition_rtx, SImode);
22472 emit_insn (gen_rtx_SET (result, condition_rtx));
22476 /* Emit a branch of kind CODE to location LOC. */
22478 void
22479 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22481 rtx condition_rtx, loc_ref;
22483 condition_rtx = rs6000_generate_compare (operands[0], mode);
22484 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22485 emit_jump_insn (gen_rtx_SET (pc_rtx,
22486 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22487 loc_ref, pc_rtx)));
22490 /* Return the string to output a conditional branch to LABEL, which is
22491 the operand template of the label, or NULL if the branch is really a
22492 conditional return.
22494 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22495 condition code register and its mode specifies what kind of
22496 comparison we made.
22498 REVERSED is nonzero if we should reverse the sense of the comparison.
22500 INSN is the insn. */
22502 char *
22503 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22505 static char string[64];
22506 enum rtx_code code = GET_CODE (op);
22507 rtx cc_reg = XEXP (op, 0);
22508 machine_mode mode = GET_MODE (cc_reg);
22509 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22510 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22511 int really_reversed = reversed ^ need_longbranch;
22512 char *s = string;
22513 const char *ccode;
22514 const char *pred;
22515 rtx note;
22517 validate_condition_mode (code, mode);
22519 /* Work out which way this really branches. We could use
22520 reverse_condition_maybe_unordered here always but this
22521 makes the resulting assembler clearer. */
22522 if (really_reversed)
22524 /* Reversal of FP compares needs care -- an ordered compare
22525 becomes an unordered compare and vice versa. */
22526 if (mode == CCFPmode)
22527 code = reverse_condition_maybe_unordered (code);
22528 else
22529 code = reverse_condition (code);
22532 switch (code)
22534 /* Not all of these are actually distinct opcodes, but
22535 we distinguish them for clarity of the resulting assembler. */
22536 case NE: case LTGT:
22537 ccode = "ne"; break;
22538 case EQ: case UNEQ:
22539 ccode = "eq"; break;
22540 case GE: case GEU:
22541 ccode = "ge"; break;
22542 case GT: case GTU: case UNGT:
22543 ccode = "gt"; break;
22544 case LE: case LEU:
22545 ccode = "le"; break;
22546 case LT: case LTU: case UNLT:
22547 ccode = "lt"; break;
22548 case UNORDERED: ccode = "un"; break;
22549 case ORDERED: ccode = "nu"; break;
22550 case UNGE: ccode = "nl"; break;
22551 case UNLE: ccode = "ng"; break;
22552 default:
22553 gcc_unreachable ();
22556 /* Maybe we have a guess as to how likely the branch is. */
22557 pred = "";
22558 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22559 if (note != NULL_RTX)
22561 /* PROB is the difference from 50%. */
22562 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22563 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22565 /* Only hint for highly probable/improbable branches on newer cpus when
22566 we have real profile data, as static prediction overrides processor
22567 dynamic prediction. For older cpus we may as well always hint, but
22568 assume not taken for branches that are very close to 50% as a
22569 mispredicted taken branch is more expensive than a
22570 mispredicted not-taken branch. */
22571 if (rs6000_always_hint
22572 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22573 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22574 && br_prob_note_reliable_p (note)))
22576 if (abs (prob) > REG_BR_PROB_BASE / 20
22577 && ((prob > 0) ^ need_longbranch))
22578 pred = "+";
22579 else
22580 pred = "-";
22584 if (label == NULL)
22585 s += sprintf (s, "b%slr%s ", ccode, pred);
22586 else
22587 s += sprintf (s, "b%s%s ", ccode, pred);
22589 /* We need to escape any '%' characters in the reg_names string.
22590 Assume they'd only be the first character.... */
22591 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22592 *s++ = '%';
22593 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22595 if (label != NULL)
22597 /* If the branch distance was too far, we may have to use an
22598 unconditional branch to go the distance. */
22599 if (need_longbranch)
22600 s += sprintf (s, ",$+8\n\tb %s", label);
22601 else
22602 s += sprintf (s, ",%s", label);
22605 return string;
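/* Sample outputs: a likely-taken EQ test against cr0 with a reachable
   target comes out roughly as "beq+ 0,.L25", while an out-of-range
   target is reversed and synthesized as "bne 0,$+8" followed by
   "b .L25" (labels illustrative).  */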
22608 /* Return insn for VSX or Altivec comparisons. */
22610 static rtx
22611 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22613 rtx mask;
22614 machine_mode mode = GET_MODE (op0);
22616 switch (code)
22618 default:
22619 break;
22621 case GE:
22622 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22623 return NULL_RTX;
22624 /* FALLTHRU */
22626 case EQ:
22627 case GT:
22628 case GTU:
22629 case ORDERED:
22630 case UNORDERED:
22631 case UNEQ:
22632 case LTGT:
22633 mask = gen_reg_rtx (mode);
22634 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22635 return mask;
22638 return NULL_RTX;
22641 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22642 DMODE is the expected destination mode. This is a recursive function. */
22644 static rtx
22645 rs6000_emit_vector_compare (enum rtx_code rcode,
22646 rtx op0, rtx op1,
22647 machine_mode dmode)
22649 rtx mask;
22650 bool swap_operands = false;
22651 bool try_again = false;
22653 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22654 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22656 /* See if the comparison works as is. */
22657 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22658 if (mask)
22659 return mask;
22661 switch (rcode)
22663 case LT:
22664 rcode = GT;
22665 swap_operands = true;
22666 try_again = true;
22667 break;
22668 case LTU:
22669 rcode = GTU;
22670 swap_operands = true;
22671 try_again = true;
22672 break;
22673 case NE:
22674 case UNLE:
22675 case UNLT:
22676 case UNGE:
22677 case UNGT:
22678 /* Invert condition and try again.
22679 e.g., A != B becomes ~(A==B). */
22681 enum rtx_code rev_code;
22682 enum insn_code nor_code;
22683 rtx mask2;
22685 rev_code = reverse_condition_maybe_unordered (rcode);
22686 if (rev_code == UNKNOWN)
22687 return NULL_RTX;
22689 nor_code = optab_handler (one_cmpl_optab, dmode);
22690 if (nor_code == CODE_FOR_nothing)
22691 return NULL_RTX;
22693 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22694 if (!mask2)
22695 return NULL_RTX;
22697 mask = gen_reg_rtx (dmode);
22698 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22699 return mask;
22701 break;
22702 case GE:
22703 case GEU:
22704 case LE:
22705 case LEU:
22706 /* Try GT/GTU/LT/LTU OR EQ */
22708 rtx c_rtx, eq_rtx;
22709 enum insn_code ior_code;
22710 enum rtx_code new_code;
22712 switch (rcode)
22714 case GE:
22715 new_code = GT;
22716 break;
22718 case GEU:
22719 new_code = GTU;
22720 break;
22722 case LE:
22723 new_code = LT;
22724 break;
22726 case LEU:
22727 new_code = LTU;
22728 break;
22730 default:
22731 gcc_unreachable ();
22734 ior_code = optab_handler (ior_optab, dmode);
22735 if (ior_code == CODE_FOR_nothing)
22736 return NULL_RTX;
22738 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22739 if (!c_rtx)
22740 return NULL_RTX;
22742 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22743 if (!eq_rtx)
22744 return NULL_RTX;
22746 mask = gen_reg_rtx (dmode);
22747 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22748 return mask;
22750 break;
22751 default:
22752 return NULL_RTX;
22755 if (try_again)
22757 if (swap_operands)
22758 std::swap (op0, op1);
22760 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22761 if (mask)
22762 return mask;
22765 /* You only get two chances. */
22766 return NULL_RTX;
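/* In other words: LT/LTU are swapped into GT/GTU, NE and the unordered
   forms become a NOT of the reversed compare, and GE/GEU/LE/LEU become
   an OR of the strict compare with EQ, so each recursive leg bottoms
   out in the inner routine above.  */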
22769 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22770 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22771 operands for the relation operation COND. */
22774 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22775 rtx cond, rtx cc_op0, rtx cc_op1)
22777 machine_mode dest_mode = GET_MODE (dest);
22778 machine_mode mask_mode = GET_MODE (cc_op0);
22779 enum rtx_code rcode = GET_CODE (cond);
22780 machine_mode cc_mode = CCmode;
22781 rtx mask;
22782 rtx cond2;
22783 bool invert_move = false;
22785 if (VECTOR_UNIT_NONE_P (dest_mode))
22786 return 0;
22788 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22789 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22791 switch (rcode)
22793 /* Swap operands if we can, and fall back to doing the operation as
22794 specified, and doing a NOR to invert the test. */
22795 case NE:
22796 case UNLE:
22797 case UNLT:
22798 case UNGE:
22799 case UNGT:
22800 /* Invert condition and try again.
22801 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22802 invert_move = true;
22803 rcode = reverse_condition_maybe_unordered (rcode);
22804 if (rcode == UNKNOWN)
22805 return 0;
22806 break;
22808 case GE:
22809 case LE:
22810 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22812 /* Invert condition to avoid compound test. */
22813 invert_move = true;
22814 rcode = reverse_condition (rcode);
22816 break;
22818 case GTU:
22819 case GEU:
22820 case LTU:
22821 case LEU:
22822 /* Mark unsigned tests with CCUNSmode. */
22823 cc_mode = CCUNSmode;
22825 /* Invert condition to avoid compound test if necessary. */
22826 if (rcode == GEU || rcode == LEU)
22828 invert_move = true;
22829 rcode = reverse_condition (rcode);
22831 break;
22833 default:
22834 break;
22837 /* Get the vector mask for the given relational operations. */
22838 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22840 if (!mask)
22841 return 0;
22843 if (invert_move)
22844 std::swap (op_true, op_false);
22846 /* Optimize constant -1/0 arms, since the comparison mask is itself -1/0. */
22847 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22848 && (GET_CODE (op_true) == CONST_VECTOR
22849 || GET_CODE (op_false) == CONST_VECTOR))
22851 rtx constant_0 = CONST0_RTX (dest_mode);
22852 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22854 if (op_true == constant_m1 && op_false == constant_0)
22856 emit_move_insn (dest, mask);
22857 return 1;
22860 else if (op_true == constant_0 && op_false == constant_m1)
22862 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22863 return 1;
22866 /* If we can't use the vector comparison directly, perhaps we can use
22867 the mask for the true or false fields, instead of loading up a
22868 constant. */
22869 if (op_true == constant_m1)
22870 op_true = mask;
22872 if (op_false == constant_0)
22873 op_false = mask;
22876 if (!REG_P (op_true) && !SUBREG_P (op_true))
22877 op_true = force_reg (dest_mode, op_true);
22879 if (!REG_P (op_false) && !SUBREG_P (op_false))
22880 op_false = force_reg (dest_mode, op_false);
22882 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22883 CONST0_RTX (dest_mode));
22884 emit_insn (gen_rtx_SET (dest,
22885 gen_rtx_IF_THEN_ELSE (dest_mode,
22886 cond2,
22887 op_true,
22888 op_false)));
22889 return 1;
22892 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22893 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22894 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22895 hardware has no such operation. */
22897 static int
22898 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22900 enum rtx_code code = GET_CODE (op);
22901 rtx op0 = XEXP (op, 0);
22902 rtx op1 = XEXP (op, 1);
22903 machine_mode compare_mode = GET_MODE (op0);
22904 machine_mode result_mode = GET_MODE (dest);
22905 bool max_p = false;
22907 if (result_mode != compare_mode)
22908 return 0;
22910 if (code == GE || code == GT)
22911 max_p = true;
22912 else if (code == LE || code == LT)
22913 max_p = false;
22914 else
22915 return 0;
22917 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22920 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22921 max_p = !max_p;
22923 else
22924 return 0;
22926 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22927 return 1;
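/* So (a >= b ? a : b) becomes XSMAXCDP directly, while the mirrored
   (a >= b ? b : a) flips max_p and becomes XSMINCDP.  */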
22930 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22931 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22932 operands of the last comparison is nonzero/true, FALSE_COND if it is
22933 zero/false. Return 0 if the hardware has no such operation. */
22935 static int
22936 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22938 enum rtx_code code = GET_CODE (op);
22939 rtx op0 = XEXP (op, 0);
22940 rtx op1 = XEXP (op, 1);
22941 machine_mode result_mode = GET_MODE (dest);
22942 rtx compare_rtx;
22943 rtx cmove_rtx;
22944 rtx clobber_rtx;
22946 if (!can_create_pseudo_p ())
22947 return 0;
22949 switch (code)
22951 case EQ:
22952 case GE:
22953 case GT:
22954 break;
22956 case NE:
22957 case LT:
22958 case LE:
22959 code = swap_condition (code);
22960 std::swap (op0, op1);
22961 break;
22963 default:
22964 return 0;
22967 /* Generate: [(parallel [(set (dest)
22968 (if_then_else (op (cmp1) (cmp2))
22969 (true)
22970 (false)))
22971 (clobber (scratch))])]. */
22973 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22974 cmove_rtx = gen_rtx_SET (dest,
22975 gen_rtx_IF_THEN_ELSE (result_mode,
22976 compare_rtx,
22977 true_cond,
22978 false_cond));
22980 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22981 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22982 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22984 return 1;
22987 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22988 operands of the last comparison is nonzero/true, FALSE_COND if it
22989 is zero/false. Return 0 if the hardware has no such operation. */
22992 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22994 enum rtx_code code = GET_CODE (op);
22995 rtx op0 = XEXP (op, 0);
22996 rtx op1 = XEXP (op, 1);
22997 machine_mode compare_mode = GET_MODE (op0);
22998 machine_mode result_mode = GET_MODE (dest);
22999 rtx temp;
23000 bool is_against_zero;
23002 /* These modes should always match. */
23003 if (GET_MODE (op1) != compare_mode
23004 /* In the isel case however, we can use a compare immediate, so
23005 op1 may be a small constant. */
23006 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23007 return 0;
23008 if (GET_MODE (true_cond) != result_mode)
23009 return 0;
23010 if (GET_MODE (false_cond) != result_mode)
23011 return 0;
23013 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23014 if (TARGET_P9_MINMAX
23015 && (compare_mode == SFmode || compare_mode == DFmode)
23016 && (result_mode == SFmode || result_mode == DFmode))
23018 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23019 return 1;
23021 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23022 return 1;
23025 /* Don't allow using floating point comparisons for integer results for
23026 now. */
23027 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23028 return 0;
23030 /* First, work out if the hardware can do this at all, or
23031 if it's too slow.... */
23032 if (!FLOAT_MODE_P (compare_mode))
23034 if (TARGET_ISEL)
23035 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23036 return 0;
23039 is_against_zero = op1 == CONST0_RTX (compare_mode);
23041 /* A floating-point subtract might overflow, underflow, or produce
23042 an inexact result, thus changing the floating-point flags, so it
23043 can't be generated if we care about that. It's safe if one side
23044 of the construct is zero, since then no subtract will be
23045 generated. */
23046 if (SCALAR_FLOAT_MODE_P (compare_mode)
23047 && flag_trapping_math && ! is_against_zero)
23048 return 0;
23050 /* Eliminate half of the comparisons by switching operands, this
23051 makes the remaining code simpler. */
23052 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23053 || code == LTGT || code == LT || code == UNLE)
23055 code = reverse_condition_maybe_unordered (code);
23056 temp = true_cond;
23057 true_cond = false_cond;
23058 false_cond = temp;
23061 /* UNEQ and LTGT take four instructions for a comparison with zero,
23062 so it'll probably be faster to use a branch here too. */
23063 if (code == UNEQ && HONOR_NANS (compare_mode))
23064 return 0;
23066 /* We're going to try to implement comparisons by performing
23067 a subtract, then comparing against zero. Unfortunately,
23068 Inf - Inf is NaN which is not zero, and so if we don't
23069 know that the operand is finite and the comparison
23070 would treat EQ different to UNORDERED, we can't do it. */
23071 if (HONOR_INFINITIES (compare_mode)
23072 && code != GT && code != UNGE
23073 && (GET_CODE (op1) != CONST_DOUBLE
23074 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23075 /* Constructs of the form (a OP b ? a : b) are safe. */
23076 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23077 || (! rtx_equal_p (op0, true_cond)
23078 && ! rtx_equal_p (op1, true_cond))))
23079 return 0;
23081 /* At this point we know we can use fsel. */
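/* fsel d,a,b,c computes d = (a >= 0.0 ? b : c), which is why every
   remaining case is massaged below into a GE test against zero.  */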
23083 /* Reduce the comparison to a comparison against zero. */
23084 if (! is_against_zero)
23086 temp = gen_reg_rtx (compare_mode);
23087 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23088 op0 = temp;
23089 op1 = CONST0_RTX (compare_mode);
23092 /* If we don't care about NaNs we can reduce some of the comparisons
23093 down to faster ones. */
23094 if (! HONOR_NANS (compare_mode))
23095 switch (code)
23097 case GT:
23098 code = LE;
23099 temp = true_cond;
23100 true_cond = false_cond;
23101 false_cond = temp;
23102 break;
23103 case UNGE:
23104 code = GE;
23105 break;
23106 case UNEQ:
23107 code = EQ;
23108 break;
23109 default:
23110 break;
23113 /* Now, reduce everything down to a GE. */
23114 switch (code)
23116 case GE:
23117 break;
23119 case LE:
23120 temp = gen_reg_rtx (compare_mode);
23121 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23122 op0 = temp;
23123 break;
23125 case ORDERED:
23126 temp = gen_reg_rtx (compare_mode);
23127 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23128 op0 = temp;
23129 break;
23131 case EQ:
23132 temp = gen_reg_rtx (compare_mode);
23133 emit_insn (gen_rtx_SET (temp,
23134 gen_rtx_NEG (compare_mode,
23135 gen_rtx_ABS (compare_mode, op0))));
23136 op0 = temp;
23137 break;
23139 case UNGE:
23140 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23141 temp = gen_reg_rtx (result_mode);
23142 emit_insn (gen_rtx_SET (temp,
23143 gen_rtx_IF_THEN_ELSE (result_mode,
23144 gen_rtx_GE (VOIDmode,
23145 op0, op1),
23146 true_cond, false_cond)));
23147 false_cond = true_cond;
23148 true_cond = temp;
23150 temp = gen_reg_rtx (compare_mode);
23151 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23152 op0 = temp;
23153 break;
23155 case GT:
23156 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23157 temp = gen_reg_rtx (result_mode);
23158 emit_insn (gen_rtx_SET (temp,
23159 gen_rtx_IF_THEN_ELSE (result_mode,
23160 gen_rtx_GE (VOIDmode,
23161 op0, op1),
23162 true_cond, false_cond)));
23163 true_cond = false_cond;
23164 false_cond = temp;
23166 temp = gen_reg_rtx (compare_mode);
23167 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23168 op0 = temp;
23169 break;
23171 default:
23172 gcc_unreachable ();
23175 emit_insn (gen_rtx_SET (dest,
23176 gen_rtx_IF_THEN_ELSE (result_mode,
23177 gen_rtx_GE (VOIDmode,
23178 op0, op1),
23179 true_cond, false_cond)));
23180 return 1;
23183 /* Same as above, but for ints (isel). */
23185 static int
23186 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23188 rtx condition_rtx, cr;
23189 machine_mode mode = GET_MODE (dest);
23190 enum rtx_code cond_code;
23191 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23192 bool signedp;
23194 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23195 return 0;
23197 /* We still have to do the compare, because isel doesn't do a
23198 compare, it just looks at the CRx bits set by a previous compare
23199 instruction. */
23200 condition_rtx = rs6000_generate_compare (op, mode);
23201 cond_code = GET_CODE (condition_rtx);
23202 cr = XEXP (condition_rtx, 0);
23203 signedp = GET_MODE (cr) == CCmode;
23205 isel_func = (mode == SImode
23206 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23207 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23209 switch (cond_code)
23211 case LT: case GT: case LTU: case GTU: case EQ:
23212 /* isel handles these directly. */
23213 break;
23215 default:
23216 /* We need to swap the sense of the comparison. */
23218 std::swap (false_cond, true_cond);
23219 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23221 break;
23224 false_cond = force_reg (mode, false_cond);
23225 if (true_cond != const0_rtx)
23226 true_cond = force_reg (mode, true_cond);
23228 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23230 return 1;
23233 const char *
23234 output_isel (rtx *operands)
23236 enum rtx_code code;
23238 code = GET_CODE (operands[1]);
23240 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23242 gcc_assert (GET_CODE (operands[2]) == REG
23243 && GET_CODE (operands[3]) == REG);
23244 PUT_CODE (operands[1], reverse_condition (code));
23245 return "isel %0,%3,%2,%j1";
23248 return "isel %0,%2,%3,%j1";
23251 void
23252 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23254 machine_mode mode = GET_MODE (op0);
23255 enum rtx_code c;
23256 rtx target;
23258 /* VSX/altivec have direct min/max insns. */
23259 if ((code == SMAX || code == SMIN)
23260 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23261 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23263 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23264 return;
23267 if (code == SMAX || code == SMIN)
23268 c = GE;
23269 else
23270 c = GEU;
23272 if (code == SMAX || code == UMAX)
23273 target = emit_conditional_move (dest, c, op0, op1, mode,
23274 op0, op1, mode, 0);
23275 else
23276 target = emit_conditional_move (dest, c, op0, op1, mode,
23277 op1, op0, mode, 0);
23278 gcc_assert (target);
23279 if (target != dest)
23280 emit_move_insn (dest, target);
23283 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23284 for the value to come from memory or to be already loaded into a GPR. */
23286 void
23287 rs6000_split_signbit (rtx dest, rtx src)
23289 machine_mode d_mode = GET_MODE (dest);
23290 machine_mode s_mode = GET_MODE (src);
23291 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23292 rtx shift_reg = dest_di;
23294 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23296 if (MEM_P (src))
23298 rtx mem = (WORDS_BIG_ENDIAN
23299 ? adjust_address (src, DImode, 0)
23300 : adjust_address (src, DImode, 8));
23301 emit_insn (gen_rtx_SET (dest_di, mem));
23304 else
23306 unsigned int r = reg_or_subregno (src);
23308 if (INT_REGNO_P (r))
23309 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23311 else
23313 /* Generate the special mfvsrd instruction to get it in a GPR. */
23314 gcc_assert (VSX_REGNO_P (r));
23315 if (s_mode == KFmode)
23316 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23317 else
23318 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23322 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23323 return;
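/* Bit 0 of the most-significant doubleword holds the IEEE sign, so the
   logical shift right by 63 above leaves DEST holding just 0 or 1.  */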
23326 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23327 COND is true. Mark the jump as unlikely to be taken. */
23329 static void
23330 emit_unlikely_jump (rtx cond, rtx label)
23332 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23333 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23334 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23337 /* A subroutine of the atomic operation splitters. Emit a load-locked
23338 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23339 the zero_extend operation. */
23341 static void
23342 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23344 rtx (*fn) (rtx, rtx) = NULL;
23346 switch (mode)
23348 case E_QImode:
23349 fn = gen_load_lockedqi;
23350 break;
23351 case E_HImode:
23352 fn = gen_load_lockedhi;
23353 break;
23354 case E_SImode:
23355 if (GET_MODE (mem) == QImode)
23356 fn = gen_load_lockedqi_si;
23357 else if (GET_MODE (mem) == HImode)
23358 fn = gen_load_lockedhi_si;
23359 else
23360 fn = gen_load_lockedsi;
23361 break;
23362 case E_DImode:
23363 fn = gen_load_lockeddi;
23364 break;
23365 case E_TImode:
23366 fn = gen_load_lockedti;
23367 break;
23368 default:
23369 gcc_unreachable ();
23371 emit_insn (fn (reg, mem));
23374 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23375 instruction in MODE. */
23377 static void
23378 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23380 rtx (*fn) (rtx, rtx, rtx) = NULL;
23382 switch (mode)
23384 case E_QImode:
23385 fn = gen_store_conditionalqi;
23386 break;
23387 case E_HImode:
23388 fn = gen_store_conditionalhi;
23389 break;
23390 case E_SImode:
23391 fn = gen_store_conditionalsi;
23392 break;
23393 case E_DImode:
23394 fn = gen_store_conditionaldi;
23395 break;
23396 case E_TImode:
23397 fn = gen_store_conditionalti;
23398 break;
23399 default:
23400 gcc_unreachable ();
23403 /* Emit sync before stwcx. to address PPC405 Erratum. */
23404 if (PPC405_ERRATUM77)
23405 emit_insn (gen_hwsync ());
23407 emit_insn (fn (res, mem, val));
23410 /* Expand barriers before and after a load_locked/store_cond sequence. */
23412 static rtx
23413 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23415 rtx addr = XEXP (mem, 0);
23417 if (!legitimate_indirect_address_p (addr, reload_completed)
23418 && !legitimate_indexed_address_p (addr, reload_completed))
23420 addr = force_reg (Pmode, addr);
23421 mem = replace_equiv_address_nv (mem, addr);
23424 switch (model)
23426 case MEMMODEL_RELAXED:
23427 case MEMMODEL_CONSUME:
23428 case MEMMODEL_ACQUIRE:
23429 break;
23430 case MEMMODEL_RELEASE:
23431 case MEMMODEL_ACQ_REL:
23432 emit_insn (gen_lwsync ());
23433 break;
23434 case MEMMODEL_SEQ_CST:
23435 emit_insn (gen_hwsync ());
23436 break;
23437 default:
23438 gcc_unreachable ();
23440 return mem;
23443 static void
23444 rs6000_post_atomic_barrier (enum memmodel model)
23446 switch (model)
23448 case MEMMODEL_RELAXED:
23449 case MEMMODEL_CONSUME:
23450 case MEMMODEL_RELEASE:
23451 break;
23452 case MEMMODEL_ACQUIRE:
23453 case MEMMODEL_ACQ_REL:
23454 case MEMMODEL_SEQ_CST:
23455 emit_insn (gen_isync ());
23456 break;
23457 default:
23458 gcc_unreachable ();
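/* Between them these two routines implement the usual PowerPC mapping:
   hwsync before a seq-cst access, lwsync before a release, and an
   isync-based acquire fence after the protected sequence.  */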
23462 /* A subroutine of the various atomic expanders. For sub-word operations,
23463 we must adjust things to operate on SImode. Given the original MEM,
23464 return a new aligned memory. Also build and return the quantities by
23465 which to shift and mask. */
23467 static rtx
23468 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23470 rtx addr, align, shift, mask, mem;
23471 HOST_WIDE_INT shift_mask;
23472 machine_mode mode = GET_MODE (orig_mem);
23474 /* For smaller modes, we have to implement this via SImode. */
23475 shift_mask = (mode == QImode ? 0x18 : 0x10);
23477 addr = XEXP (orig_mem, 0);
23478 addr = force_reg (GET_MODE (addr), addr);
23480 /* Aligned memory containing subword. Generate a new memory. We
23481 do not want any of the existing MEM_ATTR data, as we're now
23482 accessing memory outside the original object. */
23483 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23484 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23485 mem = gen_rtx_MEM (SImode, align);
23486 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23487 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23488 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23490 /* Shift amount for subword relative to aligned word. */
23491 shift = gen_reg_rtx (SImode);
23492 addr = gen_lowpart (SImode, addr);
23493 rtx tmp = gen_reg_rtx (SImode);
23494 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23495 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23496 if (BYTES_BIG_ENDIAN)
23497 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23498 shift, 1, OPTAB_LIB_WIDEN);
23499 *pshift = shift;
23501 /* Mask for insertion. */
23502 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23503 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23504 *pmask = mask;
23506 return mem;
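/* A rough big-endian HImode example: for a halfword at address A the
   containing word is loaded from A & -4, SHIFT selects which halfword
   of that word is wanted (16 for offset 0, 0 for offset 2, after the
   endian XOR), and MASK is 0xffff shifted into the same position.  */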
23509 /* A subroutine of the various atomic expanders. For sub-word operands,
23510 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23512 static rtx
23513 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23515 rtx x;
23517 x = gen_reg_rtx (SImode);
23518 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23519 gen_rtx_NOT (SImode, mask),
23520 oldval)));
23522 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23524 return x;
23527 /* A subroutine of the various atomic expanders. For sub-word operands,
23528 extract WIDE to NARROW via SHIFT. */
23530 static void
23531 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23533 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23534 wide, 1, OPTAB_LIB_WIDEN);
23535 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23538 /* Expand an atomic compare and swap operation. */
23540 void
23541 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23543 rtx boolval, retval, mem, oldval, newval, cond;
23544 rtx label1, label2, x, mask, shift;
23545 machine_mode mode, orig_mode;
23546 enum memmodel mod_s, mod_f;
23547 bool is_weak;
23549 boolval = operands[0];
23550 retval = operands[1];
23551 mem = operands[2];
23552 oldval = operands[3];
23553 newval = operands[4];
23554 is_weak = (INTVAL (operands[5]) != 0);
23555 mod_s = memmodel_base (INTVAL (operands[6]));
23556 mod_f = memmodel_base (INTVAL (operands[7]));
23557 orig_mode = mode = GET_MODE (mem);
23559 mask = shift = NULL_RTX;
23560 if (mode == QImode || mode == HImode)
23562 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23563 lwarx and shift/mask operations. With power8, we need to do the
23564 comparison in SImode, but the store is still done in QI/HImode. */
23565 oldval = convert_modes (SImode, mode, oldval, 1);
23567 if (!TARGET_SYNC_HI_QI)
23569 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23571 /* Shift and mask OLDVAL into position within the word. */
23572 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23573 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23575 /* Shift and mask NEWVAL into position within the word. */
23576 newval = convert_modes (SImode, mode, newval, 1);
23577 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23578 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23581 /* Prepare to adjust the return value. */
23582 retval = gen_reg_rtx (SImode);
23583 mode = SImode;
23585 else if (reg_overlap_mentioned_p (retval, oldval))
23586 oldval = copy_to_reg (oldval);
23588 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23589 oldval = copy_to_mode_reg (mode, oldval);
23591 if (reg_overlap_mentioned_p (retval, newval))
23592 newval = copy_to_reg (newval);
23594 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23596 label1 = NULL_RTX;
23597 if (!is_weak)
23599 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23600 emit_label (XEXP (label1, 0));
23602 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23604 emit_load_locked (mode, retval, mem);
23606 x = retval;
23607 if (mask)
23608 x = expand_simple_binop (SImode, AND, retval, mask,
23609 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23611 cond = gen_reg_rtx (CCmode);
23612 /* If we have TImode, synthesize a comparison. */
23613 if (mode != TImode)
23614 x = gen_rtx_COMPARE (CCmode, x, oldval);
23615 else
23617 rtx xor1_result = gen_reg_rtx (DImode);
23618 rtx xor2_result = gen_reg_rtx (DImode);
23619 rtx or_result = gen_reg_rtx (DImode);
23620 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23621 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23622 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23623 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23625 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23626 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23627 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23628 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23631 emit_insn (gen_rtx_SET (cond, x));
23633 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23634 emit_unlikely_jump (x, label2);
23636 x = newval;
23637 if (mask)
23638 x = rs6000_mask_atomic_subword (retval, newval, mask);
23640 emit_store_conditional (orig_mode, cond, mem, x);
23642 if (!is_weak)
23644 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23645 emit_unlikely_jump (x, label1);
23648 if (!is_mm_relaxed (mod_f))
23649 emit_label (XEXP (label2, 0));
23651 rs6000_post_atomic_barrier (mod_s);
23653 if (is_mm_relaxed (mod_f))
23654 emit_label (XEXP (label2, 0));
23656 if (shift)
23657 rs6000_finish_atomic_subword (operands[1], retval, shift);
23658 else if (mode != GET_MODE (operands[1]))
23659 convert_move (operands[1], retval, 1);
23661 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23662 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23663 emit_insn (gen_rtx_SET (boolval, x));
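/* Expository sketch (editorial addition, not in the original source):
   ignoring the subword shift/mask handling, the RTL emitted above
   amounts to the following loop, where load_locked and
   store_conditional are stand-ins for the larx/stcx. instruction
   pairs:

     loop:
       retval = load_locked (mem);
       if (retval != oldval)
         goto fail;                          branch on CR0
       if (!store_conditional (mem, newval))
         goto loop;                          reservation lost, retry
     fail:
       boolval = (retval == oldval);

   For a weak compare-and-swap the backward branch to loop is omitted,
   so a lost reservation simply reports failure to the caller.  */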
23666 /* Expand an atomic exchange operation. */
23668 void
23669 rs6000_expand_atomic_exchange (rtx operands[])
23671 rtx retval, mem, val, cond;
23672 machine_mode mode;
23673 enum memmodel model;
23674 rtx label, x, mask, shift;
23676 retval = operands[0];
23677 mem = operands[1];
23678 val = operands[2];
23679 model = memmodel_base (INTVAL (operands[3]));
23680 mode = GET_MODE (mem);
23682 mask = shift = NULL_RTX;
23683 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23685 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23687 /* Shift and mask VAL into position within the word. */
23688 val = convert_modes (SImode, mode, val, 1);
23689 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23690 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23692 /* Prepare to adjust the return value. */
23693 retval = gen_reg_rtx (SImode);
23694 mode = SImode;
23697 mem = rs6000_pre_atomic_barrier (mem, model);
23699 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23700 emit_label (XEXP (label, 0));
23702 emit_load_locked (mode, retval, mem);
23704 x = val;
23705 if (mask)
23706 x = rs6000_mask_atomic_subword (retval, val, mask);
23708 cond = gen_reg_rtx (CCmode);
23709 emit_store_conditional (mode, cond, mem, x);
23711 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23712 emit_unlikely_jump (x, label);
23714 rs6000_post_atomic_barrier (model);
23716 if (shift)
23717 rs6000_finish_atomic_subword (operands[0], retval, shift);
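/* Expository sketch (editorial addition, not in the original source):
   in the same stand-in notation as above, the exchange loop is

     do
       retval = load_locked (mem);
     while (!store_conditional (mem, val));

   with VAL first shifted and merged into the containing word when the
   target lacks lbarx/lharx.  */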
23720 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23721 to perform. MEM is the memory on which to operate. VAL is the second
23722 operand of the binary operator. BEFORE and AFTER are optional locations to
23723 return the value of MEM either before or after the operation. MODEL_RTX
23724 is a CONST_INT containing the memory model to use. */
23726 void
23727 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23728 rtx orig_before, rtx orig_after, rtx model_rtx)
23730 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23731 machine_mode mode = GET_MODE (mem);
23732 machine_mode store_mode = mode;
23733 rtx label, x, cond, mask, shift;
23734 rtx before = orig_before, after = orig_after;
23736 mask = shift = NULL_RTX;
23737 /* On power8, we want to use SImode for the operation. On previous systems,
23738 use the operation in a subword and shift/mask to get the proper byte or
23739 halfword. */
23740 if (mode == QImode || mode == HImode)
23742 if (TARGET_SYNC_HI_QI)
23744 val = convert_modes (SImode, mode, val, 1);
23746 /* Prepare to adjust the return value. */
23747 before = gen_reg_rtx (SImode);
23748 if (after)
23749 after = gen_reg_rtx (SImode);
23750 mode = SImode;
23752 else
23754 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23756 /* Shift and mask VAL into position within the word. */
23757 val = convert_modes (SImode, mode, val, 1);
23758 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23759 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23761 switch (code)
23763 case IOR:
23764 case XOR:
23765 /* We've already zero-extended VAL. That is sufficient to
23766 make certain that it does not affect other bits. */
23767 mask = NULL;
23768 break;
23770 case AND:
23771 /* If we make certain that all of the other bits in VAL are
23772 set, that will be sufficient to not affect other bits. */
23773 x = gen_rtx_NOT (SImode, mask);
23774 x = gen_rtx_IOR (SImode, x, val);
23775 emit_insn (gen_rtx_SET (val, x));
23776 mask = NULL;
23777 break;
23779 case NOT:
23780 case PLUS:
23781 case MINUS:
23782 /* These will all affect bits outside the field and need
23783 adjustment via MASK within the loop. */
23784 break;
23786 default:
23787 gcc_unreachable ();
23790 /* Prepare to adjust the return value. */
23791 before = gen_reg_rtx (SImode);
23792 if (after)
23793 after = gen_reg_rtx (SImode);
23794 store_mode = mode = SImode;
23798 mem = rs6000_pre_atomic_barrier (mem, model);
23800 label = gen_label_rtx ();
23801 emit_label (label);
23802 label = gen_rtx_LABEL_REF (VOIDmode, label);
23804 if (before == NULL_RTX)
23805 before = gen_reg_rtx (mode);
23807 emit_load_locked (mode, before, mem);
23809 if (code == NOT)
23811 x = expand_simple_binop (mode, AND, before, val,
23812 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23813 after = expand_simple_unop (mode, NOT, x, after, 1);
23815 else
23817 after = expand_simple_binop (mode, code, before, val,
23818 after, 1, OPTAB_LIB_WIDEN);
23821 x = after;
23822 if (mask)
23824 x = expand_simple_binop (SImode, AND, after, mask,
23825 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23826 x = rs6000_mask_atomic_subword (before, x, mask);
23828 else if (store_mode != mode)
23829 x = convert_modes (store_mode, mode, x, 1);
23831 cond = gen_reg_rtx (CCmode);
23832 emit_store_conditional (store_mode, cond, mem, x);
23834 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23835 emit_unlikely_jump (x, label);
23837 rs6000_post_atomic_barrier (model);
23839 if (shift)
23841 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23842 then do the calculations in a SImode register. */
23843 if (orig_before)
23844 rs6000_finish_atomic_subword (orig_before, before, shift);
23845 if (orig_after)
23846 rs6000_finish_atomic_subword (orig_after, after, shift);
23848 else if (store_mode != mode)
23850 /* QImode/HImode on machines with lbarx/lharx where we do the native
23851 operation and then do the calculations in a SImode register. */
23852 if (orig_before)
23853 convert_move (orig_before, before, 1);
23854 if (orig_after)
23855 convert_move (orig_after, after, 1);
23857 else if (orig_after && after != orig_after)
23858 emit_move_insn (orig_after, after);
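/* Expository sketch (editorial addition, not in the original source):
   for a full-word operation the loop above computes, per iteration,

     before = load_locked (mem);
     after  = (code == NOT) ? ~(before & val)     fetch-NAND semantics
                            : before OP val;
     store_conditional (mem, after);              retry on failure

   The NOT case is paired with an AND so that __atomic_fetch_nand gets
   the ~(a & b) semantics the builtin requires.  */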
23861 /* Emit instructions to move SRC to DST. Called by splitters for
23862 multi-register moves. It will emit at most one instruction for
23863 each register that is accessed; that is, it won't emit li/lis pairs
23864 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23865 register. */
23867 void
23868 rs6000_split_multireg_move (rtx dst, rtx src)
23870 /* The register number of the first register being moved. */
23871 int reg;
23872 /* The mode that is to be moved. */
23873 machine_mode mode;
23874 /* The mode that the move is being done in, and its size. */
23875 machine_mode reg_mode;
23876 int reg_mode_size;
23877 /* The number of registers that will be moved. */
23878 int nregs;
23880 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23881 mode = GET_MODE (dst);
23882 nregs = hard_regno_nregs (reg, mode);
23883 if (FP_REGNO_P (reg))
23884 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23885 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23886 else if (ALTIVEC_REGNO_P (reg))
23887 reg_mode = V16QImode;
23888 else
23889 reg_mode = word_mode;
23890 reg_mode_size = GET_MODE_SIZE (reg_mode);
23892 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23894 /* TDmode residing in FP registers is special, since the ISA requires that
23895 the lower-numbered word of a register pair is always the most significant
23896 word, even in little-endian mode. This does not match the usual subreg
23897 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23898 the appropriate constituent registers "by hand" in little-endian mode.
23900 Note we do not need to check for destructive overlap here since TDmode
23901 can only reside in even/odd register pairs. */
23902 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23904 rtx p_src, p_dst;
23905 int i;
23907 for (i = 0; i < nregs; i++)
23909 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23910 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23911 else
23912 p_src = simplify_gen_subreg (reg_mode, src, mode,
23913 i * reg_mode_size);
23915 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23916 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23917 else
23918 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23919 i * reg_mode_size);
23921 emit_insn (gen_rtx_SET (p_dst, p_src));
23924 return;
23927 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23929 /* Move register range backwards, if we might have destructive
23930 overlap. */
23931 int i;
23932 for (i = nregs - 1; i >= 0; i--)
23933 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23934 i * reg_mode_size),
23935 simplify_gen_subreg (reg_mode, src, mode,
23936 i * reg_mode_size)));
23938 else
23940 int i;
23941 int j = -1;
23942 bool used_update = false;
23943 rtx restore_basereg = NULL_RTX;
23945 if (MEM_P (src) && INT_REGNO_P (reg))
23947 rtx breg;
23949 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23950 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23952 rtx delta_rtx;
23953 breg = XEXP (XEXP (src, 0), 0);
23954 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23955 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23956 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23957 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23958 src = replace_equiv_address (src, breg);
23960 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23962 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23964 rtx basereg = XEXP (XEXP (src, 0), 0);
23965 if (TARGET_UPDATE)
23967 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23968 emit_insn (gen_rtx_SET (ndst,
23969 gen_rtx_MEM (reg_mode,
23970 XEXP (src, 0))));
23971 used_update = true;
23973 else
23974 emit_insn (gen_rtx_SET (basereg,
23975 XEXP (XEXP (src, 0), 1)));
23976 src = replace_equiv_address (src, basereg);
23978 else
23980 rtx basereg = gen_rtx_REG (Pmode, reg);
23981 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23982 src = replace_equiv_address (src, basereg);
23986 breg = XEXP (src, 0);
23987 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23988 breg = XEXP (breg, 0);
23990 /* If the base register we are using to address memory is
23991 also a destination reg, then change that register last. */
23992 if (REG_P (breg)
23993 && REGNO (breg) >= REGNO (dst)
23994 && REGNO (breg) < REGNO (dst) + nregs)
23995 j = REGNO (breg) - REGNO (dst);
23997 else if (MEM_P (dst) && INT_REGNO_P (reg))
23999 rtx breg;
24001 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24002 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24004 rtx delta_rtx;
24005 breg = XEXP (XEXP (dst, 0), 0);
24006 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24007 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24008 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24010 /* We have to update the breg before doing the store.
24011 Use store with update, if available. */
24013 if (TARGET_UPDATE)
24015 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24016 emit_insn (TARGET_32BIT
24017 ? (TARGET_POWERPC64
24018 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24019 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24020 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24021 used_update = true;
24023 else
24024 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24025 dst = replace_equiv_address (dst, breg);
24027 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24028 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24030 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24032 rtx basereg = XEXP (XEXP (dst, 0), 0);
24033 if (TARGET_UPDATE)
24035 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24036 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24037 XEXP (dst, 0)),
24038 nsrc));
24039 used_update = true;
24041 else
24042 emit_insn (gen_rtx_SET (basereg,
24043 XEXP (XEXP (dst, 0), 1)));
24044 dst = replace_equiv_address (dst, basereg);
24046 else
24048 rtx basereg = XEXP (XEXP (dst, 0), 0);
24049 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24050 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24051 && REG_P (basereg)
24052 && REG_P (offsetreg)
24053 && REGNO (basereg) != REGNO (offsetreg));
24054 if (REGNO (basereg) == 0)
24056 rtx tmp = offsetreg;
24057 offsetreg = basereg;
24058 basereg = tmp;
24060 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24061 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24062 dst = replace_equiv_address (dst, basereg);
24065 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24066 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24069 for (i = 0; i < nregs; i++)
24071 /* Calculate index to next subword. */
24072 ++j;
24073 if (j == nregs)
24074 j = 0;
24076 /* If the compiler already emitted a move of the first word by
24077 store with update, there is no need to do anything. */
24078 if (j == 0 && used_update)
24079 continue;
24081 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24082 j * reg_mode_size),
24083 simplify_gen_subreg (reg_mode, src, mode,
24084 j * reg_mode_size)));
24086 if (restore_basereg != NULL_RTX)
24087 emit_insn (restore_basereg);
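/* Expository example (editorial addition, not in the original source):
   why the backward copy above matters.  Moving a two-register value
   from r4:r5 into r5:r6 with a forward loop would first write r5,
   clobbering the second source word before it is read; copying the
   high register first (r6 = r5, then r5 = r4) is safe for any overlap
   where REGNO (src) < REGNO (dst).  */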
24092 /* This page contains routines that are used to determine what the
24093 function prologue and epilogue code will do and write them out. */
24095 /* Determine whether REG is really used, i.e. whether it needs to be saved. */
24097 static bool
24098 save_reg_p (int reg)
24100 /* We need to mark the PIC offset register live for the same conditions
24101 as it is set up, or otherwise it won't be saved before we clobber it. */
24103 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24105 /* When calling eh_return, we must return true for all the cases
24106 where conditional_register_usage marks the PIC offset reg
24107 call used. */
24108 if (TARGET_TOC && TARGET_MINIMAL_TOC
24109 && (crtl->calls_eh_return
24110 || df_regs_ever_live_p (reg)
24111 || !constant_pool_empty_p ()))
24112 return true;
24114 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24115 && flag_pic)
24116 return true;
24119 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24122 /* Return the first fixed-point register that is required to be
24123 saved. 32 if none. */
24126 first_reg_to_save (void)
24128 int first_reg;
24130 /* Find lowest numbered live register. */
24131 for (first_reg = 13; first_reg <= 31; first_reg++)
24132 if (save_reg_p (first_reg))
24133 break;
24135 #if TARGET_MACHO
24136 if (flag_pic
24137 && crtl->uses_pic_offset_table
24138 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24139 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24140 #endif
24142 return first_reg;
24145 /* Similar, for FP regs. */
24148 first_fp_reg_to_save (void)
24150 int first_reg;
24152 /* Find lowest numbered live register. */
24153 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24154 if (save_reg_p (first_reg))
24155 break;
24157 return first_reg;
24160 /* Similar, for AltiVec regs. */
24162 static int
24163 first_altivec_reg_to_save (void)
24165 int i;
24167 /* Stack frame remains as is unless we are in AltiVec ABI. */
24168 if (! TARGET_ALTIVEC_ABI)
24169 return LAST_ALTIVEC_REGNO + 1;
24171 /* On Darwin, the unwind routines are compiled without
24172 TARGET_ALTIVEC, and use save_world to save/restore the
24173 altivec registers when necessary. */
24174 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24175 && ! TARGET_ALTIVEC)
24176 return FIRST_ALTIVEC_REGNO + 20;
24178 /* Find lowest numbered live register. */
24179 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24180 if (save_reg_p (i))
24181 break;
24183 return i;
24186 /* Return a 32-bit mask of the AltiVec registers we need to set in
24187 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
24188 numbered from the MSB, so V0 corresponds to the most significant bit. */
24190 static unsigned int
24191 compute_vrsave_mask (void)
24193 unsigned int i, mask = 0;
24195 /* On Darwin, the unwind routines are compiled without
24196 TARGET_ALTIVEC, and use save_world to save/restore the
24197 call-saved altivec registers when necessary. */
24198 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24199 && ! TARGET_ALTIVEC)
24200 mask |= 0xFFF;
24202 /* First, find out if we use _any_ altivec registers. */
24203 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24204 if (df_regs_ever_live_p (i))
24205 mask |= ALTIVEC_REG_BIT (i);
24207 if (mask == 0)
24208 return mask;
24210 /* Next, remove the argument registers from the set. These must
24211 be in the VRSAVE mask set by the caller, so we don't need to add
24212 them in again. More importantly, the mask we compute here is
24213 used to generate CLOBBERs in the set_vrsave insn, and we do not
24214 wish the argument registers to die. */
24215 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24216 mask &= ~ALTIVEC_REG_BIT (i);
24218 /* Similarly, remove the return value from the set. */
24220 bool yes = false;
24221 diddle_return_value (is_altivec_return_reg, &yes);
24222 if (yes)
24223 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24226 return mask;
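/* Expository check (editorial addition, not in the original source):
   with V0 mapped to the most significant bit, the constant 0xFFF used
   above is exactly the mask of the call-saved registers V20..V31:

     unsigned int m = 0;
     for (int v = 20; v <= 31; v++)
       m |= 0x80000000u >> v;
     now m == 0x00000FFF

   which matches ALTIVEC_REG_BIT for those registers, assuming its
   usual MSB-first definition.  */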
24229 /* For a very restricted set of circumstances, we can cut down the
24230 size of prologues/epilogues by calling our own save/restore-the-world
24231 routines. */
24233 static void
24234 compute_save_world_info (rs6000_stack_t *info)
24236 info->world_save_p = 1;
24237 info->world_save_p
24238 = (WORLD_SAVE_P (info)
24239 && DEFAULT_ABI == ABI_DARWIN
24240 && !cfun->has_nonlocal_label
24241 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24242 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24243 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24244 && info->cr_save_p);
24246 /* This will not work in conjunction with sibcalls. Make sure there
24247 are none. (This check is expensive, but seldom executed.) */
24248 if (WORLD_SAVE_P (info))
24250 rtx_insn *insn;
24251 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24252 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24254 info->world_save_p = 0;
24255 break;
24259 if (WORLD_SAVE_P (info))
24261 /* Even if we're not touching VRsave, make sure there's room on the
24262 stack for it, if it looks like we're calling SAVE_WORLD, which
24263 will attempt to save it. */
24264 info->vrsave_size = 4;
24266 /* If we are going to save the world, we need to save the link register too. */
24267 info->lr_save_p = 1;
24269 /* "Save" the VRsave register too if we're saving the world. */
24270 if (info->vrsave_mask == 0)
24271 info->vrsave_mask = compute_vrsave_mask ();
24273 /* Because the Darwin register save/restore routines only handle
24274 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24275 check. */
24276 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24277 && (info->first_altivec_reg_save
24278 >= FIRST_SAVED_ALTIVEC_REGNO));
24281 return;
24285 static void
24286 is_altivec_return_reg (rtx reg, void *xyes)
24288 bool *yes = (bool *) xyes;
24289 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24290 *yes = true;
24294 /* Return whether REG is a global user reg or has been specified by
24295 -ffixed-REG. We should not restore these, and so cannot use
24296 lmw or out-of-line restore functions if there are any. We also
24297 can't save them (well, emit frame notes for them), because frame
24298 unwinding during exception handling will restore saved registers. */
24300 static bool
24301 fixed_reg_p (int reg)
24303 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24304 backend sets it, overriding anything the user might have given. */
24305 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24306 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24307 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24308 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24309 return false;
24311 return fixed_regs[reg];
24314 /* Determine the strategy for saving/restoring registers. */
24316 enum {
24317 SAVE_MULTIPLE = 0x1,
24318 SAVE_INLINE_GPRS = 0x2,
24319 SAVE_INLINE_FPRS = 0x4,
24320 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24321 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24322 SAVE_INLINE_VRS = 0x20,
24323 REST_MULTIPLE = 0x100,
24324 REST_INLINE_GPRS = 0x200,
24325 REST_INLINE_FPRS = 0x400,
24326 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24327 REST_INLINE_VRS = 0x1000
24330 static int
24331 rs6000_savres_strategy (rs6000_stack_t *info,
24332 bool using_static_chain_p)
24334 int strategy = 0;
24336 /* Select between in-line and out-of-line save and restore of regs.
24337 First, all the obvious cases where we don't use out-of-line. */
24338 if (crtl->calls_eh_return
24339 || cfun->machine->ra_need_lr)
24340 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24341 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24342 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24344 if (info->first_gp_reg_save == 32)
24345 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24347 if (info->first_fp_reg_save == 64
24348 /* The out-of-line FP routines use double-precision stores;
24349 we can't use those routines if we don't have such stores. */
24350 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24351 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24353 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24354 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24356 /* Define cutoff for using out-of-line functions to save registers. */
24357 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24359 if (!optimize_size)
24361 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24362 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24363 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24365 else
24367 /* Prefer out-of-line restore if it will exit. */
24368 if (info->first_fp_reg_save > 61)
24369 strategy |= SAVE_INLINE_FPRS;
24370 if (info->first_gp_reg_save > 29)
24372 if (info->first_fp_reg_save == 64)
24373 strategy |= SAVE_INLINE_GPRS;
24374 else
24375 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24377 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24378 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24381 else if (DEFAULT_ABI == ABI_DARWIN)
24383 if (info->first_fp_reg_save > 60)
24384 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24385 if (info->first_gp_reg_save > 29)
24386 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24387 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24389 else
24391 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24392 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24393 || info->first_fp_reg_save > 61)
24394 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24395 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24396 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24399 /* Don't bother to try to save things out-of-line if r11 is occupied
24400 by the static chain. It would require too much fiddling and the
24401 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24402 pointer on Darwin, and AIX uses r1 or r12. */
24403 if (using_static_chain_p
24404 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24405 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24406 | SAVE_INLINE_GPRS
24407 | SAVE_INLINE_VRS);
24409 /* Don't ever restore fixed regs. That means we can't use the
24410 out-of-line register restore functions if a fixed reg is in the
24411 range of regs restored. */
24412 if (!(strategy & REST_INLINE_FPRS))
24413 for (int i = info->first_fp_reg_save; i < 64; i++)
24414 if (fixed_regs[i])
24416 strategy |= REST_INLINE_FPRS;
24417 break;
24420 /* We can only use the out-of-line routines to restore fprs if we've
24421 saved all the registers from first_fp_reg_save in the prologue.
24422 Otherwise, we risk loading garbage. Of course, if we have saved
24423 out-of-line then we know we haven't skipped any fprs. */
24424 if ((strategy & SAVE_INLINE_FPRS)
24425 && !(strategy & REST_INLINE_FPRS))
24426 for (int i = info->first_fp_reg_save; i < 64; i++)
24427 if (!save_reg_p (i))
24429 strategy |= REST_INLINE_FPRS;
24430 break;
24433 /* Similarly, for altivec regs. */
24434 if (!(strategy & REST_INLINE_VRS))
24435 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24436 if (fixed_regs[i])
24438 strategy |= REST_INLINE_VRS;
24439 break;
24442 if ((strategy & SAVE_INLINE_VRS)
24443 && !(strategy & REST_INLINE_VRS))
24444 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24445 if (!save_reg_p (i))
24447 strategy |= REST_INLINE_VRS;
24448 break;
24451 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24452 saved is an out-of-line save or restore. Set up the value for
24453 the next test (excluding out-of-line gprs). */
24454 bool lr_save_p = (info->lr_save_p
24455 || !(strategy & SAVE_INLINE_FPRS)
24456 || !(strategy & SAVE_INLINE_VRS)
24457 || !(strategy & REST_INLINE_FPRS)
24458 || !(strategy & REST_INLINE_VRS));
24460 if (TARGET_MULTIPLE
24461 && !TARGET_POWERPC64
24462 && info->first_gp_reg_save < 31
24463 && !(flag_shrink_wrap
24464 && flag_shrink_wrap_separate
24465 && optimize_function_for_speed_p (cfun)))
24467 int count = 0;
24468 for (int i = info->first_gp_reg_save; i < 32; i++)
24469 if (save_reg_p (i))
24470 count++;
24472 if (count <= 1)
24473 /* Don't use store multiple if only one reg needs to be
24474 saved. This can occur for example when the ABI_V4 pic reg
24475 (r30) needs to be saved to make calls, but r31 is not
24476 used. */
24477 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24478 else
24480 /* Prefer store multiple for saves over out-of-line
24481 routines, since the store-multiple instruction will
24482 always be smaller. */
24483 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24485 /* The situation is more complicated with load multiple.
24486 We'd prefer to use the out-of-line routines for restores,
24487 since the "exit" out-of-line routines can handle the
24488 restore of LR and the frame teardown. However, it doesn't
24489 make sense to use the out-of-line routine if that is the
24490 only reason we'd need to save LR, and we can't use the
24491 "exit" out-of-line gpr restore if we have saved some
24492 fprs; in those cases it is advantageous to use load
24493 multiple when available. */
24494 if (info->first_fp_reg_save != 64 || !lr_save_p)
24495 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24499 /* Using the "exit" out-of-line routine does not improve code size
24500 if using it would require lr to be saved and if only saving one
24501 or two gprs. */
24502 else if (!lr_save_p && info->first_gp_reg_save > 29)
24503 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24505 /* Don't ever restore fixed regs. */
24506 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24507 for (int i = info->first_gp_reg_save; i < 32; i++)
24508 if (fixed_reg_p (i))
24510 strategy |= REST_INLINE_GPRS;
24511 strategy &= ~REST_MULTIPLE;
24512 break;
24515 /* We can only use load multiple or the out-of-line routines to
24516 restore gprs if we've saved all the registers from
24517 first_gp_reg_save. Otherwise, we risk loading garbage.
24518 Of course, if we have saved out-of-line or used stmw then we know
24519 we haven't skipped any gprs. */
24520 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24521 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24522 for (int i = info->first_gp_reg_save; i < 32; i++)
24523 if (!save_reg_p (i))
24525 strategy |= REST_INLINE_GPRS;
24526 strategy &= ~REST_MULTIPLE;
24527 break;
24530 if (TARGET_ELF && TARGET_64BIT)
24532 if (!(strategy & SAVE_INLINE_FPRS))
24533 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24534 else if (!(strategy & SAVE_INLINE_GPRS)
24535 && info->first_fp_reg_save == 64)
24536 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24538 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24539 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24541 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24542 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24544 return strategy;
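/* Expository sketch (editorial addition, not in the original source):
   callers treat the result as a bit set, along these lines (variable
   names hypothetical):

     int s = info->savres_strategy;
     bool out_of_line_gpr_save = (s & SAVE_INLINE_GPRS) == 0;
     bool use_stmw_lmw = (s & (SAVE_MULTIPLE | REST_MULTIPLE)) != 0;

   where a clear SAVE_/REST_INLINE bit selects the out-of-line
   save/restore routines for that register class.  */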
24547 /* Calculate the stack information for the current function. This is
24548 complicated by having two separate calling sequences, the AIX calling
24549 sequence and the V.4 calling sequence.
24551 AIX (and Darwin/Mac OS X) stack frames look like:
24552 32-bit 64-bit
24553 SP----> +---------------------------------------+
24554 | back chain to caller | 0 0
24555 +---------------------------------------+
24556 | saved CR | 4 8 (8-11)
24557 +---------------------------------------+
24558 | saved LR | 8 16
24559 +---------------------------------------+
24560 | reserved for compilers | 12 24
24561 +---------------------------------------+
24562 | reserved for binders | 16 32
24563 +---------------------------------------+
24564 | saved TOC pointer | 20 40
24565 +---------------------------------------+
24566 | Parameter save area (+padding*) (P) | 24 48
24567 +---------------------------------------+
24568 | Alloca space (A) | 24+P etc.
24569 +---------------------------------------+
24570 | Local variable space (L) | 24+P+A
24571 +---------------------------------------+
24572 | Float/int conversion temporary (X) | 24+P+A+L
24573 +---------------------------------------+
24574 | Save area for AltiVec registers (W) | 24+P+A+L+X
24575 +---------------------------------------+
24576 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24577 +---------------------------------------+
24578 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24579 +---------------------------------------+
24580 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24581 +---------------------------------------+
24582 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24583 +---------------------------------------+
24584 old SP->| back chain to caller's caller |
24585 +---------------------------------------+
24587 * If the alloca area is present, the parameter save area is
24588 padded so that the former starts 16-byte aligned.
24590 The required alignment for AIX configurations is two words (i.e., 8
24591 or 16 bytes).
24593 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24595 SP----> +---------------------------------------+
24596 | Back chain to caller | 0
24597 +---------------------------------------+
24598 | Save area for CR | 8
24599 +---------------------------------------+
24600 | Saved LR | 16
24601 +---------------------------------------+
24602 | Saved TOC pointer | 24
24603 +---------------------------------------+
24604 | Parameter save area (+padding*) (P) | 32
24605 +---------------------------------------+
24606 | Alloca space (A) | 32+P
24607 +---------------------------------------+
24608 | Local variable space (L) | 32+P+A
24609 +---------------------------------------+
24610 | Save area for AltiVec registers (W) | 32+P+A+L
24611 +---------------------------------------+
24612 | AltiVec alignment padding (Y) | 32+P+A+L+W
24613 +---------------------------------------+
24614 | Save area for GP registers (G) | 32+P+A+L+W+Y
24615 +---------------------------------------+
24616 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24617 +---------------------------------------+
24618 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24619 +---------------------------------------+
24621 * If the alloca area is present, the parameter save area is
24622 padded so that the former starts 16-byte aligned.
24624 V.4 stack frames look like:
24626 SP----> +---------------------------------------+
24627 | back chain to caller | 0
24628 +---------------------------------------+
24629 | caller's saved LR | 4
24630 +---------------------------------------+
24631 | Parameter save area (+padding*) (P) | 8
24632 +---------------------------------------+
24633 | Alloca space (A) | 8+P
24634 +---------------------------------------+
24635 | Varargs save area (V) | 8+P+A
24636 +---------------------------------------+
24637 | Local variable space (L) | 8+P+A+V
24638 +---------------------------------------+
24639 | Float/int conversion temporary (X) | 8+P+A+V+L
24640 +---------------------------------------+
24641 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24642 +---------------------------------------+
24643 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24644 +---------------------------------------+
24645 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24646 +---------------------------------------+
24647 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24648 +---------------------------------------+
24649 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24650 +---------------------------------------+
24651 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24652 +---------------------------------------+
24653 old SP->| back chain to caller's caller |
24654 +---------------------------------------+
24656 * If the alloca area is present and the required alignment is
24657 16 bytes, the parameter save area is padded so that the
24658 alloca area starts 16-byte aligned.
24660 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24661 given. (But note below and in sysv4.h that we require only 8 and
24663 may round up the size of our stack frame anyway. The historical
24663 reason is early versions of powerpc-linux which didn't properly
24664 align the stack at program startup. A happy side-effect is that
24665 -mno-eabi libraries can be used with -meabi programs.)
24667 The EABI configuration defaults to the V.4 layout. However,
24668 the stack alignment requirements may differ. If -mno-eabi is not
24669 given, the required stack alignment is 8 bytes; if -mno-eabi is
24670 given, the required alignment is 16 bytes. (But see V.4 comment
24671 above.) */
24673 #ifndef ABI_STACK_BOUNDARY
24674 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24675 #endif
24677 static rs6000_stack_t *
24678 rs6000_stack_info (void)
24680 /* We should never be called for thunks, we are not set up for that. */
24681 gcc_assert (!cfun->is_thunk);
24683 rs6000_stack_t *info = &stack_info;
24684 int reg_size = TARGET_32BIT ? 4 : 8;
24685 int ehrd_size;
24686 int ehcr_size;
24687 int save_align;
24688 int first_gp;
24689 HOST_WIDE_INT non_fixed_size;
24690 bool using_static_chain_p;
24692 if (reload_completed && info->reload_completed)
24693 return info;
24695 memset (info, 0, sizeof (*info));
24696 info->reload_completed = reload_completed;
24698 /* Select which calling sequence. */
24699 info->abi = DEFAULT_ABI;
24701 /* Calculate which registers need to be saved & save area size. */
24702 info->first_gp_reg_save = first_reg_to_save ();
24703 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24704 even if it currently looks like we won't. Reload may need it to
24705 get at a constant; if so, it will have already created a constant
24706 pool entry for it. */
24707 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24708 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24709 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24710 && crtl->uses_const_pool
24711 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24712 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24713 else
24714 first_gp = info->first_gp_reg_save;
24716 info->gp_size = reg_size * (32 - first_gp);
24718 info->first_fp_reg_save = first_fp_reg_to_save ();
24719 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24721 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24722 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24723 - info->first_altivec_reg_save);
24725 /* Does this function call anything? */
24726 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24728 /* Determine if we need to save the condition code registers. */
24729 if (save_reg_p (CR2_REGNO)
24730 || save_reg_p (CR3_REGNO)
24731 || save_reg_p (CR4_REGNO))
24733 info->cr_save_p = 1;
24734 if (DEFAULT_ABI == ABI_V4)
24735 info->cr_size = reg_size;
24738 /* If the current function calls __builtin_eh_return, then we need
24739 to allocate stack space for registers that will hold data for
24740 the exception handler. */
24741 if (crtl->calls_eh_return)
24743 unsigned int i;
24744 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24745 continue;
24747 ehrd_size = i * UNITS_PER_WORD;
24749 else
24750 ehrd_size = 0;
24752 /* In the ELFv2 ABI, we also need to allocate space for separate
24753 CR field save areas if the function calls __builtin_eh_return. */
24754 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24756 /* This hard-codes that we have three call-saved CR fields. */
24757 ehcr_size = 3 * reg_size;
24758 /* We do *not* use the regular CR save mechanism. */
24759 info->cr_save_p = 0;
24761 else
24762 ehcr_size = 0;
24764 /* Determine various sizes. */
24765 info->reg_size = reg_size;
24766 info->fixed_size = RS6000_SAVE_AREA;
24767 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24768 if (cfun->calls_alloca)
24769 info->parm_size =
24770 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24771 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24772 else
24773 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24774 TARGET_ALTIVEC ? 16 : 8);
24775 if (FRAME_GROWS_DOWNWARD)
24776 info->vars_size
24777 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24778 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24779 - (info->fixed_size + info->vars_size + info->parm_size);
24781 if (TARGET_ALTIVEC_ABI)
24782 info->vrsave_mask = compute_vrsave_mask ();
24784 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24785 info->vrsave_size = 4;
24787 compute_save_world_info (info);
24789 /* Calculate the offsets. */
24790 switch (DEFAULT_ABI)
24792 case ABI_NONE:
24793 default:
24794 gcc_unreachable ();
24796 case ABI_AIX:
24797 case ABI_ELFv2:
24798 case ABI_DARWIN:
24799 info->fp_save_offset = -info->fp_size;
24800 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24802 if (TARGET_ALTIVEC_ABI)
24804 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24806 /* Align stack so vector save area is on a quadword boundary.
24807 The padding goes above the vectors. */
24808 if (info->altivec_size != 0)
24809 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24811 info->altivec_save_offset = info->vrsave_save_offset
24812 - info->altivec_padding_size
24813 - info->altivec_size;
24814 gcc_assert (info->altivec_size == 0
24815 || info->altivec_save_offset % 16 == 0);
24817 /* Adjust for AltiVec case. */
24818 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24820 else
24821 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24823 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24824 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24825 info->lr_save_offset = 2*reg_size;
24826 break;
24828 case ABI_V4:
24829 info->fp_save_offset = -info->fp_size;
24830 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24831 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24833 if (TARGET_ALTIVEC_ABI)
24835 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24837 /* Align stack so vector save area is on a quadword boundary. */
24838 if (info->altivec_size != 0)
24839 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24841 info->altivec_save_offset = info->vrsave_save_offset
24842 - info->altivec_padding_size
24843 - info->altivec_size;
24845 /* Adjust for AltiVec case. */
24846 info->ehrd_offset = info->altivec_save_offset;
24848 else
24849 info->ehrd_offset = info->cr_save_offset;
24851 info->ehrd_offset -= ehrd_size;
24852 info->lr_save_offset = reg_size;
24855 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24856 info->save_size = RS6000_ALIGN (info->fp_size
24857 + info->gp_size
24858 + info->altivec_size
24859 + info->altivec_padding_size
24860 + ehrd_size
24861 + ehcr_size
24862 + info->cr_size
24863 + info->vrsave_size,
24864 save_align);
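/* Expository note (editorial addition, not in the original source):
   RS6000_ALIGN rounds its first argument up to a multiple of the
   second; with the usual power-of-two round-up definition,

     RS6000_ALIGN (n, a) == ((n + a - 1) & ~(a - 1))

   so e.g. RS6000_ALIGN (100, 16) == 112.  Here it pads the combined
   save areas to SAVE_ALIGN (16 bytes for AltiVec or Darwin, otherwise
   8).  */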
24866 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24868 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24869 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24871 /* Determine if we need to save the link register. */
24872 if (info->calls_p
24873 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24874 && crtl->profile
24875 && !TARGET_PROFILE_KERNEL)
24876 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24877 #ifdef TARGET_RELOCATABLE
24878 || (DEFAULT_ABI == ABI_V4
24879 && (TARGET_RELOCATABLE || flag_pic > 1)
24880 && !constant_pool_empty_p ())
24881 #endif
24882 || rs6000_ra_ever_killed ())
24883 info->lr_save_p = 1;
24885 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24886 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24887 && call_used_regs[STATIC_CHAIN_REGNUM]);
24888 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24890 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24891 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24892 || !(info->savres_strategy & SAVE_INLINE_VRS)
24893 || !(info->savres_strategy & REST_INLINE_GPRS)
24894 || !(info->savres_strategy & REST_INLINE_FPRS)
24895 || !(info->savres_strategy & REST_INLINE_VRS))
24896 info->lr_save_p = 1;
24898 if (info->lr_save_p)
24899 df_set_regs_ever_live (LR_REGNO, true);
24901 /* Determine if we need to allocate any stack frame:
24903 For AIX we need to push the stack if a frame pointer is needed
24904 (because the stack might be dynamically adjusted), if we are
24905 debugging, if we make calls, or if the sum of fp_save, gp_save,
24906 and local variables are more than the space needed to save all
24907 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24908 + 18*8 = 288 (GPR13 reserved).
24910 For V.4 we don't have the stack cushion that AIX uses, but assume
24911 that the debugger can handle stackless frames. */
24913 if (info->calls_p)
24914 info->push_p = 1;
24916 else if (DEFAULT_ABI == ABI_V4)
24917 info->push_p = non_fixed_size != 0;
24919 else if (frame_pointer_needed)
24920 info->push_p = 1;
24922 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24923 info->push_p = 1;
24925 else
24926 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24928 return info;
24931 static void
24932 debug_stack_info (rs6000_stack_t *info)
24934 const char *abi_string;
24936 if (! info)
24937 info = rs6000_stack_info ();
24939 fprintf (stderr, "\nStack information for function %s:\n",
24940 ((current_function_decl && DECL_NAME (current_function_decl))
24941 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24942 : "<unknown>"));
24944 switch (info->abi)
24946 default: abi_string = "Unknown"; break;
24947 case ABI_NONE: abi_string = "NONE"; break;
24948 case ABI_AIX: abi_string = "AIX"; break;
24949 case ABI_ELFv2: abi_string = "ELFv2"; break;
24950 case ABI_DARWIN: abi_string = "Darwin"; break;
24951 case ABI_V4: abi_string = "V.4"; break;
24954 fprintf (stderr, "\tABI = %5s\n", abi_string);
24956 if (TARGET_ALTIVEC_ABI)
24957 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24959 if (info->first_gp_reg_save != 32)
24960 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24962 if (info->first_fp_reg_save != 64)
24963 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24965 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24966 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24967 info->first_altivec_reg_save);
24969 if (info->lr_save_p)
24970 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24972 if (info->cr_save_p)
24973 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24975 if (info->vrsave_mask)
24976 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24978 if (info->push_p)
24979 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24981 if (info->calls_p)
24982 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24984 if (info->gp_size)
24985 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24987 if (info->fp_size)
24988 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24990 if (info->altivec_size)
24991 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24992 info->altivec_save_offset);
24994 if (info->vrsave_size)
24995 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24996 info->vrsave_save_offset);
24998 if (info->lr_save_p)
24999 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25001 if (info->cr_save_p)
25002 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25004 if (info->varargs_save_offset)
25005 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25007 if (info->total_size)
25008 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25009 info->total_size);
25011 if (info->vars_size)
25012 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25013 info->vars_size);
25015 if (info->parm_size)
25016 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25018 if (info->fixed_size)
25019 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25021 if (info->gp_size)
25022 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25024 if (info->fp_size)
25025 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25027 if (info->altivec_size)
25028 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25030 if (info->vrsave_size)
25031 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25033 if (info->altivec_padding_size)
25034 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25035 info->altivec_padding_size);
25037 if (info->cr_size)
25038 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25040 if (info->save_size)
25041 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25043 if (info->reg_size != 4)
25044 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25046 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25048 fprintf (stderr, "\n");
25052 rs6000_return_addr (int count, rtx frame)
25054 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25055 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25056 if (count != 0
25057 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25059 cfun->machine->ra_needs_full_frame = 1;
25061 if (count == 0)
25062 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25063 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25064 frame = stack_pointer_rtx;
25065 rtx prev_frame_addr = memory_address (Pmode, frame);
25066 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25067 rtx lr_save_off = plus_constant (Pmode,
25068 prev_frame, RETURN_ADDRESS_OFFSET);
25069 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25070 return gen_rtx_MEM (Pmode, lr_save_addr);
25073 cfun->machine->ra_need_lr = 1;
25074 return get_hard_reg_initial_val (Pmode, LR_REGNO);
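/* Expository sketch (editorial addition, not in the original source):
   when the full-frame path above is taken, the generated code
   performs, in effect,

     frame = *(void **) frame_or_sp;            follow the back chain
     ra = *(void **) ((char *) frame + RETURN_ADDRESS_OFFSET);

   i.e. one hop up the back chain, then a load from the ABI-defined LR
   save slot in that frame.  */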
25077 /* Say whether a function is a candidate for sibcall handling or not. */
25079 static bool
25080 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25082 tree fntype;
25084 if (decl)
25085 fntype = TREE_TYPE (decl);
25086 else
25087 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25089 /* We can't do it if the called function has more vector parameters
25090 than the current function; there's nowhere to put the VRsave code. */
25091 if (TARGET_ALTIVEC_ABI
25092 && TARGET_ALTIVEC_VRSAVE
25093 && !(decl && decl == current_function_decl))
25095 function_args_iterator args_iter;
25096 tree type;
25097 int nvreg = 0;
25099 /* Functions with vector parameters are required to have a
25100 prototype, so the argument type info must be available
25101 here. */
25102 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25103 if (TREE_CODE (type) == VECTOR_TYPE
25104 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25105 nvreg++;
25107 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25108 if (TREE_CODE (type) == VECTOR_TYPE
25109 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25110 nvreg--;
25112 if (nvreg > 0)
25113 return false;
25116 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25117 functions, because the callee may have a different TOC pointer to
25118 the caller and there's no way to ensure we restore the TOC when
25119 we return. With the secure-plt SYSV ABI we can't make non-local
25120 calls when -fpic/PIC because the plt call stubs use r30. */
25121 if (DEFAULT_ABI == ABI_DARWIN
25122 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25123 && decl
25124 && !DECL_EXTERNAL (decl)
25125 && !DECL_WEAK (decl)
25126 && (*targetm.binds_local_p) (decl))
25127 || (DEFAULT_ABI == ABI_V4
25128 && (!TARGET_SECURE_PLT
25129 || !flag_pic
25130 || (decl
25131 && (*targetm.binds_local_p) (decl)))))
25133 tree attr_list = TYPE_ATTRIBUTES (fntype);
25135 if (!lookup_attribute ("longcall", attr_list)
25136 || lookup_attribute ("shortcall", attr_list))
25137 return true;
25140 return false;
25143 static int
25144 rs6000_ra_ever_killed (void)
25146 rtx_insn *top;
25147 rtx reg;
25148 rtx_insn *insn;
25150 if (cfun->is_thunk)
25151 return 0;
25153 if (cfun->machine->lr_save_state)
25154 return cfun->machine->lr_save_state - 1;
25156 /* regs_ever_live has LR marked as used if any sibcalls are present,
25157 but this should not force saving and restoring in the
25158 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25159 clobbers LR, so that is inappropriate. */
25161 /* Also, the prologue can generate a store into LR that
25162 doesn't really count, like this:
25164 move LR->R0
25165 bcl to set PIC register
25166 move LR->R31
25167 move R0->LR
25169 When we're called from the epilogue, we need to avoid counting
25170 this as a store. */
25172 push_topmost_sequence ();
25173 top = get_insns ();
25174 pop_topmost_sequence ();
25175 reg = gen_rtx_REG (Pmode, LR_REGNO);
25177 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25179 if (INSN_P (insn))
25181 if (CALL_P (insn))
25183 if (!SIBLING_CALL_P (insn))
25184 return 1;
25186 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25187 return 1;
25188 else if (set_of (reg, insn) != NULL_RTX
25189 && !prologue_epilogue_contains (insn))
25190 return 1;
25193 return 0;
25196 /* Emit instructions needed to load the TOC register.
25197 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25198 a constant pool; or for SVR4 -fpic. */
25200 void
25201 rs6000_emit_load_toc_table (int fromprolog)
25203 rtx dest;
25204 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25206 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25208 char buf[30];
25209 rtx lab, tmp1, tmp2, got;
25211 lab = gen_label_rtx ();
25212 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25213 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25214 if (flag_pic == 2)
25216 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25217 need_toc_init = 1;
25219 else
25220 got = rs6000_got_sym ();
25221 tmp1 = tmp2 = dest;
25222 if (!fromprolog)
25224 tmp1 = gen_reg_rtx (Pmode);
25225 tmp2 = gen_reg_rtx (Pmode);
25227 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25228 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25229 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25230 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25232 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25234 emit_insn (gen_load_toc_v4_pic_si ());
25235 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25237 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25239 char buf[30];
25240 rtx temp0 = (fromprolog
25241 ? gen_rtx_REG (Pmode, 0)
25242 : gen_reg_rtx (Pmode));
25244 if (fromprolog)
25246 rtx symF, symL;
25248 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25249 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25251 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25252 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25254 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25255 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25256 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25258 else
25260 rtx tocsym, lab;
25262 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25263 need_toc_init = 1;
25264 lab = gen_label_rtx ();
25265 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25266 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25267 if (TARGET_LINK_STACK)
25268 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25269 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25271 emit_insn (gen_addsi3 (dest, temp0, dest));
25273 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25275 /* This is for AIX code running in non-PIC ELF32. */
25276 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25278 need_toc_init = 1;
25279 emit_insn (gen_elf_high (dest, realsym));
25280 emit_insn (gen_elf_low (dest, dest, realsym));
25282 else
25284 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25286 if (TARGET_32BIT)
25287 emit_insn (gen_load_toc_aix_si (dest));
25288 else
25289 emit_insn (gen_load_toc_aix_di (dest));
25293 /* Emit instructions to restore the link register after determining where
25294 its value has been stored. */
25296 void
25297 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25299 rs6000_stack_t *info = rs6000_stack_info ();
25300 rtx operands[2];
25302 operands[0] = source;
25303 operands[1] = scratch;
25305 if (info->lr_save_p)
25307 rtx frame_rtx = stack_pointer_rtx;
25308 HOST_WIDE_INT sp_offset = 0;
25309 rtx tmp;
25311 if (frame_pointer_needed
25312 || cfun->calls_alloca
25313 || info->total_size > 32767)
25315 tmp = gen_frame_mem (Pmode, frame_rtx);
25316 emit_move_insn (operands[1], tmp);
25317 frame_rtx = operands[1];
25319 else if (info->push_p)
25320 sp_offset = info->total_size;
25322 tmp = plus_constant (Pmode, frame_rtx,
25323 info->lr_save_offset + sp_offset);
25324 tmp = gen_frame_mem (Pmode, tmp);
25325 emit_move_insn (tmp, operands[0]);
25327 else
25328 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25330 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25331 state of lr_save_p so any change from here on would be a bug. In
25332 particular, stop rs6000_ra_ever_killed from considering the SET
25333 of lr we may have added just above. */
25334 cfun->machine->lr_save_state = info->lr_save_p + 1;
25337 static GTY(()) alias_set_type set = -1;
25339 alias_set_type
25340 get_TOC_alias_set (void)
25342 if (set == -1)
25343 set = new_alias_set ();
25344 return set;
25347 /* This returns nonzero if the current function uses the TOC. This is
25348 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25349 is generated by the ABI_V4 load_toc_* patterns.
25350 Return 2 instead of 1 if the load_toc_* pattern is in the function
25351 partition that doesn't start the function. */
25352 #if TARGET_ELF
25353 static int
25354 uses_TOC (void)
25356 rtx_insn *insn;
25357 int ret = 1;
25359 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25361 if (INSN_P (insn))
25363 rtx pat = PATTERN (insn);
25364 int i;
25366 if (GET_CODE (pat) == PARALLEL)
25367 for (i = 0; i < XVECLEN (pat, 0); i++)
25369 rtx sub = XVECEXP (pat, 0, i);
25370 if (GET_CODE (sub) == USE)
25372 sub = XEXP (sub, 0);
25373 if (GET_CODE (sub) == UNSPEC
25374 && XINT (sub, 1) == UNSPEC_TOC)
25375 return ret;
25379 else if (crtl->has_bb_partition
25380 && NOTE_P (insn)
25381 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25382 ret = 2;
25384 return 0;
25386 #endif
25388 rtx
25389 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25391 rtx tocrel, tocreg, hi;
25393 if (TARGET_DEBUG_ADDR)
25395 if (GET_CODE (symbol) == SYMBOL_REF)
25396 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25397 XSTR (symbol, 0));
25398 else
25400 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25401 GET_RTX_NAME (GET_CODE (symbol)));
25402 debug_rtx (symbol);
25406 if (!can_create_pseudo_p ())
25407 df_set_regs_ever_live (TOC_REGISTER, true);
25409 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25410 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25411 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25412 return tocrel;
25414 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25415 if (largetoc_reg != NULL)
25417 emit_move_insn (largetoc_reg, hi);
25418 hi = largetoc_reg;
25420 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
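/* Illustrative note (not from the source): with -mcmodel=small the
   UNSPEC_TOCREL returned above typically becomes a single TOC-relative
   access, while the HIGH/LO_SUM pair built for the larger code models
   corresponds to an addis/load sequence, e.g.

       ld 9,var@toc(2)              # small model

       addis 9,2,var@toc@ha         # medium/large model
       ld 9,var@toc@l(9)

   where r2 is TOC_REGISTER and "var" stands in for SYMBOL.  */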
25423 /* Issue assembly directives that create a reference to the given DWARF
25424 FRAME_TABLE_LABEL from the current function section. */
25425 void
25426 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25428 fprintf (asm_out_file, "\t.ref %s\n",
25429 (* targetm.strip_name_encoding) (frame_table_label));
25432 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25433 and the change to the stack pointer. */
25435 static void
25436 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25438 rtvec p;
25439 int i;
25440 rtx regs[3];
25442 i = 0;
25443 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25444 if (hard_frame_needed)
25445 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25446 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25447 || (hard_frame_needed
25448 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25449 regs[i++] = fp;
25451 p = rtvec_alloc (i);
25452 while (--i >= 0)
25454 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25455 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25458 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25461 /* Emit the correct code for allocating stack space, as insns.
25462 If COPY_REG, leave a copy of the old stack pointer in it, offset by COPY_OFF.
25463 The generated code may use hard register 0 as a temporary. */
25465 static rtx_insn *
25466 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25468 rtx_insn *insn;
25469 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25470 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25471 rtx todec = gen_int_mode (-size, Pmode);
25472 rtx par, set, mem;
25474 if (INTVAL (todec) != -size)
25476 warning (0, "stack frame too large");
25477 emit_insn (gen_trap ());
25478 return 0;
25481 if (crtl->limit_stack)
25483 if (REG_P (stack_limit_rtx)
25484 && REGNO (stack_limit_rtx) > 1
25485 && REGNO (stack_limit_rtx) <= 31)
25487 rtx_insn *insn
25488 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25489 gcc_assert (insn);
25490 emit_insn (insn);
25491 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25493 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25494 && TARGET_32BIT
25495 && DEFAULT_ABI == ABI_V4
25496 && !flag_pic)
25498 rtx toload = gen_rtx_CONST (VOIDmode,
25499 gen_rtx_PLUS (Pmode,
25500 stack_limit_rtx,
25501 GEN_INT (size)));
25503 emit_insn (gen_elf_high (tmp_reg, toload));
25504 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25505 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25506 const0_rtx));
25508 else
25509 warning (0, "stack limit expression is not supported");
25512 if (copy_reg)
25514 if (copy_off != 0)
25515 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25516 else
25517 emit_move_insn (copy_reg, stack_reg);
25520 if (size > 32767)
25522 /* Need a note here so that try_split doesn't get confused. */
25523 if (get_last_insn () == NULL_RTX)
25524 emit_note (NOTE_INSN_DELETED);
25525 insn = emit_move_insn (tmp_reg, todec);
25526 try_split (PATTERN (insn), insn, 0);
25527 todec = tmp_reg;
25530 insn = emit_insn (TARGET_32BIT
25531 ? gen_movsi_update_stack (stack_reg, stack_reg,
25532 todec, stack_reg)
25533 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25534 todec, stack_reg));
25535 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25536 it now and set the alias set/attributes. The above gen_*_update
25537 calls will generate a PARALLEL with the MEM set being the first
25538 operation. */
25539 par = PATTERN (insn);
25540 gcc_assert (GET_CODE (par) == PARALLEL);
25541 set = XVECEXP (par, 0, 0);
25542 gcc_assert (GET_CODE (set) == SET);
25543 mem = SET_DEST (set);
25544 gcc_assert (MEM_P (mem));
25545 MEM_NOTRAP_P (mem) = 1;
25546 set_mem_alias_set (mem, get_frame_alias_set ());
25548 RTX_FRAME_RELATED_P (insn) = 1;
25549 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25550 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25551 GEN_INT (-size))));
25552 return insn;
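/* Rough sketch of the 64-bit code this emits (assumed, for illustration):

       stdu 1,-144(1)               # size fits in 16 bits

       lis 0,-1                     # size == 65536: load -size into r0
       stdux 1,1,0                  #   then store-with-update

   32-bit targets use stwu/stwux instead; the REG_FRAME_RELATED_EXPR note
   added above describes both forms as a simple pointer decrement.  */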
25555 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25557 #if PROBE_INTERVAL > 32768
25558 #error Cannot use indexed addressing mode for stack probing
25559 #endif
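/* Worked example (assuming the default STACK_CHECK_PROBE_INTERVAL_EXP of
   12, i.e. a 4096-byte PROBE_INTERVAL): a call to the function below with
   FIRST == 16384 and SIZE == 10000 takes the small-count path and probes
   at sp-20480, sp-24576 and finally sp-26384.  */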
25561 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25562 inclusive. These are offsets from the current stack pointer. */
25564 static void
25565 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25567 /* See if we have a constant small number of probes to generate. If so,
25568 that's the easy case. */
25569 if (first + size <= 32768)
25571 HOST_WIDE_INT i;
25573 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25574 it exceeds SIZE. If only one probe is needed, this will not
25575 generate any code. Then probe at FIRST + SIZE. */
25576 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25577 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25578 -(first + i)));
25580 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25581 -(first + size)));
25584 /* Otherwise, do the same as above, but in a loop. Note that we must be
25585 extra careful with variables wrapping around because we might be at
25586 the very top (or the very bottom) of the address space and we have
25587 to be able to handle this case properly; in particular, we use an
25588 equality test for the loop condition. */
25589 else
25591 HOST_WIDE_INT rounded_size;
25592 rtx r12 = gen_rtx_REG (Pmode, 12);
25593 rtx r0 = gen_rtx_REG (Pmode, 0);
25595 /* Sanity check for the addressing mode we're going to use. */
25596 gcc_assert (first <= 32768);
25598 /* Step 1: round SIZE to the previous multiple of the interval. */
25600 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25603 /* Step 2: compute initial and final value of the loop counter. */
25605 /* TEST_ADDR = SP + FIRST. */
25606 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25607 -first)));
25609 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25610 if (rounded_size > 32768)
25612 emit_move_insn (r0, GEN_INT (-rounded_size));
25613 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25615 else
25616 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25617 -rounded_size)));
25620 /* Step 3: the loop
25622 do
25623 {
25624 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25625 probe at TEST_ADDR
25626 }
25627 while (TEST_ADDR != LAST_ADDR)
25629 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25630 until it is equal to ROUNDED_SIZE. */
25632 if (TARGET_64BIT)
25633 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25634 else
25635 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25638 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25639 that SIZE is equal to ROUNDED_SIZE. */
25641 if (size != rounded_size)
25642 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25646 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25647 absolute addresses. */
25649 const char *
25650 output_probe_stack_range (rtx reg1, rtx reg2)
25652 static int labelno = 0;
25653 char loop_lab[32];
25654 rtx xops[2];
25656 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25658 /* Loop. */
25659 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25661 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25662 xops[0] = reg1;
25663 xops[1] = GEN_INT (-PROBE_INTERVAL);
25664 output_asm_insn ("addi %0,%0,%1", xops);
25666 /* Probe at TEST_ADDR. */
25667 xops[1] = gen_rtx_REG (Pmode, 0);
25668 output_asm_insn ("stw %1,0(%0)", xops);
25670 /* Test if TEST_ADDR == LAST_ADDR. */
25671 xops[1] = reg2;
25672 if (TARGET_64BIT)
25673 output_asm_insn ("cmpd 0,%0,%1", xops);
25674 else
25675 output_asm_insn ("cmpw 0,%0,%1", xops);
25677 /* Branch. */
25678 fputs ("\tbne 0,", asm_out_file);
25679 assemble_name_raw (asm_out_file, loop_lab);
25680 fputc ('\n', asm_out_file);
25682 return "";
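/* Illustratively (not from the source), with r12/r0 as REG1/REG2 and a
   4096-byte PROBE_INTERVAL, the loop above assembles to something like:

       .LPSRL0:
               addi 12,12,-4096
               stw 0,0(12)
               cmpd 0,12,0
               bne 0,.LPSRL0

   with cmpw instead of cmpd on 32-bit targets.  */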
25685 /* This function is called when rs6000_frame_related is processing
25686 SETs within a PARALLEL, and returns whether the REGNO save ought to
25687 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25688 for out-of-line register save functions, store multiple, and the
25689 Darwin world_save. They may contain registers that don't really
25690 need saving. */
25692 static bool
25693 interesting_frame_related_regno (unsigned int regno)
25695 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25696 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25697 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25698 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25699 as frame related. */
25700 if (regno == 0)
25701 return true;
25702 /* If we see CR2 then we are here on a Darwin world save. Saves of
25703 CR2 signify the whole CR is being saved. This is a long-standing
25704 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25705 that CR needs to be saved. */
25706 if (regno == CR2_REGNO)
25707 return true;
25708 /* Omit frame info for any user-defined global regs. If frame info
25709 is supplied for them, frame unwinding will restore a user reg.
25710 Also omit frame info for any reg we don't need to save, as that
25711 bloats frame info and can cause problems with shrink wrapping.
25712 Since global regs won't be seen as needing to be saved, both of
25713 these conditions are covered by save_reg_p. */
25714 return save_reg_p (regno);
25717 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25718 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25719 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25720 deduce these equivalences by itself so it wasn't necessary to hold
25721 its hand so much. Don't be tempted to always supply d2_f_d_e with
25722 the actual cfa register, i.e. r31 when we are using a hard frame
25723 pointer. That fails when saving regs off r1, and sched moves the
25724 r31 setup past the reg saves. */
25726 static rtx_insn *
25727 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25728 rtx reg2, rtx repl2)
25730 rtx repl;
25732 if (REGNO (reg) == STACK_POINTER_REGNUM)
25734 gcc_checking_assert (val == 0);
25735 repl = NULL_RTX;
25737 else
25738 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25739 GEN_INT (val));
25741 rtx pat = PATTERN (insn);
25742 if (!repl && !reg2)
25744 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25745 if (GET_CODE (pat) == PARALLEL)
25746 for (int i = 0; i < XVECLEN (pat, 0); i++)
25747 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25749 rtx set = XVECEXP (pat, 0, i);
25751 if (!REG_P (SET_SRC (set))
25752 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25753 RTX_FRAME_RELATED_P (set) = 1;
25755 RTX_FRAME_RELATED_P (insn) = 1;
25756 return insn;
25759 /* We expect that 'pat' is either a SET or a PARALLEL containing
25760 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25761 are important so they all have to be marked RTX_FRAME_RELATED_P.
25762 Call simplify_replace_rtx on the SETs rather than the whole insn
25763 so as to leave the other stuff alone (for example USE of r12). */
25765 set_used_flags (pat);
25766 if (GET_CODE (pat) == SET)
25768 if (repl)
25769 pat = simplify_replace_rtx (pat, reg, repl);
25770 if (reg2)
25771 pat = simplify_replace_rtx (pat, reg2, repl2);
25773 else if (GET_CODE (pat) == PARALLEL)
25775 pat = shallow_copy_rtx (pat);
25776 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25778 for (int i = 0; i < XVECLEN (pat, 0); i++)
25779 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25781 rtx set = XVECEXP (pat, 0, i);
25783 if (repl)
25784 set = simplify_replace_rtx (set, reg, repl);
25785 if (reg2)
25786 set = simplify_replace_rtx (set, reg2, repl2);
25787 XVECEXP (pat, 0, i) = set;
25789 if (!REG_P (SET_SRC (set))
25790 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25791 RTX_FRAME_RELATED_P (set) = 1;
25794 else
25795 gcc_unreachable ();
25797 RTX_FRAME_RELATED_P (insn) = 1;
25798 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25800 return insn;
25803 /* Return a PARALLEL rtx containing a vrsave set operation with the
25804 appropriate CLOBBERs. */
25806 static rtx
25807 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25809 int nclobs, i;
25810 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25811 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25813 clobs[0]
25814 = gen_rtx_SET (vrsave,
25815 gen_rtx_UNSPEC_VOLATILE (SImode,
25816 gen_rtvec (2, reg, vrsave),
25817 UNSPECV_SET_VRSAVE));
25819 nclobs = 1;
25821 /* We need to clobber the registers in the mask so the scheduler
25822 does not move sets to VRSAVE before sets of AltiVec registers.
25824 However, if the function receives nonlocal gotos, reload will set
25825 all call saved registers live. We will end up with:
25827 (set (reg 999) (mem))
25828 (parallel [ (set (reg vrsave) (unspec blah))
25829 (clobber (reg 999))])
25831 The clobber will cause the store into reg 999 to be dead, and
25832 flow will attempt to delete an epilogue insn. In this case, we
25833 need an unspec use/set of the register. */
25835 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25836 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25838 if (!epiloguep || call_used_regs [i])
25839 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25840 gen_rtx_REG (V4SImode, i));
25841 else
25843 rtx reg = gen_rtx_REG (V4SImode, i);
25845 clobs[nclobs++]
25846 = gen_rtx_SET (reg,
25847 gen_rtx_UNSPEC (V4SImode,
25848 gen_rtvec (1, reg), 27));
25852 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25854 for (i = 0; i < nclobs; ++i)
25855 XVECEXP (insn, 0, i) = clobs[i];
25857 return insn;
25860 static rtx
25861 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25863 rtx addr, mem;
25865 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25866 mem = gen_frame_mem (GET_MODE (reg), addr);
25867 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25870 static rtx
25871 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25873 return gen_frame_set (reg, frame_reg, offset, false);
25876 static rtx
25877 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25879 return gen_frame_set (reg, frame_reg, offset, true);
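/* For example (illustrative), gen_frame_store (r31, r1, 16) on a 64-bit
   target yields the RTL

       (set (mem:DI (plus:DI (reg 1) (const_int 16)))
            (reg:DI 31))

   with the frame alias set and MEM_NOTRAP_P applied by gen_frame_mem.  */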
25882 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25883 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25885 static rtx_insn *
25886 emit_frame_save (rtx frame_reg, machine_mode mode,
25887 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25889 rtx reg;
25891 /* Check that we were not given a mode that would need register indexed addressing. */
25892 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25893 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25895 reg = gen_rtx_REG (mode, regno);
25896 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25897 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25898 NULL_RTX, NULL_RTX);
25901 /* Emit an offset memory reference suitable for a frame store, while
25902 converting to a valid addressing mode. */
25904 static rtx
25905 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25907 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25910 #ifndef TARGET_FIX_AND_CONTINUE
25911 #define TARGET_FIX_AND_CONTINUE 0
25912 #endif
25914 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25915 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25916 #define LAST_SAVRES_REGISTER 31
25917 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25919 enum {
25920 SAVRES_LR = 0x1,
25921 SAVRES_SAVE = 0x2,
25922 SAVRES_REG = 0x0c,
25923 SAVRES_GPR = 0,
25924 SAVRES_FPR = 4,
25925 SAVRES_VR = 8
25926 };
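/* For instance (illustrative), SAVRES_SAVE | SAVRES_FPR | SAVRES_LR
   (== 0x7) selects the FPR save routine that also stores the link
   register, while a bare SAVRES_GPR (== 0x0) selects the plain GPR
   restore; SAVRES_REG masks out the register-class bits.  */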
25928 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25930 /* Temporary holding space for an out-of-line register save/restore
25931 routine name. */
25932 static char savres_routine_name[30];
25934 /* Return the name for an out-of-line register save/restore routine.
25935 The register class and the save/restore direction are selected by SEL. */
25937 static char *
25938 rs6000_savres_routine_name (int regno, int sel)
25940 const char *prefix = "";
25941 const char *suffix = "";
25943 /* Different targets are supposed to define
25944 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25945 routine name could be defined with:
25947 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25949 This is a nice idea in theory, but in practice, things are
25950 complicated in several ways:
25952 - ELF targets have save/restore routines for GPRs.
25954 - PPC64 ELF targets have routines for save/restore of GPRs that
25955 differ in what they do with the link register, so having a set
25956 prefix doesn't work. (We only use one of the save routines at
25957 the moment, though.)
25959 - PPC32 ELF targets have "exit" versions of the restore routines
25960 that restore the link register and can save some extra space.
25961 These require an extra suffix. (There are also "tail" versions
25962 of the restore routines and "GOT" versions of the save routines,
25963 but we don't generate those at present. Same problems apply,
25964 though.)
25966 We deal with all this by synthesizing our own prefix/suffix and
25967 using that for the simple sprintf call shown above. */
25968 if (DEFAULT_ABI == ABI_V4)
25970 if (TARGET_64BIT)
25971 goto aix_names;
25973 if ((sel & SAVRES_REG) == SAVRES_GPR)
25974 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25975 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25976 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25977 else if ((sel & SAVRES_REG) == SAVRES_VR)
25978 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25979 else
25980 abort ();
25982 if ((sel & SAVRES_LR))
25983 suffix = "_x";
25985 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25987 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25988 /* No out-of-line save/restore routines for GPRs on AIX. */
25989 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25990 #endif
25992 aix_names:
25993 if ((sel & SAVRES_REG) == SAVRES_GPR)
25994 prefix = ((sel & SAVRES_SAVE)
25995 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25996 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25997 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25999 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26000 if ((sel & SAVRES_LR))
26001 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26002 else
26003 #endif
26005 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26006 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26009 else if ((sel & SAVRES_REG) == SAVRES_VR)
26010 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26011 else
26012 abort ();
26015 if (DEFAULT_ABI == ABI_DARWIN)
26017 /* The Darwin approach is (slightly) different, in order to be
26018 compatible with code generated by the system toolchain. There is a
26019 single symbol for the start of save sequence, and the code here
26020 embeds an offset into that code on the basis of the first register
26021 to be saved. */
26022 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26023 if ((sel & SAVRES_REG) == SAVRES_GPR)
26024 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26025 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26026 (regno - 13) * 4, prefix, regno);
26027 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26028 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26029 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26030 else if ((sel & SAVRES_REG) == SAVRES_VR)
26031 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26032 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26033 else
26034 abort ();
26036 else
26037 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26039 return savres_routine_name;
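/* Example outputs (assumed, for illustration): 32-bit SVR4 saving GPRs
   from r29 up together with the link register yields "_savegpr_29_x",
   while ELFv2 restoring GPRs from r14 up with LR yields "_restgpr0_14".  */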
26042 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26043 The register class and the save/restore direction are selected by SEL. */
26045 static rtx
26046 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26048 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26049 ? info->first_gp_reg_save
26050 : (sel & SAVRES_REG) == SAVRES_FPR
26051 ? info->first_fp_reg_save - 32
26052 : (sel & SAVRES_REG) == SAVRES_VR
26053 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26054 : -1);
26055 rtx sym;
26056 int select = sel;
26058 /* Don't generate bogus routine names. */
26059 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26060 && regno <= LAST_SAVRES_REGISTER
26061 && select >= 0 && select <= 12);
26063 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26065 if (sym == NULL)
26067 char *name;
26069 name = rs6000_savres_routine_name (regno, sel);
26071 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26072 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26073 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26076 return sym;
26079 /* Emit a sequence of insns, including a stack tie if needed, for
26080 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26081 reset the stack pointer, but move the base of the frame into
26082 reg UPDT_REGNO for use by out-of-line register restore routines. */
26084 static rtx
26085 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26086 unsigned updt_regno)
26088 /* If there is nothing to do, don't do anything. */
26089 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26090 return NULL_RTX;
26092 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26094 /* This blockage is needed so that sched doesn't decide to move
26095 the sp change before the register restores. */
26096 if (DEFAULT_ABI == ABI_V4)
26097 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26098 GEN_INT (frame_off)));
26100 /* If we are restoring registers out-of-line, we will be using the
26101 "exit" variants of the restore routines, which will reset the
26102 stack for us. But we do need to point updt_reg into the
26103 right place for those routines. */
26104 if (frame_off != 0)
26105 return emit_insn (gen_add3_insn (updt_reg_rtx,
26106 frame_reg_rtx, GEN_INT (frame_off)));
26107 else
26108 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26110 return NULL_RTX;
26113 /* Return the register number used as a pointer by out-of-line
26114 save/restore functions. */
26116 static inline unsigned
26117 ptr_regno_for_savres (int sel)
26119 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26120 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26121 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
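/* So, e.g. (illustrative), an ELFv2 out-of-line GPR save without LR uses
   r12 as its pointer, the FPR and LR-saving variants use r1, and the
   32-bit SVR4 routines use r11 (r1 for FPRs on Darwin), matching the
   base registers the library routines themselves expect.  */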
26124 /* Construct a parallel rtx describing the effect of a call to an
26125 out-of-line register save/restore routine, and emit the insn
26126 or jump_insn as appropriate. */
26128 static rtx_insn *
26129 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26130 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26131 machine_mode reg_mode, int sel)
26133 int i;
26134 int offset, start_reg, end_reg, n_regs, use_reg;
26135 int reg_size = GET_MODE_SIZE (reg_mode);
26136 rtx sym;
26137 rtvec p;
26138 rtx par;
26139 rtx_insn *insn;
26141 offset = 0;
26142 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26143 ? info->first_gp_reg_save
26144 : (sel & SAVRES_REG) == SAVRES_FPR
26145 ? info->first_fp_reg_save
26146 : (sel & SAVRES_REG) == SAVRES_VR
26147 ? info->first_altivec_reg_save
26148 : -1);
26149 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26150 ? 32
26151 : (sel & SAVRES_REG) == SAVRES_FPR
26152 ? 64
26153 : (sel & SAVRES_REG) == SAVRES_VR
26154 ? LAST_ALTIVEC_REGNO + 1
26155 : -1);
26156 n_regs = end_reg - start_reg;
26157 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26158 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26159 + n_regs);
26161 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26162 RTVEC_ELT (p, offset++) = ret_rtx;
26164 RTVEC_ELT (p, offset++)
26165 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26167 sym = rs6000_savres_routine_sym (info, sel);
26168 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26170 use_reg = ptr_regno_for_savres (sel);
26171 if ((sel & SAVRES_REG) == SAVRES_VR)
26173 /* Vector regs are saved/restored using [reg+reg] addressing. */
26174 RTVEC_ELT (p, offset++)
26175 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26176 RTVEC_ELT (p, offset++)
26177 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26179 else
26180 RTVEC_ELT (p, offset++)
26181 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26183 for (i = 0; i < end_reg - start_reg; i++)
26184 RTVEC_ELT (p, i + offset)
26185 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26186 frame_reg_rtx, save_area_offset + reg_size * i,
26187 (sel & SAVRES_SAVE) != 0);
26189 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26190 RTVEC_ELT (p, i + offset)
26191 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26193 par = gen_rtx_PARALLEL (VOIDmode, p);
26195 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26197 insn = emit_jump_insn (par);
26198 JUMP_LABEL (insn) = ret_rtx;
26200 else
26201 insn = emit_insn (par);
26202 return insn;
26205 /* Emit prologue code to store CR fields that need to be saved into REG. This
26206 function should only be called when moving the non-volatile CRs to REG; it
26207 is not a general-purpose routine to move the entire set of CRs to REG.
26208 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26209 volatile CRs. */
26211 static void
26212 rs6000_emit_prologue_move_from_cr (rtx reg)
26214 /* Only the ELFv2 ABI allows storing only selected fields. */
26215 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26217 int i, cr_reg[8], count = 0;
26219 /* Collect CR fields that must be saved. */
26220 for (i = 0; i < 8; i++)
26221 if (save_reg_p (CR0_REGNO + i))
26222 cr_reg[count++] = i;
26224 /* If it's just a single one, use mfcrf. */
26225 if (count == 1)
26227 rtvec p = rtvec_alloc (1);
26228 rtvec r = rtvec_alloc (2);
26229 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26230 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26231 RTVEC_ELT (p, 0)
26232 = gen_rtx_SET (reg,
26233 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26235 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26236 return;
26239 /* ??? It might be better to handle the count == 2 or 3 cases here
26240 as well, using logical operations to combine the values. */
26243 emit_insn (gen_prologue_movesi_from_cr (reg));
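/* Illustration (assumed): if only CR2 must be preserved, the single-field
   path above computes the FXM mask as 1 << (7 - 2) == 0x20, so the
   resulting pattern can emit "mfocrf rD,0x20" instead of a full mfcr.  */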
26246 /* Return whether the split-stack arg pointer (r12) is used. */
26248 static bool
26249 split_stack_arg_pointer_used_p (void)
26251 /* If the pseudo holding the arg pointer is no longer a pseudo,
26252 then the arg pointer is used. */
26253 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26254 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26255 || (REGNO (cfun->machine->split_stack_arg_pointer)
26256 < FIRST_PSEUDO_REGISTER)))
26257 return true;
26259 /* Unfortunately we also need to do some code scanning, since
26260 r12 may have been substituted for the pseudo. */
26261 rtx_insn *insn;
26262 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26263 FOR_BB_INSNS (bb, insn)
26264 if (NONDEBUG_INSN_P (insn))
26266 /* A call destroys r12. */
26267 if (CALL_P (insn))
26268 return false;
26270 df_ref use;
26271 FOR_EACH_INSN_USE (use, insn)
26273 rtx x = DF_REF_REG (use);
26274 if (REG_P (x) && REGNO (x) == 12)
26275 return true;
26277 df_ref def;
26278 FOR_EACH_INSN_DEF (def, insn)
26280 rtx x = DF_REF_REG (def);
26281 if (REG_P (x) && REGNO (x) == 12)
26282 return false;
26285 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26288 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26290 static bool
26291 rs6000_global_entry_point_needed_p (void)
26293 /* Only needed for the ELFv2 ABI. */
26294 if (DEFAULT_ABI != ABI_ELFv2)
26295 return false;
26297 /* With -msingle-pic-base, we assume the whole program shares the same
26298 TOC, so no global entry point prologues are needed anywhere. */
26299 if (TARGET_SINGLE_PIC_BASE)
26300 return false;
26302 /* Ensure we have a global entry point for thunks. ??? We could
26303 avoid that if the target routine doesn't need a global entry point,
26304 but we do not know whether this is the case at this point. */
26305 if (cfun->is_thunk)
26306 return true;
26308 /* For regular functions, rs6000_emit_prologue sets this flag if the
26309 routine ever uses the TOC pointer. */
26310 return cfun->machine->r2_setup_needed;
26313 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26314 static sbitmap
26315 rs6000_get_separate_components (void)
26317 rs6000_stack_t *info = rs6000_stack_info ();
26319 if (WORLD_SAVE_P (info))
26320 return NULL;
26322 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26323 && !(info->savres_strategy & REST_MULTIPLE));
26325 /* Component 0 is the save/restore of LR (done via GPR0).
26326 Components 13..31 are the save/restore of GPR13..GPR31.
26327 Components 46..63 are the save/restore of FPR14..FPR31; a register's component number is simply its hard register number. */
26329 cfun->machine->n_components = 64;
26331 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26332 bitmap_clear (components);
26334 int reg_size = TARGET_32BIT ? 4 : 8;
26335 int fp_reg_size = 8;
26337 /* The GPRs we need saved to the frame. */
26338 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26339 && (info->savres_strategy & REST_INLINE_GPRS))
26341 int offset = info->gp_save_offset;
26342 if (info->push_p)
26343 offset += info->total_size;
26345 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26347 if (IN_RANGE (offset, -0x8000, 0x7fff)
26348 && save_reg_p (regno))
26349 bitmap_set_bit (components, regno);
26351 offset += reg_size;
26355 /* Don't mess with the hard frame pointer. */
26356 if (frame_pointer_needed)
26357 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26359 /* Don't mess with the fixed TOC register. */
26360 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26361 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26362 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26363 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26365 /* The FPRs we need saved to the frame. */
26366 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26367 && (info->savres_strategy & REST_INLINE_FPRS))
26369 int offset = info->fp_save_offset;
26370 if (info->push_p)
26371 offset += info->total_size;
26373 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26375 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26376 bitmap_set_bit (components, regno);
26378 offset += fp_reg_size;
26382 /* Optimize LR save and restore if we can. This is component 0. Any
26383 out-of-line register save/restore routines need LR. */
26384 if (info->lr_save_p
26385 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26386 && (info->savres_strategy & SAVE_INLINE_GPRS)
26387 && (info->savres_strategy & REST_INLINE_GPRS)
26388 && (info->savres_strategy & SAVE_INLINE_FPRS)
26389 && (info->savres_strategy & REST_INLINE_FPRS)
26390 && (info->savres_strategy & SAVE_INLINE_VRS)
26391 && (info->savres_strategy & REST_INLINE_VRS))
26393 int offset = info->lr_save_offset;
26394 if (info->push_p)
26395 offset += info->total_size;
26396 if (IN_RANGE (offset, -0x8000, 0x7fff))
26397 bitmap_set_bit (components, 0);
26400 return components;
26403 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26404 static sbitmap
26405 rs6000_components_for_bb (basic_block bb)
26407 rs6000_stack_t *info = rs6000_stack_info ();
26409 bitmap in = DF_LIVE_IN (bb);
26410 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26411 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26413 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26414 bitmap_clear (components);
26416 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26418 /* GPRs. */
26419 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26420 if (bitmap_bit_p (in, regno)
26421 || bitmap_bit_p (gen, regno)
26422 || bitmap_bit_p (kill, regno))
26423 bitmap_set_bit (components, regno);
26425 /* FPRs. */
26426 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26427 if (bitmap_bit_p (in, regno)
26428 || bitmap_bit_p (gen, regno)
26429 || bitmap_bit_p (kill, regno))
26430 bitmap_set_bit (components, regno);
26432 /* The link register. */
26433 if (bitmap_bit_p (in, LR_REGNO)
26434 || bitmap_bit_p (gen, LR_REGNO)
26435 || bitmap_bit_p (kill, LR_REGNO))
26436 bitmap_set_bit (components, 0);
26438 return components;
26441 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26442 static void
26443 rs6000_disqualify_components (sbitmap components, edge e,
26444 sbitmap edge_components, bool /*is_prologue*/)
26446 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26447 live where we want to place that code. */
26448 if (bitmap_bit_p (edge_components, 0)
26449 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26451 if (dump_file)
26452 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26453 "on entry to bb %d\n", e->dest->index);
26454 bitmap_clear_bit (components, 0);
26458 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26459 static void
26460 rs6000_emit_prologue_components (sbitmap components)
26462 rs6000_stack_t *info = rs6000_stack_info ();
26463 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26464 ? HARD_FRAME_POINTER_REGNUM
26465 : STACK_POINTER_REGNUM);
26467 machine_mode reg_mode = Pmode;
26468 int reg_size = TARGET_32BIT ? 4 : 8;
26469 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26470 ? DFmode : SFmode;
26471 int fp_reg_size = 8;
26473 /* Prologue for LR. */
26474 if (bitmap_bit_p (components, 0))
26476 rtx reg = gen_rtx_REG (reg_mode, 0);
26477 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26478 RTX_FRAME_RELATED_P (insn) = 1;
26479 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26481 int offset = info->lr_save_offset;
26482 if (info->push_p)
26483 offset += info->total_size;
26485 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26486 RTX_FRAME_RELATED_P (insn) = 1;
26487 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26488 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26489 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26492 /* Prologue for the GPRs. */
26493 int offset = info->gp_save_offset;
26494 if (info->push_p)
26495 offset += info->total_size;
26497 for (int i = info->first_gp_reg_save; i < 32; i++)
26499 if (bitmap_bit_p (components, i))
26501 rtx reg = gen_rtx_REG (reg_mode, i);
26502 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26503 RTX_FRAME_RELATED_P (insn) = 1;
26504 rtx set = copy_rtx (single_set (insn));
26505 add_reg_note (insn, REG_CFA_OFFSET, set);
26508 offset += reg_size;
26511 /* Prologue for the FPRs. */
26512 offset = info->fp_save_offset;
26513 if (info->push_p)
26514 offset += info->total_size;
26516 for (int i = info->first_fp_reg_save; i < 64; i++)
26518 if (bitmap_bit_p (components, i))
26520 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26521 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26522 RTX_FRAME_RELATED_P (insn) = 1;
26523 rtx set = copy_rtx (single_set (insn));
26524 add_reg_note (insn, REG_CFA_OFFSET, set);
26527 offset += fp_reg_size;
26531 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26532 static void
26533 rs6000_emit_epilogue_components (sbitmap components)
26535 rs6000_stack_t *info = rs6000_stack_info ();
26536 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26537 ? HARD_FRAME_POINTER_REGNUM
26538 : STACK_POINTER_REGNUM);
26540 machine_mode reg_mode = Pmode;
26541 int reg_size = TARGET_32BIT ? 4 : 8;
26543 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26544 ? DFmode : SFmode;
26545 int fp_reg_size = 8;
26547 /* Epilogue for the FPRs. */
26548 int offset = info->fp_save_offset;
26549 if (info->push_p)
26550 offset += info->total_size;
26552 for (int i = info->first_fp_reg_save; i < 64; i++)
26554 if (bitmap_bit_p (components, i))
26556 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26557 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26558 RTX_FRAME_RELATED_P (insn) = 1;
26559 add_reg_note (insn, REG_CFA_RESTORE, reg);
26562 offset += fp_reg_size;
26565 /* Epilogue for the GPRs. */
26566 offset = info->gp_save_offset;
26567 if (info->push_p)
26568 offset += info->total_size;
26570 for (int i = info->first_gp_reg_save; i < 32; i++)
26572 if (bitmap_bit_p (components, i))
26574 rtx reg = gen_rtx_REG (reg_mode, i);
26575 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26576 RTX_FRAME_RELATED_P (insn) = 1;
26577 add_reg_note (insn, REG_CFA_RESTORE, reg);
26580 offset += reg_size;
26583 /* Epilogue for LR. */
26584 if (bitmap_bit_p (components, 0))
26586 int offset = info->lr_save_offset;
26587 if (info->push_p)
26588 offset += info->total_size;
26590 rtx reg = gen_rtx_REG (reg_mode, 0);
26591 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26593 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26594 insn = emit_move_insn (lr, reg);
26595 RTX_FRAME_RELATED_P (insn) = 1;
26596 add_reg_note (insn, REG_CFA_RESTORE, lr);
26600 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26601 static void
26602 rs6000_set_handled_components (sbitmap components)
26604 rs6000_stack_t *info = rs6000_stack_info ();
26606 for (int i = info->first_gp_reg_save; i < 32; i++)
26607 if (bitmap_bit_p (components, i))
26608 cfun->machine->gpr_is_wrapped_separately[i] = true;
26610 for (int i = info->first_fp_reg_save; i < 64; i++)
26611 if (bitmap_bit_p (components, i))
26612 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26614 if (bitmap_bit_p (components, 0))
26615 cfun->machine->lr_is_wrapped_separately = true;
26618 /* VRSAVE is a bit vector representing which AltiVec registers
26619 are used. The OS uses this to determine which vector
26620 registers to save on a context switch. We need to save
26621 VRSAVE on the stack frame, add whatever AltiVec registers we
26622 used in this function, and do the corresponding magic in the
26623 epilogue. */
26624 static void
26625 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26626 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26628 /* Get VRSAVE into a GPR. */
26629 rtx reg = gen_rtx_REG (SImode, save_regno);
26630 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26631 if (TARGET_MACHO)
26632 emit_insn (gen_get_vrsave_internal (reg));
26633 else
26634 emit_insn (gen_rtx_SET (reg, vrsave));
26636 /* Save VRSAVE. */
26637 int offset = info->vrsave_save_offset + frame_off;
26638 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26640 /* Include the registers in the mask. */
26641 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26643 emit_insn (generate_set_vrsave (reg, info, 0));
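/* Sketch of the emitted sequence (illustrative; assumes SAVE_REGNO == 11
   on a 64-bit ELF target, VRSAVE being SPR 256):

       mfspr 11,256                 # old VRSAVE into r11
       stw 11,<vrsave_save_offset>(1)
       oris 11,11,<mask>            # add this function's AltiVec regs
       mtspr 256,11                 # via generate_set_vrsave

   The epilogue later reloads and restores the saved value.  */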
26646 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26647 called, it left the arg pointer to the old stack in r29. Otherwise, the
26648 arg pointer is the top of the current frame. */
26649 static void
26650 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26651 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26653 cfun->machine->split_stack_argp_used = true;
26655 if (sp_adjust)
26657 rtx r12 = gen_rtx_REG (Pmode, 12);
26658 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26659 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26660 emit_insn_before (set_r12, sp_adjust);
26662 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26664 rtx r12 = gen_rtx_REG (Pmode, 12);
26665 if (frame_off == 0)
26666 emit_move_insn (r12, frame_reg_rtx);
26667 else
26668 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26671 if (info->push_p)
26673 rtx r12 = gen_rtx_REG (Pmode, 12);
26674 rtx r29 = gen_rtx_REG (Pmode, 29);
26675 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26676 rtx not_more = gen_label_rtx ();
26677 rtx jump;
26679 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26680 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26681 gen_rtx_LABEL_REF (VOIDmode, not_more),
26682 pc_rtx);
26683 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26684 JUMP_LABEL (jump) = not_more;
26685 LABEL_NUSES (not_more) += 1;
26686 emit_move_insn (r12, r29);
26687 emit_label (not_more);
26691 /* Emit function prologue as insns. */
26693 void
26694 rs6000_emit_prologue (void)
26696 rs6000_stack_t *info = rs6000_stack_info ();
26697 machine_mode reg_mode = Pmode;
26698 int reg_size = TARGET_32BIT ? 4 : 8;
26699 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26700 ? DFmode : SFmode;
26701 int fp_reg_size = 8;
26702 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26703 rtx frame_reg_rtx = sp_reg_rtx;
26704 unsigned int cr_save_regno;
26705 rtx cr_save_rtx = NULL_RTX;
26706 rtx_insn *insn;
26707 int strategy;
26708 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26709 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26710 && call_used_regs[STATIC_CHAIN_REGNUM]);
26711 int using_split_stack = (flag_split_stack
26712 && (lookup_attribute ("no_split_stack",
26713 DECL_ATTRIBUTES (cfun->decl))
26714 == NULL));
26716 /* Offset to top of frame for frame_reg and sp respectively. */
26717 HOST_WIDE_INT frame_off = 0;
26718 HOST_WIDE_INT sp_off = 0;
26719 /* sp_adjust is the stack adjusting instruction, tracked so that the
26720 insn setting up the split-stack arg pointer can be emitted just
26721 prior to it, when r12 is not used here for other purposes. */
26722 rtx_insn *sp_adjust = 0;
26724 #if CHECKING_P
26725 /* Track and check usage of r0, r11, r12. */
26726 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26727 #define START_USE(R) do \
26729 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26730 reg_inuse |= 1 << (R); \
26731 } while (0)
26732 #define END_USE(R) do \
26734 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26735 reg_inuse &= ~(1 << (R)); \
26736 } while (0)
26737 #define NOT_INUSE(R) do \
26739 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26740 } while (0)
26741 #else
26742 #define START_USE(R) do {} while (0)
26743 #define END_USE(R) do {} while (0)
26744 #define NOT_INUSE(R) do {} while (0)
26745 #endif
26747 if (DEFAULT_ABI == ABI_ELFv2
26748 && !TARGET_SINGLE_PIC_BASE)
26750 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26752 /* With -mminimal-toc we may generate an extra use of r2 below. */
26753 if (TARGET_TOC && TARGET_MINIMAL_TOC
26754 && !constant_pool_empty_p ())
26755 cfun->machine->r2_setup_needed = true;
26759 if (flag_stack_usage_info)
26760 current_function_static_stack_size = info->total_size;
26762 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26764 HOST_WIDE_INT size = info->total_size;
26766 if (crtl->is_leaf && !cfun->calls_alloca)
26768 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26769 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26770 size - STACK_CHECK_PROTECT);
26772 else if (size > 0)
26773 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26776 if (TARGET_FIX_AND_CONTINUE)
26778 /* gdb on darwin arranges to forward a function from the old
26779 address by modifying the first 5 instructions of the function
26780 to branch to the overriding function. This is necessary to
26781 permit function pointers that point to the old function to
26782 actually forward to the new function. */
26783 emit_insn (gen_nop ());
26784 emit_insn (gen_nop ());
26785 emit_insn (gen_nop ());
26786 emit_insn (gen_nop ());
26787 emit_insn (gen_nop ());
26790 /* Handle world saves specially here. */
26791 if (WORLD_SAVE_P (info))
26793 int i, j, sz;
26794 rtx treg;
26795 rtvec p;
26796 rtx reg0;
26798 /* save_world expects lr in r0. */
26799 reg0 = gen_rtx_REG (Pmode, 0);
26800 if (info->lr_save_p)
26802 insn = emit_move_insn (reg0,
26803 gen_rtx_REG (Pmode, LR_REGNO));
26804 RTX_FRAME_RELATED_P (insn) = 1;
26807 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26808 assumptions about the offsets of various bits of the stack
26809 frame. */
26810 gcc_assert (info->gp_save_offset == -220
26811 && info->fp_save_offset == -144
26812 && info->lr_save_offset == 8
26813 && info->cr_save_offset == 4
26814 && info->push_p
26815 && info->lr_save_p
26816 && (!crtl->calls_eh_return
26817 || info->ehrd_offset == -432)
26818 && info->vrsave_save_offset == -224
26819 && info->altivec_save_offset == -416);
26821 treg = gen_rtx_REG (SImode, 11);
26822 emit_move_insn (treg, GEN_INT (-info->total_size));
26824 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26825 in R11. It also clobbers R12, so beware! */
26827 /* Preserve CR2 for save_world prologues. */
26828 sz = 5;
26829 sz += 32 - info->first_gp_reg_save;
26830 sz += 64 - info->first_fp_reg_save;
26831 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26832 p = rtvec_alloc (sz);
26833 j = 0;
26834 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26835 gen_rtx_REG (SImode,
26836 LR_REGNO));
26837 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26838 gen_rtx_SYMBOL_REF (Pmode,
26839 "*save_world"));
26840 /* We do floats first so that the instruction pattern matches
26841 properly. */
26842 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26843 RTVEC_ELT (p, j++)
26844 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26845 ? DFmode : SFmode,
26846 info->first_fp_reg_save + i),
26847 frame_reg_rtx,
26848 info->fp_save_offset + frame_off + 8 * i);
26849 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26850 RTVEC_ELT (p, j++)
26851 = gen_frame_store (gen_rtx_REG (V4SImode,
26852 info->first_altivec_reg_save + i),
26853 frame_reg_rtx,
26854 info->altivec_save_offset + frame_off + 16 * i);
26855 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26856 RTVEC_ELT (p, j++)
26857 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26858 frame_reg_rtx,
26859 info->gp_save_offset + frame_off + reg_size * i);
26861 /* CR register traditionally saved as CR2. */
26862 RTVEC_ELT (p, j++)
26863 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26864 frame_reg_rtx, info->cr_save_offset + frame_off);
26865 /* Explain about use of R0. */
26866 if (info->lr_save_p)
26867 RTVEC_ELT (p, j++)
26868 = gen_frame_store (reg0,
26869 frame_reg_rtx, info->lr_save_offset + frame_off);
26870 /* Explain what happens to the stack pointer. */
26872 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26873 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26876 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26877 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26878 treg, GEN_INT (-info->total_size));
26879 sp_off = frame_off = info->total_size;
26882 strategy = info->savres_strategy;
26884 /* For V.4, update stack before we do any saving and set back pointer. */
26885 if (! WORLD_SAVE_P (info)
26886 && info->push_p
26887 && (DEFAULT_ABI == ABI_V4
26888 || crtl->calls_eh_return))
26890 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26891 || !(strategy & SAVE_INLINE_GPRS)
26892 || !(strategy & SAVE_INLINE_VRS));
26893 int ptr_regno = -1;
26894 rtx ptr_reg = NULL_RTX;
26895 int ptr_off = 0;
26897 if (info->total_size < 32767)
26898 frame_off = info->total_size;
26899 else if (need_r11)
26900 ptr_regno = 11;
26901 else if (info->cr_save_p
26902 || info->lr_save_p
26903 || info->first_fp_reg_save < 64
26904 || info->first_gp_reg_save < 32
26905 || info->altivec_size != 0
26906 || info->vrsave_size != 0
26907 || crtl->calls_eh_return)
26908 ptr_regno = 12;
26909 else
26911 /* The prologue won't be saving any regs so there is no need
26912 to set up a frame register to access any frame save area.
26913 We also won't be using frame_off anywhere below, but set
26914 the correct value anyway to protect against future
26915 changes to this function. */
26916 frame_off = info->total_size;
26918 if (ptr_regno != -1)
26920 /* Set up the frame offset to that needed by the first
26921 out-of-line save function. */
26922 START_USE (ptr_regno);
26923 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26924 frame_reg_rtx = ptr_reg;
26925 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26926 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26927 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26928 ptr_off = info->gp_save_offset + info->gp_size;
26929 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26930 ptr_off = info->altivec_save_offset + info->altivec_size;
26931 frame_off = -ptr_off;
26933 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26934 ptr_reg, ptr_off);
26935 if (REGNO (frame_reg_rtx) == 12)
26936 sp_adjust = 0;
26937 sp_off = info->total_size;
26938 if (frame_reg_rtx != sp_reg_rtx)
26939 rs6000_emit_stack_tie (frame_reg_rtx, false);
26942 /* If we use the link register, get it into r0. */
26943 if (!WORLD_SAVE_P (info) && info->lr_save_p
26944 && !cfun->machine->lr_is_wrapped_separately)
26946 rtx addr, reg, mem;
26948 reg = gen_rtx_REG (Pmode, 0);
26949 START_USE (0);
26950 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26951 RTX_FRAME_RELATED_P (insn) = 1;
26953 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26954 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26956 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26957 GEN_INT (info->lr_save_offset + frame_off));
26958 mem = gen_rtx_MEM (Pmode, addr);
26959 /* This should not be of rs6000_sr_alias_set, because of
26960 __builtin_return_address. */
26962 insn = emit_move_insn (mem, reg);
26963 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26964 NULL_RTX, NULL_RTX);
26965 END_USE (0);
26969 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26970 r12 will be needed by out-of-line gpr save. */
26971 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26972 && !(strategy & (SAVE_INLINE_GPRS
26973 | SAVE_NOINLINE_GPRS_SAVES_LR))
26974 ? 11 : 12);
26975 if (!WORLD_SAVE_P (info)
26976 && info->cr_save_p
26977 && REGNO (frame_reg_rtx) != cr_save_regno
26978 && !(using_static_chain_p && cr_save_regno == 11)
26979 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26981 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26982 START_USE (cr_save_regno);
26983 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26986 /* Do any required saving of fpr's. If only one or two to save, do
26987 it ourselves. Otherwise, call an out-of-line save routine. */
26988 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26990 int offset = info->fp_save_offset + frame_off;
26991 for (int i = info->first_fp_reg_save; i < 64; i++)
26993 if (save_reg_p (i)
26994 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26995 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26996 sp_off - frame_off);
26998 offset += fp_reg_size;
27001 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27003 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27004 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27005 unsigned ptr_regno = ptr_regno_for_savres (sel);
27006 rtx ptr_reg = frame_reg_rtx;
27008 if (REGNO (frame_reg_rtx) == ptr_regno)
27009 gcc_checking_assert (frame_off == 0);
27010 else
27012 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27013 NOT_INUSE (ptr_regno);
27014 emit_insn (gen_add3_insn (ptr_reg,
27015 frame_reg_rtx, GEN_INT (frame_off)));
27017 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27018 info->fp_save_offset,
27019 info->lr_save_offset,
27020 DFmode, sel);
27021 rs6000_frame_related (insn, ptr_reg, sp_off,
27022 NULL_RTX, NULL_RTX);
27023 if (lr)
27024 END_USE (0);
27027 /* Save GPRs. This is done as a PARALLEL if we are using
27028 the store-multiple instructions. */
27029 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27031 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27032 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27033 unsigned ptr_regno = ptr_regno_for_savres (sel);
27034 rtx ptr_reg = frame_reg_rtx;
27035 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27036 int end_save = info->gp_save_offset + info->gp_size;
27037 int ptr_off;
27039 if (ptr_regno == 12)
27040 sp_adjust = 0;
27041 if (!ptr_set_up)
27042 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27044 /* Need to adjust r11 (r12) if we saved any FPRs. */
27045 if (end_save + frame_off != 0)
27047 rtx offset = GEN_INT (end_save + frame_off);
27049 if (ptr_set_up)
27050 frame_off = -end_save;
27051 else
27052 NOT_INUSE (ptr_regno);
27053 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27055 else if (!ptr_set_up)
27057 NOT_INUSE (ptr_regno);
27058 emit_move_insn (ptr_reg, frame_reg_rtx);
27060 ptr_off = -end_save;
27061 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27062 info->gp_save_offset + ptr_off,
27063 info->lr_save_offset + ptr_off,
27064 reg_mode, sel);
27065 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27066 NULL_RTX, NULL_RTX);
27067 if (lr)
27068 END_USE (0);
27070 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27072 rtvec p;
27073 int i;
27074 p = rtvec_alloc (32 - info->first_gp_reg_save);
27075 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27076 RTVEC_ELT (p, i)
27077 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27078 frame_reg_rtx,
27079 info->gp_save_offset + frame_off + reg_size * i);
27080 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27081 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27082 NULL_RTX, NULL_RTX);
27084 else if (!WORLD_SAVE_P (info))
27086 int offset = info->gp_save_offset + frame_off;
27087 for (int i = info->first_gp_reg_save; i < 32; i++)
27089 if (save_reg_p (i)
27090 && !cfun->machine->gpr_is_wrapped_separately[i])
27091 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27092 sp_off - frame_off);
27094 offset += reg_size;
27098 if (crtl->calls_eh_return)
27100 unsigned int i;
27101 rtvec p;
27103 for (i = 0; ; ++i)
27105 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27106 if (regno == INVALID_REGNUM)
27107 break;
27110 p = rtvec_alloc (i);
27112 for (i = 0; ; ++i)
27114 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27115 if (regno == INVALID_REGNUM)
27116 break;
27118 rtx set
27119 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27120 sp_reg_rtx,
27121 info->ehrd_offset + sp_off + reg_size * (int) i);
27122 RTVEC_ELT (p, i) = set;
27123 RTX_FRAME_RELATED_P (set) = 1;
27126 insn = emit_insn (gen_blockage ());
27127 RTX_FRAME_RELATED_P (insn) = 1;
27128 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27131 /* In AIX ABI we need to make sure r2 is really saved. */
27132 if (TARGET_AIX && crtl->calls_eh_return)
27134 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27135 rtx join_insn, note;
27136 rtx_insn *save_insn;
27137 long toc_restore_insn;
27139 tmp_reg = gen_rtx_REG (Pmode, 11);
27140 tmp_reg_si = gen_rtx_REG (SImode, 11);
27141 if (using_static_chain_p)
27143 START_USE (0);
27144 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27146 else
27147 START_USE (11);
27148 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27149 /* Peek at the instruction to which this function returns. If it's
27150 restoring r2, then we know we've already saved r2. We can't
27151 unconditionally save r2 because the value we have will already
27152 have been updated if we arrived at this function via a PLT call or
27153 TOC-adjusting stub. */
27154 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27155 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27156 + RS6000_TOC_SAVE_SLOT);
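      /* Illustrative decode, assuming for the sake of example a 64-bit
         TOC save slot at offset 40: 0xE8410000 + 40 = 0xE8410028
         encodes "ld 2,40(1)" (primary opcode 58, RT=2, RA=1), and
         0x80410000 + D likewise encodes "lwz 2,D(1)" for 32-bit.
         The xor with the high half and the compare with the low half
         below test the full 32-bit word using only 16-bit
         immediates.  */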
27157 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27158 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27159 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27160 validate_condition_mode (EQ, CCUNSmode);
27161 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27162 emit_insn (gen_rtx_SET (compare_result,
27163 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27164 toc_save_done = gen_label_rtx ();
27165 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27166 gen_rtx_EQ (VOIDmode, compare_result,
27167 const0_rtx),
27168 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27169 pc_rtx);
27170 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27171 JUMP_LABEL (jump) = toc_save_done;
27172 LABEL_NUSES (toc_save_done) += 1;
27174 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27175 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27176 sp_off - frame_off);
27178 emit_label (toc_save_done);
27180 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27181 have a CFG that has different saves along different paths.
27182 Move the note to a dummy blockage insn, which describes that
27183 R2 is unconditionally saved after the label. */
27184 /* ??? An alternate representation might be a special insn pattern
27185 containing both the branch and the store. That might give the
27186 code that minimizes the number of DW_CFA_advance opcodes more
27187 freedom in placing the annotations. */
27188 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27189 if (note)
27190 remove_note (save_insn, note);
27191 else
27192 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27193 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27194 RTX_FRAME_RELATED_P (save_insn) = 0;
27196 join_insn = emit_insn (gen_blockage ());
27197 REG_NOTES (join_insn) = note;
27198 RTX_FRAME_RELATED_P (join_insn) = 1;
27200 if (using_static_chain_p)
27202 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27203 END_USE (0);
27205 else
27206 END_USE (11);
27209 /* Save CR if we use any that must be preserved. */
27210 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27212 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27213 GEN_INT (info->cr_save_offset + frame_off));
27214 rtx mem = gen_frame_mem (SImode, addr);
27216 /* If we didn't copy cr before, do so now using r0. */
27217 if (cr_save_rtx == NULL_RTX)
27219 START_USE (0);
27220 cr_save_rtx = gen_rtx_REG (SImode, 0);
27221 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27224 /* Saving CR requires a two-instruction sequence: one instruction
27225 to move the CR to a general-purpose register, and a second
27226 instruction that stores the GPR to memory.
27228 We do not emit any DWARF CFI records for the first of these,
27229 because we cannot properly represent the fact that CR is saved in
27230 a register. One reason is that we cannot express that multiple
27231 CR fields are saved; another reason is that on 64-bit, the size
27232 of the CR register in DWARF (4 bytes) differs from the size of
27233 a general-purpose register.
27235 This means if any intervening instruction were to clobber one of
27236 the call-saved CR fields, we'd have incorrect CFI. To prevent
27237 this from happening, we mark the store to memory as a use of
27238 those CR fields, which prevents any such instruction from being
27239 scheduled in between the two instructions. */
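	  /* Schematically, the sequence around this store is (offset
	     illustrative; the move may be mfcr or a single-field mfocrf):
		mfcr 0
		stw 0,8(1)
	     with the USEs built below tying the live CR fields to the
	     store.  */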
27240 rtx crsave_v[9];
27241 int n_crsave = 0;
27242 int i;
27244 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27245 for (i = 0; i < 8; i++)
27246 if (save_reg_p (CR0_REGNO + i))
27247 crsave_v[n_crsave++]
27248 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27250 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27251 gen_rtvec_v (n_crsave, crsave_v)));
27252 END_USE (REGNO (cr_save_rtx));
27254 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27255 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27256 so we need to construct a frame expression manually. */
27257 RTX_FRAME_RELATED_P (insn) = 1;
27259 /* Update address to be stack-pointer relative, like
27260 rs6000_frame_related would do. */
27261 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27262 GEN_INT (info->cr_save_offset + sp_off));
27263 mem = gen_frame_mem (SImode, addr);
27265 if (DEFAULT_ABI == ABI_ELFv2)
27267 /* In the ELFv2 ABI we generate separate CFI records for each
27268 CR field that was actually saved. They all point to the
27269 same 32-bit stack slot. */
27270 rtx crframe[8];
27271 int n_crframe = 0;
27273 for (i = 0; i < 8; i++)
27274 if (save_reg_p (CR0_REGNO + i))
27276 crframe[n_crframe]
27277 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27279 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27280 n_crframe++;
27283 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27284 gen_rtx_PARALLEL (VOIDmode,
27285 gen_rtvec_v (n_crframe, crframe)));
27287 else
27289 /* In other ABIs, by convention, we use a single CR regnum to
27290 represent the fact that all call-saved CR fields are saved.
27291 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27292 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27293 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27297 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27298 *separate* slots if the routine calls __builtin_eh_return, so
27299 that they can be independently restored by the unwinder. */
27300 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27302 int i, cr_off = info->ehcr_offset;
27303 rtx crsave;
27305 /* ??? We might get better performance by using multiple mfocrf
27306 instructions. */
27307 crsave = gen_rtx_REG (SImode, 0);
27308 emit_insn (gen_prologue_movesi_from_cr (crsave));
27310 for (i = 0; i < 8; i++)
27311 if (!call_used_regs[CR0_REGNO + i])
27313 rtvec p = rtvec_alloc (2);
27314 RTVEC_ELT (p, 0)
27315 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27316 RTVEC_ELT (p, 1)
27317 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27319 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27321 RTX_FRAME_RELATED_P (insn) = 1;
27322 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27323 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27324 sp_reg_rtx, cr_off + sp_off));
27326 cr_off += reg_size;
27330 /* Update the stack and set the back pointer unless this is V.4,
27331 for which it was done previously. */
27332 if (!WORLD_SAVE_P (info) && info->push_p
27333 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27335 rtx ptr_reg = NULL;
27336 int ptr_off = 0;
27338 /* If saving altivec regs we need to be able to address all save
27339 locations using a 16-bit offset. */
27340 if ((strategy & SAVE_INLINE_VRS) == 0
27341 || (info->altivec_size != 0
27342 && (info->altivec_save_offset + info->altivec_size - 16
27343 + info->total_size - frame_off) > 32767)
27344 || (info->vrsave_size != 0
27345 && (info->vrsave_save_offset
27346 + info->total_size - frame_off) > 32767))
27348 int sel = SAVRES_SAVE | SAVRES_VR;
27349 unsigned ptr_regno = ptr_regno_for_savres (sel);
27351 if (using_static_chain_p
27352 && ptr_regno == STATIC_CHAIN_REGNUM)
27353 ptr_regno = 12;
27354 if (REGNO (frame_reg_rtx) != ptr_regno)
27355 START_USE (ptr_regno);
27356 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27357 frame_reg_rtx = ptr_reg;
27358 ptr_off = info->altivec_save_offset + info->altivec_size;
27359 frame_off = -ptr_off;
27361 else if (REGNO (frame_reg_rtx) == 1)
27362 frame_off = info->total_size;
27363 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27364 ptr_reg, ptr_off);
27365 if (REGNO (frame_reg_rtx) == 12)
27366 sp_adjust = 0;
27367 sp_off = info->total_size;
27368 if (frame_reg_rtx != sp_reg_rtx)
27369 rs6000_emit_stack_tie (frame_reg_rtx, false);
27372 /* Set frame pointer, if needed. */
27373 if (frame_pointer_needed)
27375 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27376 sp_reg_rtx);
27377 RTX_FRAME_RELATED_P (insn) = 1;
27380 /* Save AltiVec registers if needed. Save here because the red zone does
27381 not always include AltiVec registers. */
27382 if (!WORLD_SAVE_P (info)
27383 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27385 int end_save = info->altivec_save_offset + info->altivec_size;
27386 int ptr_off;
27387 /* Oddly, the vector save/restore functions point r0 at the end
27388 of the save area, then use r11 or r12 to load offsets for
27389 [reg+reg] addressing. */
27390 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27391 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27392 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27394 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27395 NOT_INUSE (0);
27396 if (scratch_regno == 12)
27397 sp_adjust = 0;
27398 if (end_save + frame_off != 0)
27400 rtx offset = GEN_INT (end_save + frame_off);
27402 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27404 else
27405 emit_move_insn (ptr_reg, frame_reg_rtx);
27407 ptr_off = -end_save;
27408 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27409 info->altivec_save_offset + ptr_off,
27410 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27411 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27412 NULL_RTX, NULL_RTX);
27413 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27415 /* The oddity mentioned above clobbered our frame reg. */
27416 emit_move_insn (frame_reg_rtx, ptr_reg);
27417 frame_off = ptr_off;
27420 else if (!WORLD_SAVE_P (info)
27421 && info->altivec_size != 0)
27423 int i;
27425 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27426 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27428 rtx areg, savereg, mem;
27429 HOST_WIDE_INT offset;
27431 offset = (info->altivec_save_offset + frame_off
27432 + 16 * (i - info->first_altivec_reg_save));
27434 savereg = gen_rtx_REG (V4SImode, i);
27436 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27438 mem = gen_frame_mem (V4SImode,
27439 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27440 GEN_INT (offset)));
27441 insn = emit_insn (gen_rtx_SET (mem, savereg));
27442 areg = NULL_RTX;
27444 else
27446 NOT_INUSE (0);
27447 areg = gen_rtx_REG (Pmode, 0);
27448 emit_move_insn (areg, GEN_INT (offset));
27450 /* AltiVec addressing mode is [reg+reg]. */
27451 mem = gen_frame_mem (V4SImode,
27452 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27454 /* Rather than emitting a generic move, force use of the stvx
27455 instruction, which we always want on ISA 2.07 (power8) systems.
27456 In particular we don't want xxpermdi/stxvd2x for little
27457 endian. */
27458 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27461 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27462 areg, GEN_INT (offset));
27466 /* VRSAVE is a bit vector representing which AltiVec registers
27467 are used. The OS uses this to determine which vector
27468 registers to save on a context switch. We need to save
27469 VRSAVE on the stack frame, add whatever AltiVec registers we
27470 used in this function, and do the corresponding magic in the
27471 epilogue. */
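  /* For illustration: assuming ALTIVEC_REG_BIT maps v0 to the most
     significant bit, a function using only v20 and v21 would OR
     0x00000C00 into the saved VRSAVE value.  */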
27473 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27475 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27476 be using r12 as frame_reg_rtx and r11 as the static chain
27477 pointer for nested functions. */
27478 int save_regno = 12;
27479 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27480 && !using_static_chain_p)
27481 save_regno = 11;
27482 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27484 save_regno = 11;
27485 if (using_static_chain_p)
27486 save_regno = 0;
27488 NOT_INUSE (save_regno);
27490 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27493 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27494 if (!TARGET_SINGLE_PIC_BASE
27495 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27496 && !constant_pool_empty_p ())
27497 || (DEFAULT_ABI == ABI_V4
27498 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27499 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27501 /* If emit_load_toc_table will use the link register, we need to save
27502 it. We use R12 for this purpose because emit_load_toc_table
27503 can use register 0. This allows us to use a plain 'blr' to return
27504 from the procedure more often. */
27505 int save_LR_around_toc_setup = (TARGET_ELF
27506 && DEFAULT_ABI == ABI_V4
27507 && flag_pic
27508 && ! info->lr_save_p
27509 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27510 if (save_LR_around_toc_setup)
27512 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27513 rtx tmp = gen_rtx_REG (Pmode, 12);
27515 sp_adjust = 0;
27516 insn = emit_move_insn (tmp, lr);
27517 RTX_FRAME_RELATED_P (insn) = 1;
27519 rs6000_emit_load_toc_table (TRUE);
27521 insn = emit_move_insn (lr, tmp);
27522 add_reg_note (insn, REG_CFA_RESTORE, lr);
27523 RTX_FRAME_RELATED_P (insn) = 1;
27525 else
27526 rs6000_emit_load_toc_table (TRUE);
27529 #if TARGET_MACHO
27530 if (!TARGET_SINGLE_PIC_BASE
27531 && DEFAULT_ABI == ABI_DARWIN
27532 && flag_pic && crtl->uses_pic_offset_table)
27534 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27535 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27537 /* Save and restore LR locally around this call (in R0). */
27538 if (!info->lr_save_p)
27539 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27541 emit_insn (gen_load_macho_picbase (src));
27543 emit_move_insn (gen_rtx_REG (Pmode,
27544 RS6000_PIC_OFFSET_TABLE_REGNUM),
27545 lr);
27547 if (!info->lr_save_p)
27548 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27550 #endif
27552 /* If we need to, save the TOC register after doing the stack setup.
27553 Do not emit eh frame info for this save. The unwinder wants info,
27554 conceptually attached to instructions in this function, about
27555 register values in the caller of this function. This R2 may have
27556 already been changed from the value in the caller.
27557 We don't attempt to write accurate DWARF EH frame info for R2
27558 because code emitted by gcc for a (non-pointer) function call
27559 doesn't save and restore R2. Instead, R2 is managed out-of-line
27560 by a linker generated plt call stub when the function resides in
27561 a shared library. This behavior is costly to describe in DWARF,
27562 both in terms of the size of DWARF info and the time taken in the
27563 unwinder to interpret it. R2 changes, apart from the
27564 calls_eh_return case earlier in this function, are handled by
27565 linux-unwind.h frob_update_context. */
27566 if (rs6000_save_toc_in_prologue_p ())
27568 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27569 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27572 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27573 if (using_split_stack && split_stack_arg_pointer_used_p ())
27574 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27577 /* Output .extern statements for the save/restore routines we use. */
27579 static void
27580 rs6000_output_savres_externs (FILE *file)
27582 rs6000_stack_t *info = rs6000_stack_info ();
27584 if (TARGET_DEBUG_STACK)
27585 debug_stack_info (info);
27587 /* Write .extern for any function we will call to save and restore
27588 fp values. */
27589 if (info->first_fp_reg_save < 64
27590 && !TARGET_MACHO
27591 && !TARGET_ELF)
27593 char *name;
27594 int regno = info->first_fp_reg_save - 32;
27596 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27598 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27599 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27600 name = rs6000_savres_routine_name (regno, sel);
27601 fprintf (file, "\t.extern %s\n", name);
27603 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27605 bool lr = (info->savres_strategy
27606 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27607 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27608 name = rs6000_savres_routine_name (regno, sel);
27609 fprintf (file, "\t.extern %s\n", name);
27614 /* Write function prologue. */
27616 static void
27617 rs6000_output_function_prologue (FILE *file)
27619 if (!cfun->is_thunk)
27620 rs6000_output_savres_externs (file);
27622 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27623 immediately after the global entry point label. */
27624 if (rs6000_global_entry_point_needed_p ())
27626 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27628 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27630 if (TARGET_CMODEL != CMODEL_LARGE)
27632 /* In the small and medium code models, we assume the TOC is less
27633 than 2 GB away from the text section, so it can be computed via the
27634 following two-instruction sequence. */
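	  /* The assembled result is, schematically (label names vary):
		0:	addis 2,12,.TOC.-.LCF0@ha
			addi 2,2,.TOC.-.LCF0@l
	     forming the TOC pointer from the global entry address
	     passed in r12.  */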
27635 char buf[256];
27637 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27638 fprintf (file, "0:\taddis 2,12,.TOC.-");
27639 assemble_name (file, buf);
27640 fprintf (file, "@ha\n");
27641 fprintf (file, "\taddi 2,2,.TOC.-");
27642 assemble_name (file, buf);
27643 fprintf (file, "@l\n");
27645 else
27647 /* In the large code model, we allow arbitrary offsets between the
27648 TOC and the text section, so we have to load the offset from
27649 memory. The data field is emitted directly before the global
27650 entry point in rs6000_elf_declare_function_name. */
27651 char buf[256];
27653 #ifdef HAVE_AS_ENTRY_MARKERS
27654 /* If supported by the linker, emit a marker relocation. If the
27655 total code size of the final executable or shared library
27656 happens to fit into 2 GB after all, the linker will replace
27657 this code sequence with the sequence for the small or medium
27658 code model. */
27659 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27660 #endif
27661 fprintf (file, "\tld 2,");
27662 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27663 assemble_name (file, buf);
27664 fprintf (file, "-");
27665 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27666 assemble_name (file, buf);
27667 fprintf (file, "(12)\n");
27668 fprintf (file, "\tadd 2,2,12\n");
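      /* The resulting large-model sequence is, schematically (label
	 names vary):
		.reloc .,R_PPC64_ENTRY
		ld 2,.LCL0-.LCF0(12)
		add 2,2,12
	 loading the TOC-to-text offset from the data word emitted just
	 before the global entry point.  */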
27671 fputs ("\t.localentry\t", file);
27672 assemble_name (file, name);
27673 fputs (",.-", file);
27674 assemble_name (file, name);
27675 fputs ("\n", file);
27678 /* Output -mprofile-kernel code. This needs to be done here instead of
27679 in output_function_profile since it must go after the ELFv2 ABI
27680 local entry point. */
27681 if (TARGET_PROFILE_KERNEL && crtl->profile)
27683 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27684 gcc_assert (!TARGET_32BIT);
27686 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27688 /* In the ELFv2 ABI we have no compiler stack word. It must be
27689 the responsibility of _mcount to preserve the static chain
27690 register if required. */
27691 if (DEFAULT_ABI != ABI_ELFv2
27692 && cfun->static_chain_decl != NULL)
27694 asm_fprintf (file, "\tstd %s,24(%s)\n",
27695 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27696 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27697 asm_fprintf (file, "\tld %s,24(%s)\n",
27698 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27700 else
27701 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27704 rs6000_pic_labelno++;
27707 /* -mprofile-kernel code calls mcount before the function prologue,
27708 so a profiled leaf function should stay a leaf function. */
27709 static bool
27710 rs6000_keep_leaf_when_profiled ()
27712 return TARGET_PROFILE_KERNEL;
27715 /* Non-zero if vmx regs are restored before the frame pop, zero if
27716 we restore after the pop when possible. */
27717 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27719 /* Restoring cr is a two-step process: loading a reg from the frame
27720 save, then moving the reg to cr. For ABI_V4 we must let the
27721 unwinder know that the stack location is no longer valid at or
27722 before the stack deallocation, but we can't emit a cfa_restore for
27723 cr at the stack deallocation like we do for other registers.
27724 The trouble is that it is possible for the move to cr to be
27725 scheduled after the stack deallocation. So say exactly where cr
27726 is located on each of the two insns. */
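/* A sketch of the two-insn restore (offset and mask illustrative):
	lwz 12,8(1)	# load the saved CR word from the frame
	mtcrf 0x38,12	# move it to cr2..cr4
   The REG_CFA_REGISTER note added below records, for the unwinder,
   that cr's value lives in the GPR between these two points.  */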
27728 static rtx
27729 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27731 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27732 rtx reg = gen_rtx_REG (SImode, regno);
27733 rtx_insn *insn = emit_move_insn (reg, mem);
27735 if (!exit_func && DEFAULT_ABI == ABI_V4)
27737 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27738 rtx set = gen_rtx_SET (reg, cr);
27740 add_reg_note (insn, REG_CFA_REGISTER, set);
27741 RTX_FRAME_RELATED_P (insn) = 1;
27743 return reg;
27746 /* Reload CR from REG. */
27748 static void
27749 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27751 int count = 0;
27752 int i;
27754 if (using_mfcr_multiple)
27756 for (i = 0; i < 8; i++)
27757 if (save_reg_p (CR0_REGNO + i))
27758 count++;
27759 gcc_assert (count);
27762 if (using_mfcr_multiple && count > 1)
27764 rtx_insn *insn;
27765 rtvec p;
27766 int ndx;
27768 p = rtvec_alloc (count);
27770 ndx = 0;
27771 for (i = 0; i < 8; i++)
27772 if (save_reg_p (CR0_REGNO + i))
27774 rtvec r = rtvec_alloc (2);
27775 RTVEC_ELT (r, 0) = reg;
27776 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
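	  /* 1 << (7-i) is the mtcrf FXM mask bit selecting CR field i;
	     e.g. CR2 uses 0x20.  */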
27777 RTVEC_ELT (p, ndx) =
27778 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27779 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27780 ndx++;
27782 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27783 gcc_assert (ndx == count);
27785 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27786 CR field separately. */
27787 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27789 for (i = 0; i < 8; i++)
27790 if (save_reg_p (CR0_REGNO + i))
27791 add_reg_note (insn, REG_CFA_RESTORE,
27792 gen_rtx_REG (SImode, CR0_REGNO + i));
27794 RTX_FRAME_RELATED_P (insn) = 1;
27797 else
27798 for (i = 0; i < 8; i++)
27799 if (save_reg_p (CR0_REGNO + i))
27801 rtx insn = emit_insn (gen_movsi_to_cr_one
27802 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27804 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27805 CR field separately, attached to the insn that in fact
27806 restores this particular CR field. */
27807 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27809 add_reg_note (insn, REG_CFA_RESTORE,
27810 gen_rtx_REG (SImode, CR0_REGNO + i));
27812 RTX_FRAME_RELATED_P (insn) = 1;
27816 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27817 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27818 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27820 rtx_insn *insn = get_last_insn ();
27821 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27823 add_reg_note (insn, REG_CFA_RESTORE, cr);
27824 RTX_FRAME_RELATED_P (insn) = 1;
27828 /* Like cr, the move to lr instruction can be scheduled after the
27829 stack deallocation, but unlike cr, its stack frame save is still
27830 valid. So we only need to emit the cfa_restore on the correct
27831 instruction. */
27833 static void
27834 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27836 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27837 rtx reg = gen_rtx_REG (Pmode, regno);
27839 emit_move_insn (reg, mem);
27842 static void
27843 restore_saved_lr (int regno, bool exit_func)
27845 rtx reg = gen_rtx_REG (Pmode, regno);
27846 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27847 rtx_insn *insn = emit_move_insn (lr, reg);
27849 if (!exit_func && flag_shrink_wrap)
27851 add_reg_note (insn, REG_CFA_RESTORE, lr);
27852 RTX_FRAME_RELATED_P (insn) = 1;
27856 static rtx
27857 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27859 if (DEFAULT_ABI == ABI_ELFv2)
27861 int i;
27862 for (i = 0; i < 8; i++)
27863 if (save_reg_p (CR0_REGNO + i))
27865 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27866 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27867 cfa_restores);
27870 else if (info->cr_save_p)
27871 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27872 gen_rtx_REG (SImode, CR2_REGNO),
27873 cfa_restores);
27875 if (info->lr_save_p)
27876 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27877 gen_rtx_REG (Pmode, LR_REGNO),
27878 cfa_restores);
27879 return cfa_restores;
27882 /* Return true if OFFSET from the stack pointer can be clobbered by
27883 signals. V.4 doesn't have any stack cushion; the AIX ABIs have 220
27884 or 288 bytes below the stack pointer not clobbered by signals. */
27886 static inline bool
27887 offset_below_red_zone_p (HOST_WIDE_INT offset)
27889 return offset < (DEFAULT_ABI == ABI_V4
27890 ? 0
27891 : TARGET_32BIT ? -220 : -288);
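/* Worked example (illustrative numbers): with the 64-bit red zone of
   288 bytes, a slot at offset -300 is below the cushion and may be
   clobbered by a signal once the frame is popped, while one at -200
   survives.  */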
27894 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27896 static void
27897 emit_cfa_restores (rtx cfa_restores)
27899 rtx_insn *insn = get_last_insn ();
27900 rtx *loc = &REG_NOTES (insn);
27902 while (*loc)
27903 loc = &XEXP (*loc, 1);
27904 *loc = cfa_restores;
27905 RTX_FRAME_RELATED_P (insn) = 1;
27908 /* Emit function epilogue as insns. */
27910 void
27911 rs6000_emit_epilogue (int sibcall)
27913 rs6000_stack_t *info;
27914 int restoring_GPRs_inline;
27915 int restoring_FPRs_inline;
27916 int using_load_multiple;
27917 int using_mtcr_multiple;
27918 int use_backchain_to_restore_sp;
27919 int restore_lr;
27920 int strategy;
27921 HOST_WIDE_INT frame_off = 0;
27922 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27923 rtx frame_reg_rtx = sp_reg_rtx;
27924 rtx cfa_restores = NULL_RTX;
27925 rtx insn;
27926 rtx cr_save_reg = NULL_RTX;
27927 machine_mode reg_mode = Pmode;
27928 int reg_size = TARGET_32BIT ? 4 : 8;
27929 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27930 ? DFmode : SFmode;
27931 int fp_reg_size = 8;
27932 int i;
27933 bool exit_func;
27934 unsigned ptr_regno;
27936 info = rs6000_stack_info ();
27938 strategy = info->savres_strategy;
27939 using_load_multiple = strategy & REST_MULTIPLE;
27940 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27941 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27942 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27943 || rs6000_cpu == PROCESSOR_PPC603
27944 || rs6000_cpu == PROCESSOR_PPC750
27945 || optimize_size);
27946 /* Restore via the backchain when we have a large frame, since this
27947 is more efficient than an addis, addi pair. The second condition
27948 here will not trigger at the moment; we don't actually need a
27949 frame pointer for alloca, but the generic parts of the compiler
27950 give us one anyway. */
27951 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27952 ? info->lr_save_offset
27953 : 0) > 32767
27954 || (cfun->calls_alloca
27955 && !frame_pointer_needed));
27956 restore_lr = (info->lr_save_p
27957 && (restoring_FPRs_inline
27958 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27959 && (restoring_GPRs_inline
27960 || info->first_fp_reg_save < 64)
27961 && !cfun->machine->lr_is_wrapped_separately);
27964 if (WORLD_SAVE_P (info))
27966 int i, j;
27967 char rname[30];
27968 const char *alloc_rname;
27969 rtvec p;
27971 /* eh_rest_world_r10 will return to the location saved in the LR
27972 stack slot (which is not likely to be our caller).
27973 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27974 rest_world is similar, except any R10 parameter is ignored.
27975 The exception-handling stuff that was here in 2.95 is no
27976 longer necessary. */
27978 p = rtvec_alloc (9
27979 + 32 - info->first_gp_reg_save
27980 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27981 + 63 + 1 - info->first_fp_reg_save);
27983 strcpy (rname, ((crtl->calls_eh_return) ?
27984 "*eh_rest_world_r10" : "*rest_world"));
27985 alloc_rname = ggc_strdup (rname);
27987 j = 0;
27988 RTVEC_ELT (p, j++) = ret_rtx;
27989 RTVEC_ELT (p, j++)
27990 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27991 /* The instruction pattern requires a clobber here;
27992 it is shared with the restVEC helper. */
27993 RTVEC_ELT (p, j++)
27994 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27997 /* CR register traditionally saved as CR2. */
27998 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27999 RTVEC_ELT (p, j++)
28000 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28001 if (flag_shrink_wrap)
28003 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28004 gen_rtx_REG (Pmode, LR_REGNO),
28005 cfa_restores);
28006 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28010 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28012 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28013 RTVEC_ELT (p, j++)
28014 = gen_frame_load (reg,
28015 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28016 if (flag_shrink_wrap
28017 && save_reg_p (info->first_gp_reg_save + i))
28018 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28020 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28022 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28023 RTVEC_ELT (p, j++)
28024 = gen_frame_load (reg,
28025 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28026 if (flag_shrink_wrap
28027 && save_reg_p (info->first_altivec_reg_save + i))
28028 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28030 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28032 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28033 ? DFmode : SFmode),
28034 info->first_fp_reg_save + i);
28035 RTVEC_ELT (p, j++)
28036 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28037 if (flag_shrink_wrap
28038 && save_reg_p (info->first_fp_reg_save + i))
28039 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28041 RTVEC_ELT (p, j++)
28042 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28043 RTVEC_ELT (p, j++)
28044 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28045 RTVEC_ELT (p, j++)
28046 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28047 RTVEC_ELT (p, j++)
28048 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28049 RTVEC_ELT (p, j++)
28050 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28051 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28053 if (flag_shrink_wrap)
28055 REG_NOTES (insn) = cfa_restores;
28056 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28057 RTX_FRAME_RELATED_P (insn) = 1;
28059 return;
28062 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28063 if (info->push_p)
28064 frame_off = info->total_size;
28066 /* Restore AltiVec registers if we must do so before adjusting the
28067 stack. */
28068 if (info->altivec_size != 0
28069 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28070 || (DEFAULT_ABI != ABI_V4
28071 && offset_below_red_zone_p (info->altivec_save_offset))))
28073 int i;
28074 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28076 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28077 if (use_backchain_to_restore_sp)
28079 int frame_regno = 11;
28081 if ((strategy & REST_INLINE_VRS) == 0)
28083 /* Of r11 and r12, select the one not clobbered by an
28084 out-of-line restore function for the frame register. */
28085 frame_regno = 11 + 12 - scratch_regno;
28087 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28088 emit_move_insn (frame_reg_rtx,
28089 gen_rtx_MEM (Pmode, sp_reg_rtx));
28090 frame_off = 0;
28092 else if (frame_pointer_needed)
28093 frame_reg_rtx = hard_frame_pointer_rtx;
28095 if ((strategy & REST_INLINE_VRS) == 0)
28097 int end_save = info->altivec_save_offset + info->altivec_size;
28098 int ptr_off;
28099 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28100 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28102 if (end_save + frame_off != 0)
28104 rtx offset = GEN_INT (end_save + frame_off);
28106 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28108 else
28109 emit_move_insn (ptr_reg, frame_reg_rtx);
28111 ptr_off = -end_save;
28112 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28113 info->altivec_save_offset + ptr_off,
28114 0, V4SImode, SAVRES_VR);
28116 else
28118 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28119 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28121 rtx addr, areg, mem, insn;
28122 rtx reg = gen_rtx_REG (V4SImode, i);
28123 HOST_WIDE_INT offset
28124 = (info->altivec_save_offset + frame_off
28125 + 16 * (i - info->first_altivec_reg_save));
28127 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28129 mem = gen_frame_mem (V4SImode,
28130 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28131 GEN_INT (offset)));
28132 insn = gen_rtx_SET (reg, mem);
28134 else
28136 areg = gen_rtx_REG (Pmode, 0);
28137 emit_move_insn (areg, GEN_INT (offset));
28139 /* AltiVec addressing mode is [reg+reg]. */
28140 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28141 mem = gen_frame_mem (V4SImode, addr);
28143 /* Rather than emitting a generic move, force use of the
28144 lvx instruction, which we always want. In particular we
28145 don't want lxvd2x/xxpermdi for little endian. */
28146 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28149 (void) emit_insn (insn);
28153 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28154 if (((strategy & REST_INLINE_VRS) == 0
28155 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28156 && (flag_shrink_wrap
28157 || (offset_below_red_zone_p
28158 (info->altivec_save_offset
28159 + 16 * (i - info->first_altivec_reg_save))))
28160 && save_reg_p (i))
28162 rtx reg = gen_rtx_REG (V4SImode, i);
28163 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28167 /* Restore VRSAVE if we must do so before adjusting the stack. */
28168 if (info->vrsave_size != 0
28169 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28170 || (DEFAULT_ABI != ABI_V4
28171 && offset_below_red_zone_p (info->vrsave_save_offset))))
28173 rtx reg;
28175 if (frame_reg_rtx == sp_reg_rtx)
28177 if (use_backchain_to_restore_sp)
28179 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28180 emit_move_insn (frame_reg_rtx,
28181 gen_rtx_MEM (Pmode, sp_reg_rtx));
28182 frame_off = 0;
28184 else if (frame_pointer_needed)
28185 frame_reg_rtx = hard_frame_pointer_rtx;
28188 reg = gen_rtx_REG (SImode, 12);
28189 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28190 info->vrsave_save_offset + frame_off));
28192 emit_insn (generate_set_vrsave (reg, info, 1));
28195 insn = NULL_RTX;
28196 /* If we have a large stack frame, restore the old stack pointer
28197 using the backchain. */
28198 if (use_backchain_to_restore_sp)
28200 if (frame_reg_rtx == sp_reg_rtx)
28202 /* Under V.4, don't reset the stack pointer until after we're done
28203 loading the saved registers. */
28204 if (DEFAULT_ABI == ABI_V4)
28205 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28207 insn = emit_move_insn (frame_reg_rtx,
28208 gen_rtx_MEM (Pmode, sp_reg_rtx));
28209 frame_off = 0;
28211 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28212 && DEFAULT_ABI == ABI_V4)
28213 /* frame_reg_rtx has been set up by the altivec restore. */
28215 else
28217 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28218 frame_reg_rtx = sp_reg_rtx;
28221 /* If we have a frame pointer, we can restore the old stack pointer
28222 from it. */
28223 else if (frame_pointer_needed)
28225 frame_reg_rtx = sp_reg_rtx;
28226 if (DEFAULT_ABI == ABI_V4)
28227 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28228 /* Prevent reordering memory accesses against stack pointer restore. */
28229 else if (cfun->calls_alloca
28230 || offset_below_red_zone_p (-info->total_size))
28231 rs6000_emit_stack_tie (frame_reg_rtx, true);
28233 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28234 GEN_INT (info->total_size)));
28235 frame_off = 0;
28237 else if (info->push_p
28238 && DEFAULT_ABI != ABI_V4
28239 && !crtl->calls_eh_return)
28241 /* Prevent reordering memory accesses against stack pointer restore. */
28242 if (cfun->calls_alloca
28243 || offset_below_red_zone_p (-info->total_size))
28244 rs6000_emit_stack_tie (frame_reg_rtx, false);
28245 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28246 GEN_INT (info->total_size)));
28247 frame_off = 0;
28249 if (insn && frame_reg_rtx == sp_reg_rtx)
28251 if (cfa_restores)
28253 REG_NOTES (insn) = cfa_restores;
28254 cfa_restores = NULL_RTX;
28256 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28257 RTX_FRAME_RELATED_P (insn) = 1;
28260 /* Restore AltiVec registers if we have not done so already. */
28261 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28262 && info->altivec_size != 0
28263 && (DEFAULT_ABI == ABI_V4
28264 || !offset_below_red_zone_p (info->altivec_save_offset)))
28266 int i;
28268 if ((strategy & REST_INLINE_VRS) == 0)
28270 int end_save = info->altivec_save_offset + info->altivec_size;
28271 int ptr_off;
28272 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28273 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28274 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28276 if (end_save + frame_off != 0)
28278 rtx offset = GEN_INT (end_save + frame_off);
28280 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28282 else
28283 emit_move_insn (ptr_reg, frame_reg_rtx);
28285 ptr_off = -end_save;
28286 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28287 info->altivec_save_offset + ptr_off,
28288 0, V4SImode, SAVRES_VR);
28289 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28291 /* Frame reg was clobbered by the out-of-line save. Restore it
28292 from ptr_reg, and if we are calling an out-of-line gpr or
28293 fpr restore, set up the correct pointer and offset. */
28294 unsigned newptr_regno = 1;
28295 if (!restoring_GPRs_inline)
28297 bool lr = info->gp_save_offset + info->gp_size == 0;
28298 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28299 newptr_regno = ptr_regno_for_savres (sel);
28300 end_save = info->gp_save_offset + info->gp_size;
28302 else if (!restoring_FPRs_inline)
28304 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28305 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28306 newptr_regno = ptr_regno_for_savres (sel);
28307 end_save = info->fp_save_offset + info->fp_size;
28310 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28311 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28313 if (end_save + ptr_off != 0)
28315 rtx offset = GEN_INT (end_save + ptr_off);
28317 frame_off = -end_save;
28318 if (TARGET_32BIT)
28319 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28320 ptr_reg, offset));
28321 else
28322 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28323 ptr_reg, offset));
28325 else
28327 frame_off = ptr_off;
28328 emit_move_insn (frame_reg_rtx, ptr_reg);
28332 else
28334 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28335 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28337 rtx addr, areg, mem, insn;
28338 rtx reg = gen_rtx_REG (V4SImode, i);
28339 HOST_WIDE_INT offset
28340 = (info->altivec_save_offset + frame_off
28341 + 16 * (i - info->first_altivec_reg_save));
28343 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28345 mem = gen_frame_mem (V4SImode,
28346 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28347 GEN_INT (offset)));
28348 insn = gen_rtx_SET (reg, mem);
28350 else
28352 areg = gen_rtx_REG (Pmode, 0);
28353 emit_move_insn (areg, GEN_INT (offset));
28355 /* AltiVec addressing mode is [reg+reg]. */
28356 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28357 mem = gen_frame_mem (V4SImode, addr);
28359 /* Rather than emitting a generic move, force use of the
28360 lvx instruction, which we always want. In particular we
28361 don't want lxvd2x/xxpermdi for little endian. */
28362 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28365 (void) emit_insn (insn);
28369 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28370 if (((strategy & REST_INLINE_VRS) == 0
28371 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28372 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28373 && save_reg_p (i))
28375 rtx reg = gen_rtx_REG (V4SImode, i);
28376 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28380 /* Restore VRSAVE if we have not done so already. */
28381 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28382 && info->vrsave_size != 0
28383 && (DEFAULT_ABI == ABI_V4
28384 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28386 rtx reg;
28388 reg = gen_rtx_REG (SImode, 12);
28389 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28390 info->vrsave_save_offset + frame_off));
28392 emit_insn (generate_set_vrsave (reg, info, 1));
28395 /* If we exit by an out-of-line restore function on ABI_V4 then that
28396 function will deallocate the stack, so we don't need to worry
28397 about the unwinder restoring cr from an invalid stack frame
28398 location. */
28399 exit_func = (!restoring_FPRs_inline
28400 || (!restoring_GPRs_inline
28401 && info->first_fp_reg_save == 64));
28403 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28404 *separate* slots if the routine calls __builtin_eh_return, so
28405 that they can be independently restored by the unwinder. */
28406 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28408 int i, cr_off = info->ehcr_offset;
28410 for (i = 0; i < 8; i++)
28411 if (!call_used_regs[CR0_REGNO + i])
28413 rtx reg = gen_rtx_REG (SImode, 0);
28414 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28415 cr_off + frame_off));
28417 insn = emit_insn (gen_movsi_to_cr_one
28418 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28420 if (!exit_func && flag_shrink_wrap)
28422 add_reg_note (insn, REG_CFA_RESTORE,
28423 gen_rtx_REG (SImode, CR0_REGNO + i));
28425 RTX_FRAME_RELATED_P (insn) = 1;
28428 cr_off += reg_size;
28432 /* Get the old lr if we saved it. If we are restoring registers
28433 out-of-line, then the out-of-line routines can do this for us. */
28434 if (restore_lr && restoring_GPRs_inline)
28435 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28437 /* Get the old cr if we saved it. */
28438 if (info->cr_save_p)
28440 unsigned cr_save_regno = 12;
28442 if (!restoring_GPRs_inline)
28444 /* Ensure we don't use the register used by the out-of-line
28445 gpr register restore below. */
28446 bool lr = info->gp_save_offset + info->gp_size == 0;
28447 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28448 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28450 if (gpr_ptr_regno == 12)
28451 cr_save_regno = 11;
28452 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28454 else if (REGNO (frame_reg_rtx) == 12)
28455 cr_save_regno = 11;
28457 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28458 info->cr_save_offset + frame_off,
28459 exit_func);
28462 /* Set LR here to try to overlap restores below. */
28463 if (restore_lr && restoring_GPRs_inline)
28464 restore_saved_lr (0, exit_func);
28466 /* Load exception handler data registers, if needed. */
28467 if (crtl->calls_eh_return)
28469 unsigned int i, regno;
28471 if (TARGET_AIX)
28473 rtx reg = gen_rtx_REG (reg_mode, 2);
28474 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28475 frame_off + RS6000_TOC_SAVE_SLOT));
28478 for (i = 0; ; ++i)
28480 rtx mem;
28482 regno = EH_RETURN_DATA_REGNO (i);
28483 if (regno == INVALID_REGNUM)
28484 break;
28486 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28487 info->ehrd_offset + frame_off
28488 + reg_size * (int) i);
28490 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28494 /* Restore GPRs. This is done as a PARALLEL if we are using
28495 the load-multiple instructions. */
28496 if (!restoring_GPRs_inline)
28498 /* We are jumping to an out-of-line function. */
28499 rtx ptr_reg;
28500 int end_save = info->gp_save_offset + info->gp_size;
28501 bool can_use_exit = end_save == 0;
28502 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28503 int ptr_off;
28505 /* Emit stack reset code if we need it. */
28506 ptr_regno = ptr_regno_for_savres (sel);
28507 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28508 if (can_use_exit)
28509 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28510 else if (end_save + frame_off != 0)
28511 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28512 GEN_INT (end_save + frame_off)));
28513 else if (REGNO (frame_reg_rtx) != ptr_regno)
28514 emit_move_insn (ptr_reg, frame_reg_rtx);
28515 if (REGNO (frame_reg_rtx) == ptr_regno)
28516 frame_off = -end_save;
28518 if (can_use_exit && info->cr_save_p)
28519 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28521 ptr_off = -end_save;
28522 rs6000_emit_savres_rtx (info, ptr_reg,
28523 info->gp_save_offset + ptr_off,
28524 info->lr_save_offset + ptr_off,
28525 reg_mode, sel);
28527 else if (using_load_multiple)
28529 rtvec p;
28530 p = rtvec_alloc (32 - info->first_gp_reg_save);
28531 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28532 RTVEC_ELT (p, i)
28533 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28534 frame_reg_rtx,
28535 info->gp_save_offset + frame_off + reg_size * i);
28536 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28538 else
28540 int offset = info->gp_save_offset + frame_off;
28541 for (i = info->first_gp_reg_save; i < 32; i++)
28543 if (save_reg_p (i)
28544 && !cfun->machine->gpr_is_wrapped_separately[i])
28546 rtx reg = gen_rtx_REG (reg_mode, i);
28547 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28550 offset += reg_size;
28554 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28556 /* If the frame pointer was used then we can't delay emitting
28557 a REG_CFA_DEF_CFA note. This must happen on the insn that
28558 restores the frame pointer, r31. We may have already emitted
28559 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28560 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28561 be harmless if emitted. */
28562 if (frame_pointer_needed)
28564 insn = get_last_insn ();
28565 add_reg_note (insn, REG_CFA_DEF_CFA,
28566 plus_constant (Pmode, frame_reg_rtx, frame_off));
28567 RTX_FRAME_RELATED_P (insn) = 1;
28570 /* Set up cfa_restores. We always need these when
28571 shrink-wrapping. If not shrink-wrapping then we only need
28572 the cfa_restore when the stack location is no longer valid.
28573 The cfa_restores must be emitted on or before the insn that
28574 invalidates the stack, and of course must not be emitted
28575 before the insn that actually does the restore. The latter
28576 is why it is a bad idea to emit the cfa_restores as a group
28577 on the last instruction here that actually does a restore:
28578 that insn may be reordered with respect to others doing
28579 restores. */
28580 if (flag_shrink_wrap
28581 && !restoring_GPRs_inline
28582 && info->first_fp_reg_save == 64)
28583 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28585 for (i = info->first_gp_reg_save; i < 32; i++)
28586 if (save_reg_p (i)
28587 && !cfun->machine->gpr_is_wrapped_separately[i])
28589 rtx reg = gen_rtx_REG (reg_mode, i);
28590 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28594 if (!restoring_GPRs_inline
28595 && info->first_fp_reg_save == 64)
28597 /* We are jumping to an out-of-line function. */
28598 if (cfa_restores)
28599 emit_cfa_restores (cfa_restores);
28600 return;
28603 if (restore_lr && !restoring_GPRs_inline)
28605 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28606 restore_saved_lr (0, exit_func);
28609 /* Restore fpr's if we need to do it without calling a function. */
28610 if (restoring_FPRs_inline)
28612 int offset = info->fp_save_offset + frame_off;
28613 for (i = info->first_fp_reg_save; i < 64; i++)
28615 if (save_reg_p (i)
28616 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28618 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28619 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28620 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28621 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28622 cfa_restores);
28625 offset += fp_reg_size;
28629 /* If we saved cr, restore it here (just the fields that were used). */
28630 if (info->cr_save_p)
28631 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28633 /* If this is V.4, unwind the stack pointer after all of the loads
28634 have been done, or set up r11 if we are restoring fp out of line. */
28635 ptr_regno = 1;
28636 if (!restoring_FPRs_inline)
28638 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28639 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28640 ptr_regno = ptr_regno_for_savres (sel);
28643 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28644 if (REGNO (frame_reg_rtx) == ptr_regno)
28645 frame_off = 0;
28647 if (insn && restoring_FPRs_inline)
28649 if (cfa_restores)
28651 REG_NOTES (insn) = cfa_restores;
28652 cfa_restores = NULL_RTX;
28654 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28655 RTX_FRAME_RELATED_P (insn) = 1;
28658 if (crtl->calls_eh_return)
28660 rtx sa = EH_RETURN_STACKADJ_RTX;
28661 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28664 if (!sibcall && restoring_FPRs_inline)
28666 if (cfa_restores)
28668 /* We can't hang the cfa_restores off a simple return,
28669 since the shrink-wrap code sometimes uses an existing
28670 return. This means there might be a path from
28671 pre-prologue code to this return, and dwarf2cfi code
28672 wants the eh_frame unwinder state to be the same on
28673 all paths to any point. So we need to emit the
28674 cfa_restores before the return. For -m64 we really
28675 don't need epilogue cfa_restores at all, except for
28676 this irritating dwarf2cfi with shrink-wrap
28677 requirement; the stack red zone means eh_frame info
28678 from the prologue telling the unwinder to restore
28679 from the stack is perfectly good right to the end of
28680 the function. */
28681 emit_insn (gen_blockage ());
28682 emit_cfa_restores (cfa_restores);
28683 cfa_restores = NULL_RTX;
28686 emit_jump_insn (targetm.gen_simple_return ());
28689 if (!sibcall && !restoring_FPRs_inline)
28691 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28692 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28693 int elt = 0;
28694 RTVEC_ELT (p, elt++) = ret_rtx;
28695 if (lr)
28696 RTVEC_ELT (p, elt++)
28697 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28699 /* We have to restore more than two FP registers, so branch to the
28700 restore function. It will return to our caller. */
28701 int i;
28702 int reg;
28703 rtx sym;
28705 if (flag_shrink_wrap)
28706 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28708 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28709 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28710 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28711 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28713 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28715 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28717 RTVEC_ELT (p, elt++)
28718 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28719 if (flag_shrink_wrap
28720 && save_reg_p (info->first_fp_reg_save + i))
28721 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28724 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28727 if (cfa_restores)
28729 if (sibcall)
28730 /* Ensure the cfa_restores are hung off an insn that won't
28731 be reordered above other restores. */
28732 emit_insn (gen_blockage ());
28734 emit_cfa_restores (cfa_restores);
28738 /* Write function epilogue. */
28740 static void
28741 rs6000_output_function_epilogue (FILE *file)
28743 #if TARGET_MACHO
28744 macho_branch_islands ();
28747 rtx_insn *insn = get_last_insn ();
28748 rtx_insn *deleted_debug_label = NULL;
28750 /* Mach-O doesn't support labels at the end of objects, so if
28751 it looks like we might want one, take special action.
28753 First, collect any sequence of deleted debug labels. */
28754 while (insn
28755 && NOTE_P (insn)
28756 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28758 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert a nop;
28759 instead set their CODE_LABEL_NUMBER to -1, since otherwise
28760 there would be code generation differences
28761 between -g and -g0. */
28762 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28763 deleted_debug_label = insn;
28764 insn = PREV_INSN (insn);
28767 /* Second, if we have:
28768 label:
28769 barrier
28770 then this needs to be detected, so skip past the barrier. */
28772 if (insn && BARRIER_P (insn))
28773 insn = PREV_INSN (insn);
28775 /* Up to now we've only seen notes or barriers. */
28776 if (insn)
28778 if (LABEL_P (insn)
28779 || (NOTE_P (insn)
28780 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28781 /* Trailing label: <barrier>. */
28782 fputs ("\tnop\n", file);
28783 else
28785 /* Lastly, see if we have a completely empty function body. */
28786 while (insn && ! INSN_P (insn))
28787 insn = PREV_INSN (insn);
28788 /* If we don't find any insns, we've got an empty function body;
28789 i.e. completely empty, without a return or branch. This is
28790 taken as the case where a function body has been removed
28791 because it contains an inline __builtin_unreachable(). GCC
28792 states that reaching __builtin_unreachable() means UB so we're
28793 not obliged to do anything special; however, we want
28794 non-zero-sized function bodies. To meet this, and help the
28795 user out, let's trap the case. */
28796 if (insn == NULL)
28797 fputs ("\ttrap\n", file);
28800 else if (deleted_debug_label)
28801 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28802 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28803 CODE_LABEL_NUMBER (insn) = -1;
28805 #endif
28807 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28808 on its format.
28810 We don't output a traceback table if -finhibit-size-directive was
28811 used. The documentation for -finhibit-size-directive reads
28812 ``don't output a @code{.size} assembler directive, or anything
28813 else that would cause trouble if the function is split in the
28814 middle, and the two halves are placed at locations far apart in
28815 memory.'' The traceback table has this property, since it
28816 includes the offset from the start of the function to the
28817 traceback table itself.
28819 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28820 different traceback table. */
28821 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28822 && ! flag_inhibit_size_directive
28823 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28825 const char *fname = NULL;
28826 const char *language_string = lang_hooks.name;
28827 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28828 int i;
28829 int optional_tbtab;
28830 rs6000_stack_t *info = rs6000_stack_info ();
28832 if (rs6000_traceback == traceback_full)
28833 optional_tbtab = 1;
28834 else if (rs6000_traceback == traceback_part)
28835 optional_tbtab = 0;
28836 else
28837 optional_tbtab = !optimize_size && !TARGET_ELF;
28839 if (optional_tbtab)
28841 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28842 while (*fname == '.') /* V.4 encodes . in the name */
28843 fname++;
28845 /* Need label immediately before tbtab, so we can compute
28846 its offset from the function start. */
28847 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28848 ASM_OUTPUT_LABEL (file, fname);
28851 /* The .tbtab pseudo-op can only be used for the first eight
28852 expressions, since it can't handle the possibly variable
28853 length fields that follow. However, if you omit the optional
28854 fields, the assembler outputs zeros for all optional fields
28855 anyway, giving each variable-length field its minimum length
28856 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28857 pseudo-op at all. */
28859 /* An all-zero word flags the start of the tbtab, for debuggers
28860 that have to find it by searching forward from the entry
28861 point or from the current pc. */
28862 fputs ("\t.long 0\n", file);
28864 /* Tbtab format type. Use format type 0. */
28865 fputs ("\t.byte 0,", file);
28867 /* Language type. Unfortunately, there does not seem to be any
28868 official way to discover the language being compiled, so we
28869 use language_string.
28870 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28871 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28872 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28873 either, so for now use 0. */
28874 if (lang_GNU_C ()
28875 || ! strcmp (language_string, "GNU GIMPLE")
28876 || ! strcmp (language_string, "GNU Go")
28877 || ! strcmp (language_string, "libgccjit"))
28878 i = 0;
28879 else if (! strcmp (language_string, "GNU F77")
28880 || lang_GNU_Fortran ())
28881 i = 1;
28882 else if (! strcmp (language_string, "GNU Pascal"))
28883 i = 2;
28884 else if (! strcmp (language_string, "GNU Ada"))
28885 i = 3;
28886 else if (lang_GNU_CXX ()
28887 || ! strcmp (language_string, "GNU Objective-C++"))
28888 i = 9;
28889 else if (! strcmp (language_string, "GNU Java"))
28890 i = 13;
28891 else if (! strcmp (language_string, "GNU Objective-C"))
28892 i = 14;
28893 else
28894 gcc_unreachable ();
28895 fprintf (file, "%d,", i);
28897 /* 8 single bit fields: global linkage (not set for C extern linkage,
28898 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28899 from start of procedure stored in tbtab, internal function, function
28900 has controlled storage, function has no toc, function uses fp,
28901 function logs/aborts fp operations. */
28902 /* Assume that fp operations are used if any fp reg must be saved. */
28903 fprintf (file, "%d,",
28904 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28906 /* 6 bitfields: function is interrupt handler, name present in
28907 proc table, function calls alloca, on condition directives
28908 (controls stack walks, 3 bits), saves condition reg, saves
28909 link reg. */
28910 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28911 set up as a frame pointer, even when there is no alloca call. */
28912 fprintf (file, "%d,",
28913 ((optional_tbtab << 6)
28914 | ((optional_tbtab & frame_pointer_needed) << 5)
28915 | (info->cr_save_p << 1)
28916 | (info->lr_save_p)));
28918 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28919 (6 bits). */
28920 fprintf (file, "%d,",
28921 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28923 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28924 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28926 if (optional_tbtab)
28928 /* Compute the parameter info from the function decl argument
28929 list. */
28930 tree decl;
28931 int next_parm_info_bit = 31;
28933 for (decl = DECL_ARGUMENTS (current_function_decl);
28934 decl; decl = DECL_CHAIN (decl))
28936 rtx parameter = DECL_INCOMING_RTL (decl);
28937 machine_mode mode = GET_MODE (parameter);
28939 if (GET_CODE (parameter) == REG)
28941 if (SCALAR_FLOAT_MODE_P (mode))
28943 int bits;
28945 float_parms++;
28947 switch (mode)
28949 case E_SFmode:
28950 case E_SDmode:
28951 bits = 0x2;
28952 break;
28954 case E_DFmode:
28955 case E_DDmode:
28956 case E_TFmode:
28957 case E_TDmode:
28958 case E_IFmode:
28959 case E_KFmode:
28960 bits = 0x3;
28961 break;
28963 default:
28964 gcc_unreachable ();
28967 /* If only one bit will fit, don't or in this entry. */
28968 if (next_parm_info_bit > 0)
28969 parm_info |= (bits << (next_parm_info_bit - 1));
28970 next_parm_info_bit -= 2;
28972 else
28974 fixed_parms += ((GET_MODE_SIZE (mode)
28975 + (UNITS_PER_WORD - 1))
28976 / UNITS_PER_WORD);
28977 next_parm_info_bit -= 1;
28983 /* Number of fixed point parameters. */
28984 /* This is actually the number of words of fixed point parameters; thus
28985 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28986 fprintf (file, "%d,", fixed_parms);
28988 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28989 all on stack. */
28990 /* This is actually the number of fp registers that hold parameters;
28991 and thus the maximum value is 13. */
28992 /* Set parameters on stack bit if parameters are not in their original
28993 registers, regardless of whether they are on the stack? Xlc
28994 seems to set the bit when not optimizing. */
28995 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28997 if (optional_tbtab)
28999 /* Optional fields follow. Some are variable length. */
29001 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29002 float, 11 double float. */
29003 /* There is an entry for each parameter in a register, in the order
29004 that they occur in the parameter list. Any intervening arguments
29005 on the stack are ignored. If the list overflows a long (max
29006 possible length 34 bits) then completely leave off all elements
29007 that don't fit. */
29008 /* Only emit this long if there was at least one parameter. */
29009 if (fixed_parms || float_parms)
29010 fprintf (file, "\t.long %d\n", parm_info);
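/* E.g. (an editor's sketch, not verbatim output): for a function
   whose register parameters are (int, double), the int consumes one
   bit (0 = fixed) and the double the next two (11 = double float),
   so parm_info is 0x60000000 and the directive reads
   ".long 1610612736".  */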
29012 /* Offset from start of code to tb table. */
29013 fputs ("\t.long ", file);
29014 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29015 RS6000_OUTPUT_BASENAME (file, fname);
29016 putc ('-', file);
29017 rs6000_output_function_entry (file, fname);
29018 putc ('\n', file);
29020 /* Interrupt handler mask. */
29021 /* Omit this long, since we never set the interrupt handler bit
29022 above. */
29024 /* Number of CTL (controlled storage) anchors. */
29025 /* Omit this long, since the has_ctl bit is never set above. */
29027 /* Displacement into stack of each CTL anchor. */
29028 /* Omit this list of longs, because there are no CTL anchors. */
29030 /* Length of function name. */
29031 if (*fname == '*')
29032 ++fname;
29033 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29035 /* Function name. */
29036 assemble_string (fname, strlen (fname));
29038 /* Register for alloca automatic storage; this is always reg 31.
29039 Only emit this if the alloca bit was set above. */
29040 if (frame_pointer_needed)
29041 fputs ("\t.byte 31\n", file);
29043 fputs ("\t.align 2\n", file);
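/* Putting it together, the table emitted for a small optimized C
   function `foo' that saves only LR and takes one integer argument
   might look like this (an editor's sketch of plausible output,
   assuming a full tbtab and no FPR/GPR saves, not verbatim):

	LT..foo:
		.long 0				# all-zero word marks the tbtab
		.byte 0,0,32,65,128,0,1,0	# format, language, bitfields
		.long 0				# parm_info
		.long LT..foo-.foo		# offset from function start
		.short 3
		.byte "foo"
		.align 2
 */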
29047 /* Arrange to define .LCTOC1 label, if not already done. */
29048 if (need_toc_init)
29050 need_toc_init = 0;
29051 if (!toc_initialized)
29053 switch_to_section (toc_section);
29054 switch_to_section (current_function_section ());
29059 /* -fsplit-stack support. */
29061 /* A SYMBOL_REF for __morestack. */
29062 static GTY(()) rtx morestack_ref;
29064 static rtx
29065 gen_add3_const (rtx rt, rtx ra, long c)
29067 if (TARGET_64BIT)
29068 return gen_adddi3 (rt, ra, GEN_INT (c));
29069 else
29070 return gen_addsi3 (rt, ra, GEN_INT (c));
29073 /* Emit -fsplit-stack prologue, which goes before the regular function
29074 prologue (at local entry point in the case of ELFv2). */
29076 void
29077 rs6000_expand_split_stack_prologue (void)
29079 rs6000_stack_t *info = rs6000_stack_info ();
29080 unsigned HOST_WIDE_INT allocate;
29081 long alloc_hi, alloc_lo;
29082 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29083 rtx_insn *insn;
29085 gcc_assert (flag_split_stack && reload_completed);
29087 if (!info->push_p)
29088 return;
29090 if (global_regs[29])
29092 error ("%qs uses register r29", "-fsplit-stack");
29093 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29094 "conflicts with %qD", global_regs_decl[29]);
29097 allocate = info->total_size;
29098 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29100 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29101 return;
29103 if (morestack_ref == NULL_RTX)
29105 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29106 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29107 | SYMBOL_FLAG_FUNCTION);
29110 r0 = gen_rtx_REG (Pmode, 0);
29111 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29112 r12 = gen_rtx_REG (Pmode, 12);
29113 emit_insn (gen_load_split_stack_limit (r0));
29114 /* Always emit two insns here to calculate the requested stack,
29115 so that the linker can edit them when adjusting size for calling
29116 non-split-stack code. */
29117 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29118 alloc_lo = -allocate - alloc_hi;
29119 if (alloc_hi != 0)
29121 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29122 if (alloc_lo != 0)
29123 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29124 else
29125 emit_insn (gen_nop ());
29127 else
29129 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29130 emit_insn (gen_nop ());
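/* For illustration (a sketch assuming 64-bit): allocate == 0x12345
   gives alloc_hi = -0x10000 and alloc_lo = -0x2345, so we emit

	addis 12,1,-1		# r12 = r1 - 0x10000
	addi 12,12,-9029	# r12 = r12 - 0x2345

   while a small frame such as allocate == 0x120 emits

	addi 12,1,-288
	nop			# placeholder the linker may rewrite  */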
29133 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29134 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29135 ok_label = gen_label_rtx ();
29136 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29137 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29138 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29139 pc_rtx);
29140 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29141 JUMP_LABEL (insn) = ok_label;
29142 /* Mark the jump as very likely to be taken. */
29143 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29145 lr = gen_rtx_REG (Pmode, LR_REGNO);
29146 insn = emit_move_insn (r0, lr);
29147 RTX_FRAME_RELATED_P (insn) = 1;
29148 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29149 RTX_FRAME_RELATED_P (insn) = 1;
29151 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29152 const0_rtx, const0_rtx));
29153 call_fusage = NULL_RTX;
29154 use_reg (&call_fusage, r12);
29155 /* Say the call uses r0, even though it doesn't, to stop regrename
29156 from twiddling with the insns saving lr, trashing args for cfun.
29157 The insns restoring lr are similarly protected by making
29158 split_stack_return use r0. */
29159 use_reg (&call_fusage, r0);
29160 add_function_usage_to (insn, call_fusage);
29161 /* Indicate that this function can't jump to non-local gotos. */
29162 make_reg_eh_region_note_nothrow_nononlocal (insn);
29163 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29164 insn = emit_move_insn (lr, r0);
29165 add_reg_note (insn, REG_CFA_RESTORE, lr);
29166 RTX_FRAME_RELATED_P (insn) = 1;
29167 emit_insn (gen_split_stack_return ());
29169 emit_label (ok_label);
29170 LABEL_NUSES (ok_label) = 1;
29173 /* Return the internal arg pointer used for function incoming
29174 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29175 to copy it to a pseudo in order for it to be preserved over calls
29176 and suchlike. We'd really like to use a pseudo here for the
29177 internal arg pointer but data-flow analysis is not prepared to
29178 accept pseudos as live at the beginning of a function. */
29180 static rtx
29181 rs6000_internal_arg_pointer (void)
29183 if (flag_split_stack
29184 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29185 == NULL))
29188 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29190 rtx pat;
29192 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29193 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29195 /* Put the pseudo initialization right after the note at the
29196 beginning of the function. */
29197 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29198 gen_rtx_REG (Pmode, 12));
29199 push_topmost_sequence ();
29200 emit_insn_after (pat, get_insns ());
29201 pop_topmost_sequence ();
29203 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29204 FIRST_PARM_OFFSET (current_function_decl));
29206 return virtual_incoming_args_rtx;
29209 /* We may have to tell the dataflow pass that the split stack prologue
29210 is initializing a register. */
29212 static void
29213 rs6000_live_on_entry (bitmap regs)
29215 if (flag_split_stack)
29216 bitmap_set_bit (regs, 12);
29219 /* Emit -fsplit-stack dynamic stack allocation space check. */
29221 void
29222 rs6000_split_stack_space_check (rtx size, rtx label)
29224 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29225 rtx limit = gen_reg_rtx (Pmode);
29226 rtx requested = gen_reg_rtx (Pmode);
29227 rtx cmp = gen_reg_rtx (CCUNSmode);
29228 rtx jump;
29230 emit_insn (gen_load_split_stack_limit (limit));
29231 if (CONST_INT_P (size))
29232 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29233 else
29235 size = force_reg (Pmode, size);
29236 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29238 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29239 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29240 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29241 gen_rtx_LABEL_REF (VOIDmode, label),
29242 pc_rtx);
29243 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29244 JUMP_LABEL (jump) = label;
29247 /* A C compound statement that outputs the assembler code for a thunk
29248 function, used to implement C++ virtual function calls with
29249 multiple inheritance. The thunk acts as a wrapper around a virtual
29250 function, adjusting the implicit object parameter before handing
29251 control off to the real function.
29253 First, emit code to add the integer DELTA to the location that
29254 contains the incoming first argument. Assume that this argument
29255 contains a pointer, and is the one used to pass the `this' pointer
29256 in C++. This is the incoming argument *before* the function
29257 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29258 values of all other incoming arguments.
29260 After the addition, emit code to jump to FUNCTION, which is a
29261 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29262 not touch the return address. Hence returning from FUNCTION will
29263 return to whoever called the current `thunk'.
29265 The effect must be as if FUNCTION had been called directly with the
29266 adjusted first argument. This macro is responsible for emitting
29267 all of the code for a thunk function; output_function_prologue()
29268 and output_function_epilogue() are not invoked.
29270 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29271 been extracted from it.) It might possibly be useful on some
29272 targets, but probably not.
29274 If you do not define this macro, the target-independent code in the
29275 C++ frontend will generate a less efficient heavyweight thunk that
29276 calls FUNCTION instead of jumping to it. The generic approach does
29277 not support varargs. */
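/* As an illustration (editor's example, not code generated here):

     struct A { virtual void f (); long a; };
     struct B { virtual void g (); long b; };
     struct C : A, B { void g (); };

   calling g through a `B *' that points at the B subobject of a C
   must enter C::g with `this' adjusted to address the enclosing C;
   the thunk below applies that DELTA (and, for virtual bases, a
   VCALL_OFFSET loaded from the vtable) and tail-calls C::g.  */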
29279 static void
29280 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29281 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29282 tree function)
29284 rtx this_rtx, funexp;
29285 rtx_insn *insn;
29287 reload_completed = 1;
29288 epilogue_completed = 1;
29290 /* Mark the end of the (empty) prologue. */
29291 emit_note (NOTE_INSN_PROLOGUE_END);
29293 /* Find the "this" pointer. If the function returns a structure,
29294 the structure return pointer is in r3. */
29295 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29296 this_rtx = gen_rtx_REG (Pmode, 4);
29297 else
29298 this_rtx = gen_rtx_REG (Pmode, 3);
29300 /* Apply the constant offset, if required. */
29301 if (delta)
29302 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29304 /* Apply the offset from the vtable, if required. */
29305 if (vcall_offset)
29307 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29308 rtx tmp = gen_rtx_REG (Pmode, 12);
29310 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29311 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29313 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29314 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29316 else
29318 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29320 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29322 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29325 /* Generate a tail call to the target function. */
29326 if (!TREE_USED (function))
29328 assemble_external (function);
29329 TREE_USED (function) = 1;
29331 funexp = XEXP (DECL_RTL (function), 0);
29332 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29334 #if TARGET_MACHO
29335 if (MACHOPIC_INDIRECT)
29336 funexp = machopic_indirect_call_target (funexp);
29337 #endif
29339 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29340 generate sibcall RTL explicitly. */
29341 insn = emit_call_insn (
29342 gen_rtx_PARALLEL (VOIDmode,
29343 gen_rtvec (3,
29344 gen_rtx_CALL (VOIDmode,
29345 funexp, const0_rtx),
29346 gen_rtx_USE (VOIDmode, const0_rtx),
29347 simple_return_rtx)));
29348 SIBLING_CALL_P (insn) = 1;
29349 emit_barrier ();
29351 /* Run just enough of rest_of_compilation to get the insns emitted.
29352 There's not really enough bulk here to make other passes such as
29353 instruction scheduling worth while. Note that use_thunk calls
29354 assemble_start_function and assemble_end_function. */
29355 insn = get_insns ();
29356 shorten_branches (insn);
29357 final_start_function (insn, file, 1);
29358 final (insn, file, 1);
29359 final_end_function ();
29361 reload_completed = 0;
29362 epilogue_completed = 0;
29365 /* A quick summary of the various types of 'constant-pool tables'
29366 under PowerPC:
29368    Target       Flags            Name             One table per
29369    AIX          (none)           AIX TOC          object file
29370    AIX          -mfull-toc       AIX TOC          object file
29371    AIX          -mminimal-toc    AIX minimal TOC  translation unit
29372    SVR4/EABI    (none)           SVR4 SDATA       object file
29373    SVR4/EABI    -fpic            SVR4 pic         object file
29374    SVR4/EABI    -fPIC            SVR4 PIC         translation unit
29375    SVR4/EABI    -mrelocatable    EABI TOC         function
29376    SVR4/EABI    -maix            AIX TOC          object file
29377    SVR4/EABI    -maix -mminimal-toc
29378                                  AIX minimal TOC  translation unit
29380    Name             Reg.  Set by  entries  contains:
29381                                   made by  addrs?   fp?     sum?
29383    AIX TOC          2     crt0    as       Y        option  option
29384    AIX minimal TOC  30    prolog  gcc      Y        Y       option
29385    SVR4 SDATA       13    crt0    gcc      N        Y       N
29386    SVR4 pic         30    prolog  ld       Y        not yet N
29387    SVR4 PIC         30    prolog  gcc      Y        option  option
29388    EABI TOC         30    prolog  gcc      Y        option  option
29392 /* Hash functions for the hash table. */
29394 static unsigned
29395 rs6000_hash_constant (rtx k)
29397 enum rtx_code code = GET_CODE (k);
29398 machine_mode mode = GET_MODE (k);
29399 unsigned result = (code << 3) ^ mode;
29400 const char *format;
29401 int flen, fidx;
29403 format = GET_RTX_FORMAT (code);
29404 flen = strlen (format);
29405 fidx = 0;
29407 switch (code)
29409 case LABEL_REF:
29410 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29412 case CONST_WIDE_INT:
29414 int i;
29415 flen = CONST_WIDE_INT_NUNITS (k);
29416 for (i = 0; i < flen; i++)
29417 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29418 return result;
29421 case CONST_DOUBLE:
29422 if (mode != VOIDmode)
29423 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29424 flen = 2;
29425 break;
29427 case CODE_LABEL:
29428 fidx = 3;
29429 break;
29431 default:
29432 break;
29435 for (; fidx < flen; fidx++)
29436 switch (format[fidx])
29438 case 's':
29440 unsigned i, len;
29441 const char *str = XSTR (k, fidx);
29442 len = strlen (str);
29443 result = result * 613 + len;
29444 for (i = 0; i < len; i++)
29445 result = result * 613 + (unsigned) str[i];
29446 break;
29448 case 'u':
29449 case 'e':
29450 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29451 break;
29452 case 'i':
29453 case 'n':
29454 result = result * 613 + (unsigned) XINT (k, fidx);
29455 break;
29456 case 'w':
29457 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29458 result = result * 613 + (unsigned) XWINT (k, fidx);
29459 else
29461 size_t i;
29462 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29463 result = result * 613 + (unsigned) (XWINT (k, fidx)
29464 >> CHAR_BIT * i);
29466 break;
29467 case '0':
29468 break;
29469 default:
29470 gcc_unreachable ();
29473 return result;
29476 hashval_t
29477 toc_hasher::hash (toc_hash_struct *thc)
29479 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29482 /* Compare H1 and H2 for equivalence. */
29484 bool
29485 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29487 rtx r1 = h1->key;
29488 rtx r2 = h2->key;
29490 if (h1->key_mode != h2->key_mode)
29491 return 0;
29493 return rtx_equal_p (r1, r2);
29496 /* These are the names given by the C++ front-end to vtables, and
29497 vtable-like objects. Ideally, this logic should not be here;
29498 instead, there should be some programmatic way of inquiring as
29499 to whether or not an object is a vtable. */
29501 #define VTABLE_NAME_P(NAME) \
29502 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29503 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29504 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29505 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29506 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
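/* For reference (Itanium C++ ABI mangling, e.g. for a class Foo):
   _ZTV3Foo is the vtable, _ZTT3Foo the VTT, _ZTI3Foo the typeinfo,
   and _ZTC...3Foo a construction vtable; "_vt." is the old g++ 2.x
   encoding.  */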
29508 #ifdef NO_DOLLAR_IN_LABEL
29509 /* Return a GGC-allocated character string translating dollar signs in
29510 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29512 const char *
29513 rs6000_xcoff_strip_dollar (const char *name)
29515 char *strip, *p;
29516 const char *q;
29517 size_t len;
29519 q = (const char *) strchr (name, '$');
29521 if (q == 0 || q == name)
29522 return name;
29524 len = strlen (name);
29525 strip = XALLOCAVEC (char, len + 1);
29526 strcpy (strip, name);
29527 p = strip + (q - name);
29528 while (p)
29530 *p = '_';
29531 p = strchr (p + 1, '$');
29534 return ggc_alloc_string (strip, len);
29536 #endif
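/* For example, rs6000_xcoff_strip_dollar ("foo$bar$baz") returns
   "foo_bar_baz"; a name with no '$', or one that starts with '$',
   is returned unchanged.  */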
29538 void
29539 rs6000_output_symbol_ref (FILE *file, rtx x)
29541 const char *name = XSTR (x, 0);
29543 /* Currently C++ toc references to vtables can be emitted before it
29544 is decided whether the vtable is public or private. If this is
29545 the case, then the linker will eventually complain that there is
29546 a reference to an unknown section. Thus, for vtables only,
29547 we emit the TOC reference to reference the identifier and not the
29548 symbol. */
29549 if (VTABLE_NAME_P (name))
29551 RS6000_OUTPUT_BASENAME (file, name);
29553 else
29554 assemble_name (file, name);
29557 /* Output a TOC entry. We derive the entry name from what is being
29558 written. */
29560 void
29561 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29563 char buf[256];
29564 const char *name = buf;
29565 rtx base = x;
29566 HOST_WIDE_INT offset = 0;
29568 gcc_assert (!TARGET_NO_TOC);
29570 /* When the linker won't eliminate them, don't output duplicate
29571 TOC entries (this happens on AIX if there is any kind of TOC,
29572 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29573 CODE_LABELs. */
29574 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29576 struct toc_hash_struct *h;
29578 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29579 time because GGC is not initialized at that point. */
29580 if (toc_hash_table == NULL)
29581 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29583 h = ggc_alloc<toc_hash_struct> ();
29584 h->key = x;
29585 h->key_mode = mode;
29586 h->labelno = labelno;
29588 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29589 if (*found == NULL)
29590 *found = h;
29591 else /* This is indeed a duplicate.
29592 Set this label equal to that label. */
29594 fputs ("\t.set ", file);
29595 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29596 fprintf (file, "%d,", labelno);
29597 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29598 fprintf (file, "%d\n", ((*found)->labelno));
29600 #ifdef HAVE_AS_TLS
29601 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29602 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29603 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29605 fputs ("\t.set ", file);
29606 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29607 fprintf (file, "%d,", labelno);
29608 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29609 fprintf (file, "%d\n", ((*found)->labelno));
29611 #endif
29612 return;
29616 /* If we're going to put a double constant in the TOC, make sure it's
29617 aligned properly when strict alignment is on. */
29618 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29619 && STRICT_ALIGNMENT
29620 && GET_MODE_BITSIZE (mode) >= 64
29621 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29622 ASM_OUTPUT_ALIGN (file, 3);
29625 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29627 /* Handle FP constants specially. Note that if we have a minimal
29628 TOC, things we put here aren't actually in the TOC, so we can allow
29629 FP constants. */
29630 if (GET_CODE (x) == CONST_DOUBLE &&
29631 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29632 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29634 long k[4];
29636 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29637 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29638 else
29639 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29641 if (TARGET_64BIT)
29643 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29644 fputs (DOUBLE_INT_ASM_OP, file);
29645 else
29646 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29647 k[0] & 0xffffffff, k[1] & 0xffffffff,
29648 k[2] & 0xffffffff, k[3] & 0xffffffff);
29649 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29650 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29651 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29652 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29653 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29654 return;
29656 else
29658 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29659 fputs ("\t.long ", file);
29660 else
29661 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29662 k[0] & 0xffffffff, k[1] & 0xffffffff,
29663 k[2] & 0xffffffff, k[3] & 0xffffffff);
29664 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29665 k[0] & 0xffffffff, k[1] & 0xffffffff,
29666 k[2] & 0xffffffff, k[3] & 0xffffffff);
29667 return;
29670 else if (GET_CODE (x) == CONST_DOUBLE &&
29671 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29673 long k[2];
29675 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29676 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29677 else
29678 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29680 if (TARGET_64BIT)
29682 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29683 fputs (DOUBLE_INT_ASM_OP, file);
29684 else
29685 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29686 k[0] & 0xffffffff, k[1] & 0xffffffff);
29687 fprintf (file, "0x%lx%08lx\n",
29688 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29689 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29690 return;
29692 else
29694 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29695 fputs ("\t.long ", file);
29696 else
29697 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29698 k[0] & 0xffffffff, k[1] & 0xffffffff);
29699 fprintf (file, "0x%lx,0x%lx\n",
29700 k[0] & 0xffffffff, k[1] & 0xffffffff);
29701 return;
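/* For illustration (a sketch): the DFmode constant 1.0 has
   k[0] = 0x3ff00000 and k[1] = 0, so 64-bit AIX emits

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   whereas -mminimal-toc and ELF emit only the bare doubleword.  */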
29704 else if (GET_CODE (x) == CONST_DOUBLE &&
29705 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29707 long l;
29709 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29710 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29711 else
29712 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29714 if (TARGET_64BIT)
29716 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29717 fputs (DOUBLE_INT_ASM_OP, file);
29718 else
29719 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29720 if (WORDS_BIG_ENDIAN)
29721 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29722 else
29723 fprintf (file, "0x%lx\n", l & 0xffffffff);
29724 return;
29726 else
29728 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29729 fputs ("\t.long ", file);
29730 else
29731 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29732 fprintf (file, "0x%lx\n", l & 0xffffffff);
29733 return;
29736 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29738 unsigned HOST_WIDE_INT low;
29739 HOST_WIDE_INT high;
29741 low = INTVAL (x) & 0xffffffff;
29742 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29744 /* TOC entries are always Pmode-sized, so when big-endian
29745 smaller integer constants in the TOC need to be padded.
29746 (This is still a win over putting the constants in
29747 a separate constant pool, because then we'd have
29748 to have both a TOC entry _and_ the actual constant.)
29750 For a 32-bit target, CONST_INT values are loaded and shifted
29751 entirely within `low' and can be stored in one TOC entry. */
29753 /* It would be easy to make this work, but it doesn't now. */
29754 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29756 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29758 low |= high << 32;
29759 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29760 high = (HOST_WIDE_INT) low >> 32;
29761 low &= 0xffffffff;
29764 if (TARGET_64BIT)
29766 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29767 fputs (DOUBLE_INT_ASM_OP, file);
29768 else
29769 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29770 (long) high & 0xffffffff, (long) low & 0xffffffff);
29771 fprintf (file, "0x%lx%08lx\n",
29772 (long) high & 0xffffffff, (long) low & 0xffffffff);
29773 return;
29775 else
29777 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29779 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29780 fputs ("\t.long ", file);
29781 else
29782 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29783 (long) high & 0xffffffff, (long) low & 0xffffffff);
29784 fprintf (file, "0x%lx,0x%lx\n",
29785 (long) high & 0xffffffff, (long) low & 0xffffffff);
29787 else
29789 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29790 fputs ("\t.long ", file);
29791 else
29792 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29793 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29795 return;
29799 if (GET_CODE (x) == CONST)
29801 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29802 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29804 base = XEXP (XEXP (x, 0), 0);
29805 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29808 switch (GET_CODE (base))
29810 case SYMBOL_REF:
29811 name = XSTR (base, 0);
29812 break;
29814 case LABEL_REF:
29815 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29816 CODE_LABEL_NUMBER (XEXP (base, 0)));
29817 break;
29819 case CODE_LABEL:
29820 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29821 break;
29823 default:
29824 gcc_unreachable ();
29827 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29828 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29829 else
29831 fputs ("\t.tc ", file);
29832 RS6000_OUTPUT_BASENAME (file, name);
29834 if (offset < 0)
29835 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29836 else if (offset)
29837 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29839 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29840 after other TOC symbols, reducing overflow of small TOC access
29841 to [TC] symbols. */
29842 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29843 ? "[TE]," : "[TC],", file);
29846 /* Currently C++ toc references to vtables can be emitted before it
29847 is decided whether the vtable is public or private. If this is
29848 the case, then the linker will eventually complain that there is
29849 a TOC reference to an unknown section. Thus, for vtables only,
29850 we emit the TOC reference to reference the symbol and not the
29851 section. */
29852 if (VTABLE_NAME_P (name))
29854 RS6000_OUTPUT_BASENAME (file, name);
29855 if (offset < 0)
29856 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29857 else if (offset > 0)
29858 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29860 else
29861 output_addr_const (file, x);
29863 #if HAVE_AS_TLS
29864 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29866 switch (SYMBOL_REF_TLS_MODEL (base))
29868 case 0:
29869 break;
29870 case TLS_MODEL_LOCAL_EXEC:
29871 fputs ("@le", file);
29872 break;
29873 case TLS_MODEL_INITIAL_EXEC:
29874 fputs ("@ie", file);
29875 break;
29876 /* Use global-dynamic for local-dynamic. */
29877 case TLS_MODEL_GLOBAL_DYNAMIC:
29878 case TLS_MODEL_LOCAL_DYNAMIC:
29879 putc ('\n', file);
29880 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29881 fputs ("\t.tc .", file);
29882 RS6000_OUTPUT_BASENAME (file, name);
29883 fputs ("[TC],", file);
29884 output_addr_const (file, x);
29885 fputs ("@m", file);
29886 break;
29887 default:
29888 gcc_unreachable ();
29891 #endif
29893 putc ('\n', file);
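/* As an example of the non-TLS path above (a sketch with an
   arbitrary label number): a TOC entry for `foo + 8' on AIX emits

	LC..42:
		.tc foo.P8[TC],foo+8

   where .P8 encodes the positive offset into the entry's name.  */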
29896 /* Output an assembler pseudo-op to write an ASCII string of N characters
29897 starting at P to FILE.
29899 On the RS/6000, we have to do this using the .byte operation and
29900 write out special characters outside the quoted string.
29901 Also, the assembler is broken; very long strings are truncated,
29902 so we must artificially break them up early. */
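/* For example, output_ascii (file, "Hi\n", 3) produces

	.byte "Hi"
	.byte 10

   printable characters are grouped into a quoted string, and other
   characters are written as decimal byte values.  */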
29904 void
29905 output_ascii (FILE *file, const char *p, int n)
29907 char c;
29908 int i, count_string;
29909 const char *for_string = "\t.byte \"";
29910 const char *for_decimal = "\t.byte ";
29911 const char *to_close = NULL;
29913 count_string = 0;
29914 for (i = 0; i < n; i++)
29916 c = *p++;
29917 if (c >= ' ' && c < 0177)
29919 if (for_string)
29920 fputs (for_string, file);
29921 putc (c, file);
29923 /* Write two quotes to get one. */
29924 if (c == '"')
29926 putc (c, file);
29927 ++count_string;
29930 for_string = NULL;
29931 for_decimal = "\"\n\t.byte ";
29932 to_close = "\"\n";
29933 ++count_string;
29935 if (count_string >= 512)
29937 fputs (to_close, file);
29939 for_string = "\t.byte \"";
29940 for_decimal = "\t.byte ";
29941 to_close = NULL;
29942 count_string = 0;
29945 else
29947 if (for_decimal)
29948 fputs (for_decimal, file);
29949 fprintf (file, "%d", c);
29951 for_string = "\n\t.byte \"";
29952 for_decimal = ", ";
29953 to_close = "\n";
29954 count_string = 0;
29958 /* Now close the string if we have written one. Then end the line. */
29959 if (to_close)
29960 fputs (to_close, file);
29963 /* Generate a unique section name for FILENAME for a section type
29964 represented by SECTION_DESC. Output goes into BUF.
29966 SECTION_DESC can be any string, as long as it is different for each
29967 possible section type.
29969 We name the section in the same manner as xlc. The name begins with an
29970 underscore followed by the filename (after stripping any leading directory
29971 names) with the last period replaced by the string SECTION_DESC. If
29972 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29973 the name. */
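/* For example (a sketch), rs6000_gen_section_name (&buf,
   "../src/foo.c", "ro_") sets *buf to "_fooro_": the directories
   are dropped and the final ".c" is replaced by SECTION_DESC.  */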
29975 void
29976 rs6000_gen_section_name (char **buf, const char *filename,
29977 const char *section_desc)
29979 const char *q, *after_last_slash, *last_period = 0;
29980 char *p;
29981 int len;
29983 after_last_slash = filename;
29984 for (q = filename; *q; q++)
29986 if (*q == '/')
29987 after_last_slash = q + 1;
29988 else if (*q == '.')
29989 last_period = q;
29992 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29993 *buf = (char *) xmalloc (len);
29995 p = *buf;
29996 *p++ = '_';
29998 for (q = after_last_slash; *q; q++)
30000 if (q == last_period)
30002 strcpy (p, section_desc);
30003 p += strlen (section_desc);
30004 break;
30007 else if (ISALNUM (*q))
30008 *p++ = *q;
30011 if (last_period == 0)
30012 strcpy (p, section_desc);
30013 else
30014 *p = '\0';
30017 /* Emit profile function. */
30019 void
30020 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30022 /* Non-standard profiling for kernels, which just saves LR then calls
30023 _mcount without worrying about arg saves. The idea is to change
30024 the function prologue as little as possible as it isn't easy to
30025 account for arg save/restore code added just for _mcount. */
30026 if (TARGET_PROFILE_KERNEL)
30027 return;
30029 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30031 #ifndef NO_PROFILE_COUNTERS
30032 # define NO_PROFILE_COUNTERS 0
30033 #endif
30034 if (NO_PROFILE_COUNTERS)
30035 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30036 LCT_NORMAL, VOIDmode);
30037 else
30039 char buf[30];
30040 const char *label_name;
30041 rtx fun;
30043 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30044 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30045 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30047 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30048 LCT_NORMAL, VOIDmode, fun, Pmode);
30051 else if (DEFAULT_ABI == ABI_DARWIN)
30053 const char *mcount_name = RS6000_MCOUNT;
30054 int caller_addr_regno = LR_REGNO;
30056 /* Be conservative and always set this, at least for now. */
30057 crtl->uses_pic_offset_table = 1;
30059 #if TARGET_MACHO
30060 /* For PIC code, set up a stub and collect the caller's address
30061 from r0, which is where the prologue puts it. */
30062 if (MACHOPIC_INDIRECT
30063 && crtl->uses_pic_offset_table)
30064 caller_addr_regno = 0;
30065 #endif
30066 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30067 LCT_NORMAL, VOIDmode,
30068 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30072 /* Write function profiler code. */
30074 void
30075 output_function_profiler (FILE *file, int labelno)
30077 char buf[100];
30079 switch (DEFAULT_ABI)
30081 default:
30082 gcc_unreachable ();
30084 case ABI_V4:
30085 if (!TARGET_32BIT)
30087 warning (0, "no profiling of 64-bit code for this ABI");
30088 return;
30090 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30091 fprintf (file, "\tmflr %s\n", reg_names[0]);
30092 if (NO_PROFILE_COUNTERS)
30094 asm_fprintf (file, "\tstw %s,4(%s)\n",
30095 reg_names[0], reg_names[1]);
30097 else if (TARGET_SECURE_PLT && flag_pic)
30099 if (TARGET_LINK_STACK)
30101 char name[32];
30102 get_ppc476_thunk_name (name);
30103 asm_fprintf (file, "\tbl %s\n", name);
30105 else
30106 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30107 asm_fprintf (file, "\tstw %s,4(%s)\n",
30108 reg_names[0], reg_names[1]);
30109 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30110 asm_fprintf (file, "\taddis %s,%s,",
30111 reg_names[12], reg_names[12]);
30112 assemble_name (file, buf);
30113 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30114 assemble_name (file, buf);
30115 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30117 else if (flag_pic == 1)
30119 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30120 asm_fprintf (file, "\tstw %s,4(%s)\n",
30121 reg_names[0], reg_names[1]);
30122 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30123 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30124 assemble_name (file, buf);
30125 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30127 else if (flag_pic > 1)
30129 asm_fprintf (file, "\tstw %s,4(%s)\n",
30130 reg_names[0], reg_names[1]);
30131 /* Now, we need to get the address of the label. */
30132 if (TARGET_LINK_STACK)
30134 char name[32];
30135 get_ppc476_thunk_name (name);
30136 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30137 assemble_name (file, buf);
30138 fputs ("-.\n1:", file);
30139 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30140 asm_fprintf (file, "\taddi %s,%s,4\n",
30141 reg_names[11], reg_names[11]);
30143 else
30145 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30146 assemble_name (file, buf);
30147 fputs ("-.\n1:", file);
30148 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30150 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30151 reg_names[0], reg_names[11]);
30152 asm_fprintf (file, "\tadd %s,%s,%s\n",
30153 reg_names[0], reg_names[0], reg_names[11]);
30155 else
30157 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30158 assemble_name (file, buf);
30159 fputs ("@ha\n", file);
30160 asm_fprintf (file, "\tstw %s,4(%s)\n",
30161 reg_names[0], reg_names[1]);
30162 asm_fprintf (file, "\tla %s,", reg_names[0]);
30163 assemble_name (file, buf);
30164 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30167 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30168 fprintf (file, "\tbl %s%s\n",
30169 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30170 break;
30172 case ABI_AIX:
30173 case ABI_ELFv2:
30174 case ABI_DARWIN:
30175 /* Don't do anything, done in output_profile_hook (). */
30176 break;
30182 /* The following variable holds the last issued insn. */
30184 static rtx_insn *last_scheduled_insn;
30186 /* The following variable helps to balance issuing of load and
30187 store instructions. */
30189 static int load_store_pendulum;
30191 /* The following variable helps pair divide insns during scheduling. */
30192 static int divide_cnt;
30193 /* The following variable helps pair and alternate vector and vector load
30194 insns during scheduling. */
30195 static int vec_pairing;
30198 /* Power4 load update and store update instructions are cracked into a
30199 load or store and an integer insn which are executed in the same cycle.
30200 Branches have their own dispatch slot which does not count against the
30201 GCC issue rate, but it changes the program flow so there are no other
30202 instructions to issue in this cycle. */
30204 static int
30205 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30207 last_scheduled_insn = insn;
30208 if (GET_CODE (PATTERN (insn)) == USE
30209 || GET_CODE (PATTERN (insn)) == CLOBBER)
30211 cached_can_issue_more = more;
30212 return cached_can_issue_more;
30215 if (insn_terminates_group_p (insn, current_group))
30217 cached_can_issue_more = 0;
30218 return cached_can_issue_more;
30221 /* If the insn has no reservation (it is not recognized) but we reach here anyway, leave the issue count unchanged. */
30222 if (recog_memoized (insn) < 0)
30223 return more;
30225 if (rs6000_sched_groups)
30227 if (is_microcoded_insn (insn))
30228 cached_can_issue_more = 0;
30229 else if (is_cracked_insn (insn))
30230 cached_can_issue_more = more > 2 ? more - 2 : 0;
30231 else
30232 cached_can_issue_more = more - 1;
30234 return cached_can_issue_more;
30237 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30238 return 0;
30240 cached_can_issue_more = more - 1;
30241 return cached_can_issue_more;
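/* For example (a sketch), with an issue rate of 5 on a
   dispatch-group CPU: a cracked insn takes two slots and leaves 3,
   a microcoded insn ends the group and leaves 0, and an ordinary
   insn leaves 4.  */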
30244 static int
30245 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30247 int r = rs6000_variable_issue_1 (insn, more);
30248 if (verbose)
30249 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30250 return r;
30253 /* Adjust the cost of a scheduling dependency. Return the new cost of
30254 a dependency of type DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
30256 static int
30257 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30258 unsigned int)
30260 enum attr_type attr_type;
30262 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30263 return cost;
30265 switch (dep_type)
30267 case REG_DEP_TRUE:
30269 /* Data dependency; DEP_INSN writes a register that INSN reads
30270 some cycles later. */
30272 /* Separate a load from a narrower, dependent store. */
30273 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30274 && GET_CODE (PATTERN (insn)) == SET
30275 && GET_CODE (PATTERN (dep_insn)) == SET
30276 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30277 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30278 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30279 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30280 return cost + 14;
30282 attr_type = get_attr_type (insn);
30284 switch (attr_type)
30286 case TYPE_JMPREG:
30287 /* Tell the first scheduling pass about the latency between
30288 a mtctr and bctr (and mtlr and br/blr). The first
30289 scheduling pass will not know about this latency since
30290 the mtctr instruction, which has the latency associated
30291 to it, will be generated by reload. */
30292 return 4;
30293 case TYPE_BRANCH:
30294 /* Leave some extra cycles between a compare and its
30295 dependent branch, to inhibit expensive mispredicts. */
30296 if ((rs6000_cpu_attr == CPU_PPC603
30297 || rs6000_cpu_attr == CPU_PPC604
30298 || rs6000_cpu_attr == CPU_PPC604E
30299 || rs6000_cpu_attr == CPU_PPC620
30300 || rs6000_cpu_attr == CPU_PPC630
30301 || rs6000_cpu_attr == CPU_PPC750
30302 || rs6000_cpu_attr == CPU_PPC7400
30303 || rs6000_cpu_attr == CPU_PPC7450
30304 || rs6000_cpu_attr == CPU_PPCE5500
30305 || rs6000_cpu_attr == CPU_PPCE6500
30306 || rs6000_cpu_attr == CPU_POWER4
30307 || rs6000_cpu_attr == CPU_POWER5
30308 || rs6000_cpu_attr == CPU_POWER7
30309 || rs6000_cpu_attr == CPU_POWER8
30310 || rs6000_cpu_attr == CPU_POWER9
30311 || rs6000_cpu_attr == CPU_CELL)
30312 && recog_memoized (dep_insn)
30313 && (INSN_CODE (dep_insn) >= 0))
30315 switch (get_attr_type (dep_insn))
30317 case TYPE_CMP:
30318 case TYPE_FPCOMPARE:
30319 case TYPE_CR_LOGICAL:
30320 case TYPE_DELAYED_CR:
30321 return cost + 2;
30322 case TYPE_EXTS:
30323 case TYPE_MUL:
30324 if (get_attr_dot (dep_insn) == DOT_YES)
30325 return cost + 2;
30326 else
30327 break;
30328 case TYPE_SHIFT:
30329 if (get_attr_dot (dep_insn) == DOT_YES
30330 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30331 return cost + 2;
30332 else
30333 break;
30334 default:
30335 break;
30337 break;
30339 case TYPE_STORE:
30340 case TYPE_FPSTORE:
30341 if ((rs6000_cpu == PROCESSOR_POWER6)
30342 && recog_memoized (dep_insn)
30343 && (INSN_CODE (dep_insn) >= 0))
30346 if (GET_CODE (PATTERN (insn)) != SET)
30347 /* If this happens, we have to extend this to schedule
30348 optimally. Return default for now. */
30349 return cost;
30351 /* Adjust the cost for the case where the value written
30352 by a fixed point operation is used as the address
30353 gen value on a store. */
30354 switch (get_attr_type (dep_insn))
30356 case TYPE_LOAD:
30357 case TYPE_CNTLZ:
30359 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30360 return get_attr_sign_extend (dep_insn)
30361 == SIGN_EXTEND_YES ? 6 : 4;
30362 break;
30364 case TYPE_SHIFT:
30366 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30367 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30368 6 : 3;
30369 break;
30371 case TYPE_INTEGER:
30372 case TYPE_ADD:
30373 case TYPE_LOGICAL:
30374 case TYPE_EXTS:
30375 case TYPE_INSERT:
30377 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30378 return 3;
30379 break;
30381 case TYPE_STORE:
30382 case TYPE_FPLOAD:
30383 case TYPE_FPSTORE:
30385 if (get_attr_update (dep_insn) == UPDATE_YES
30386 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30387 return 3;
30388 break;
30390 case TYPE_MUL:
30392 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30393 return 17;
30394 break;
30396 case TYPE_DIV:
30398 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30399 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30400 break;
30402 default:
30403 break;
30406 break;
30408 case TYPE_LOAD:
30409 if ((rs6000_cpu == PROCESSOR_POWER6)
30410 && recog_memoized (dep_insn)
30411 && (INSN_CODE (dep_insn) >= 0))
30414 /* Adjust the cost for the case where the value written
30415 by a fixed point instruction is used within the address
30416 gen portion of a subsequent load(u)(x) */
30417 switch (get_attr_type (dep_insn))
30419 case TYPE_LOAD:
30420 case TYPE_CNTLZ:
30422 if (set_to_load_agen (dep_insn, insn))
30423 return get_attr_sign_extend (dep_insn)
30424 == SIGN_EXTEND_YES ? 6 : 4;
30425 break;
30427 case TYPE_SHIFT:
30429 if (set_to_load_agen (dep_insn, insn))
30430 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30431 6 : 3;
30432 break;
30434 case TYPE_INTEGER:
30435 case TYPE_ADD:
30436 case TYPE_LOGICAL:
30437 case TYPE_EXTS:
30438 case TYPE_INSERT:
30440 if (set_to_load_agen (dep_insn, insn))
30441 return 3;
30442 break;
30444 case TYPE_STORE:
30445 case TYPE_FPLOAD:
30446 case TYPE_FPSTORE:
30448 if (get_attr_update (dep_insn) == UPDATE_YES
30449 && set_to_load_agen (dep_insn, insn))
30450 return 3;
30451 break;
30453 case TYPE_MUL:
30455 if (set_to_load_agen (dep_insn, insn))
30456 return 17;
30457 break;
30459 case TYPE_DIV:
30461 if (set_to_load_agen (dep_insn, insn))
30462 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30463 break;
30465 default:
30466 break;
30469 break;
30471 case TYPE_FPLOAD:
30472 if ((rs6000_cpu == PROCESSOR_POWER6)
30473 && get_attr_update (insn) == UPDATE_NO
30474 && recog_memoized (dep_insn)
30475 && (INSN_CODE (dep_insn) >= 0)
30476 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30477 return 2;
30479 default:
30480 break;
30483 /* Fall out to return default cost. */
30485 break;
30487 case REG_DEP_OUTPUT:
30488 /* Output dependency; DEP_INSN writes a register that INSN writes some
30489 cycles later. */
30490 if ((rs6000_cpu == PROCESSOR_POWER6)
30491 && recog_memoized (dep_insn)
30492 && (INSN_CODE (dep_insn) >= 0))
30494 attr_type = get_attr_type (insn);
30496 switch (attr_type)
30498 case TYPE_FP:
30499 case TYPE_FPSIMPLE:
30500 if (get_attr_type (dep_insn) == TYPE_FP
30501 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30502 return 1;
30503 break;
30504 case TYPE_FPLOAD:
30505 if (get_attr_update (insn) == UPDATE_NO
30506 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30507 return 2;
30508 break;
30509 default:
30510 break;
30513 /* Fall through, no cost for output dependency. */
30514 /* FALLTHRU */
30516 case REG_DEP_ANTI:
30517 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30518 cycles later. */
30519 return 0;
30521 default:
30522 gcc_unreachable ();
30525 return cost;
30528 /* Debug version of rs6000_adjust_cost. */
30530 static int
30531 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30532 int cost, unsigned int dw)
30534 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30536 if (ret != cost)
30538 const char *dep;
30540 switch (dep_type)
30542 default: dep = "unknown dependency"; break;
30543 case REG_DEP_TRUE: dep = "data dependency"; break;
30544 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30545 case REG_DEP_ANTI: dep = "anti dependency"; break;
30548 fprintf (stderr,
30549 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30550 "%s, insn:\n", ret, cost, dep);
30552 debug_rtx (insn);
30555 return ret;
30558 /* The function returns true if INSN is microcoded.
30559 Return false otherwise. */
30561 static bool
30562 is_microcoded_insn (rtx_insn *insn)
30564 if (!insn || !NONDEBUG_INSN_P (insn)
30565 || GET_CODE (PATTERN (insn)) == USE
30566 || GET_CODE (PATTERN (insn)) == CLOBBER)
30567 return false;
30569 if (rs6000_cpu_attr == CPU_CELL)
30570 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30572 if (rs6000_sched_groups
30573 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30575 enum attr_type type = get_attr_type (insn);
30576 if ((type == TYPE_LOAD
30577 && get_attr_update (insn) == UPDATE_YES
30578 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30579 || ((type == TYPE_LOAD || type == TYPE_STORE)
30580 && get_attr_update (insn) == UPDATE_YES
30581 && get_attr_indexed (insn) == INDEXED_YES)
30582 || type == TYPE_MFCR)
30583 return true;
30586 return false;
30589 /* The function returns true if INSN is cracked into 2 instructions
30590 by the processor (and therefore occupies 2 issue slots). */
30592 static bool
30593 is_cracked_insn (rtx_insn *insn)
30595 if (!insn || !NONDEBUG_INSN_P (insn)
30596 || GET_CODE (PATTERN (insn)) == USE
30597 || GET_CODE (PATTERN (insn)) == CLOBBER)
30598 return false;
30600 if (rs6000_sched_groups
30601 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30603 enum attr_type type = get_attr_type (insn);
30604 if ((type == TYPE_LOAD
30605 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30606 && get_attr_update (insn) == UPDATE_NO)
30607 || (type == TYPE_LOAD
30608 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30609 && get_attr_update (insn) == UPDATE_YES
30610 && get_attr_indexed (insn) == INDEXED_NO)
30611 || (type == TYPE_STORE
30612 && get_attr_update (insn) == UPDATE_YES
30613 && get_attr_indexed (insn) == INDEXED_NO)
30614 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30615 && get_attr_update (insn) == UPDATE_YES)
30616 || type == TYPE_DELAYED_CR
30617 || (type == TYPE_EXTS
30618 && get_attr_dot (insn) == DOT_YES)
30619 || (type == TYPE_SHIFT
30620 && get_attr_dot (insn) == DOT_YES
30621 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30622 || (type == TYPE_MUL
30623 && get_attr_dot (insn) == DOT_YES)
30624 || type == TYPE_DIV
30625 || (type == TYPE_INSERT
30626 && get_attr_size (insn) == SIZE_32))
30627 return true;
30630 return false;
30633 /* The function returns true if INSN can be issued only from
30634 the branch slot. */
30636 static bool
30637 is_branch_slot_insn (rtx_insn *insn)
30639 if (!insn || !NONDEBUG_INSN_P (insn)
30640 || GET_CODE (PATTERN (insn)) == USE
30641 || GET_CODE (PATTERN (insn)) == CLOBBER)
30642 return false;
30644 if (rs6000_sched_groups)
30646 enum attr_type type = get_attr_type (insn);
30647 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30648 return true;
30649 return false;
30652 return false;
30655 /* The function returns true if OUT_INSN sets a value that is
30656 used in the address generation computation of IN_INSN. */
30657 static bool
30658 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30660 rtx out_set, in_set;
30662 /* For performance reasons, only handle the simple case where
30663 both loads are a single_set. */
30664 out_set = single_set (out_insn);
30665 if (out_set)
30667 in_set = single_set (in_insn);
30668 if (in_set)
30669 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30672 return false;
30675 /* Try to determine base/offset/size parts of the given MEM.
30676 Return true if successful, false if the values couldn't
30677 all be determined.
30679 This function only looks for REG or REG+CONST address forms.
30680 REG+REG address form will return false. */
30682 static bool
30683 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30684 HOST_WIDE_INT *size)
30686 rtx addr_rtx;
30687 if (MEM_SIZE_KNOWN_P (mem))
30688 *size = MEM_SIZE (mem);
30689 else
30690 return false;
30692 addr_rtx = (XEXP (mem, 0));
30693 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30694 addr_rtx = XEXP (addr_rtx, 1);
30696 *offset = 0;
30697 while (GET_CODE (addr_rtx) == PLUS
30698 && CONST_INT_P (XEXP (addr_rtx, 1)))
30700 *offset += INTVAL (XEXP (addr_rtx, 1));
30701 addr_rtx = XEXP (addr_rtx, 0);
30703 if (!REG_P (addr_rtx))
30704 return false;
30706 *base = addr_rtx;
30707 return true;
30710 /* Return true if the target storage location of MEM1 is
30711 adjacent to the target storage location of MEM2. */
30714 static bool
30715 adjacent_mem_locations (rtx mem1, rtx mem2)
30717 rtx reg1, reg2;
30718 HOST_WIDE_INT off1, size1, off2, size2;
30720 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30721 && get_memref_parts (mem2, &reg2, &off2, &size2))
30722 return ((REGNO (reg1) == REGNO (reg2))
30723 && ((off1 + size1 == off2)
30724 || (off2 + size2 == off1)));
30726 return false;
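/* For example, a 4-byte access at 16(r9) and a 4-byte access at
   20(r9) are adjacent (off1 + size1 == off2); the same offsets from
   different base registers are treated as not adjacent.  */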
30729 /* This function returns true if it can be determined that the two MEM
30730 locations overlap by at least 1 byte based on base reg/offset/size. */
30732 static bool
30733 mem_locations_overlap (rtx mem1, rtx mem2)
30735 rtx reg1, reg2;
30736 HOST_WIDE_INT off1, size1, off2, size2;
30738 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30739 && get_memref_parts (mem2, &reg2, &off2, &size2))
30740 return ((REGNO (reg1) == REGNO (reg2))
30741 && (((off1 <= off2) && (off1 + size1 > off2))
30742 || ((off2 <= off1) && (off2 + size2 > off1))));
30744 return false;
30747 /* A C statement (sans semicolon) to update the integer scheduling
30748 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30749 INSN earlier, reduce the priority to execute INSN later. Do not
30750 define this macro if you do not need to adjust the scheduling
30751 priorities of insns. */
30753 static int
30754 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30756 rtx load_mem, str_mem;
30757 /* On machines (like the 750) which have asymmetric integer units,
30758 where one integer unit can do multiply and divides and the other
30759 can't, reduce the priority of multiply/divide so it is scheduled
30760 before other integer operations. */
30762 #if 0
30763 if (! INSN_P (insn))
30764 return priority;
30766 if (GET_CODE (PATTERN (insn)) == USE)
30767 return priority;
30769 switch (rs6000_cpu_attr) {
30770 case CPU_PPC750:
30771 switch (get_attr_type (insn))
30773 default:
30774 break;
30776 case TYPE_MUL:
30777 case TYPE_DIV:
30778 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30779 priority, priority);
30780 if (priority >= 0 && priority < 0x01000000)
30781 priority >>= 3;
30782 break;
30785 #endif
30787 if (insn_must_be_first_in_group (insn)
30788 && reload_completed
30789 && current_sched_info->sched_max_insns_priority
30790 && rs6000_sched_restricted_insns_priority)
30793 /* Prioritize insns that can be dispatched only in the first
30794 dispatch slot. */
30795 if (rs6000_sched_restricted_insns_priority == 1)
30796 /* Attach highest priority to insn. This means that in
30797 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30798 precede 'priority' (critical path) considerations. */
30799 return current_sched_info->sched_max_insns_priority;
30800 else if (rs6000_sched_restricted_insns_priority == 2)
30801 /* Increase priority of insn by a minimal amount. This means that in
30802 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30803 considerations precede dispatch-slot restriction considerations. */
30804 return (priority + 1);
30807 if (rs6000_cpu == PROCESSOR_POWER6
30808 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30809 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30810 /* Attach highest priority to insn if the scheduler has just issued two
30811 stores and this instruction is a load, or two loads and this instruction
30812 is a store. Power6 wants loads and stores scheduled alternately
30813 when possible */
30814 return current_sched_info->sched_max_insns_priority;
30816 return priority;
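/* Worked example (assuming sched_max_insns_priority is 8): with
   -mprioritize-restricted-insns=1, an insn that must lead a dispatch
   group gets priority 8 and jumps ahead of critical-path ordering in
   ready_sort; with the option set to 2, an insn of priority 5 merely
   becomes 6, so slot restrictions only break critical-path ties.  */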
30819 /* Return true if the instruction is nonpipelined on the Cell. */
30820 static bool
30821 is_nonpipeline_insn (rtx_insn *insn)
30823 enum attr_type type;
30824 if (!insn || !NONDEBUG_INSN_P (insn)
30825 || GET_CODE (PATTERN (insn)) == USE
30826 || GET_CODE (PATTERN (insn)) == CLOBBER)
30827 return false;
30829 type = get_attr_type (insn);
30830 if (type == TYPE_MUL
30831 || type == TYPE_DIV
30832 || type == TYPE_SDIV
30833 || type == TYPE_DDIV
30834 || type == TYPE_SSQRT
30835 || type == TYPE_DSQRT
30836 || type == TYPE_MFCR
30837 || type == TYPE_MFCRF
30838 || type == TYPE_MFJMPR)
30840 return true;
30842 return false;
30846 /* Return how many instructions the machine can issue per cycle. */
30848 static int
30849 rs6000_issue_rate (void)
30851 /* Unless scheduling for register pressure, use issue rate of 1 for
30852 first scheduling pass to decrease degradation. */
30853 if (!reload_completed && !flag_sched_pressure)
30854 return 1;
30856 switch (rs6000_cpu_attr) {
30857 case CPU_RS64A:
30858 case CPU_PPC601: /* ? */
30859 case CPU_PPC7450:
30860 return 3;
30861 case CPU_PPC440:
30862 case CPU_PPC603:
30863 case CPU_PPC750:
30864 case CPU_PPC7400:
30865 case CPU_PPC8540:
30866 case CPU_PPC8548:
30867 case CPU_CELL:
30868 case CPU_PPCE300C2:
30869 case CPU_PPCE300C3:
30870 case CPU_PPCE500MC:
30871 case CPU_PPCE500MC64:
30872 case CPU_PPCE5500:
30873 case CPU_PPCE6500:
30874 case CPU_TITAN:
30875 return 2;
30876 case CPU_PPC476:
30877 case CPU_PPC604:
30878 case CPU_PPC604E:
30879 case CPU_PPC620:
30880 case CPU_PPC630:
30881 return 4;
30882 case CPU_POWER4:
30883 case CPU_POWER5:
30884 case CPU_POWER6:
30885 case CPU_POWER7:
30886 return 5;
30887 case CPU_POWER8:
30888 return 7;
30889 case CPU_POWER9:
30890 return 6;
30891 default:
30892 return 1;
30896 /* Return how many instructions to look ahead for better insn
30897 scheduling. */
30899 static int
30900 rs6000_use_sched_lookahead (void)
30902 switch (rs6000_cpu_attr)
30904 case CPU_PPC8540:
30905 case CPU_PPC8548:
30906 return 4;
30908 case CPU_CELL:
30909 return (reload_completed ? 8 : 0);
30911 default:
30912 return 0;
30916 /* We are choosing an insn from the ready queue. Return zero if INSN can be
30917 chosen. */
30918 static int
30919 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30921 if (ready_index == 0)
30922 return 0;
30924 if (rs6000_cpu_attr != CPU_CELL)
30925 return 0;
30927 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30929 if (!reload_completed
30930 || is_nonpipeline_insn (insn)
30931 || is_microcoded_insn (insn))
30932 return 1;
30934 return 0;
30937 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30938 and return true. */
30940 static bool
30941 find_mem_ref (rtx pat, rtx *mem_ref)
30943 const char * fmt;
30944 int i, j;
30946 /* stack_tie does not produce any real memory traffic. */
30947 if (tie_operand (pat, VOIDmode))
30948 return false;
30950 if (GET_CODE (pat) == MEM)
30952 *mem_ref = pat;
30953 return true;
30956 /* Recursively process the pattern. */
30957 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30959 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30961 if (fmt[i] == 'e')
30963 if (find_mem_ref (XEXP (pat, i), mem_ref))
30964 return true;
30966 else if (fmt[i] == 'E')
30967 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30969 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30970 return true;
30974 return false;
30977 /* Determine if PAT is a PATTERN of a load insn. */
30979 static bool
30980 is_load_insn1 (rtx pat, rtx *load_mem)
30982 if (!pat)
30983 return false;
30985 if (GET_CODE (pat) == SET)
30986 return find_mem_ref (SET_SRC (pat), load_mem);
30988 if (GET_CODE (pat) == PARALLEL)
30990 int i;
30992 for (i = 0; i < XVECLEN (pat, 0); i++)
30993 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30994 return true;
30997 return false;
31000 /* Determine if INSN loads from memory. */
31002 static bool
31003 is_load_insn (rtx insn, rtx *load_mem)
31005 if (!insn || !INSN_P (insn))
31006 return false;
31008 if (CALL_P (insn))
31009 return false;
31011 return is_load_insn1 (PATTERN (insn), load_mem);
31014 /* Determine if PAT is a PATTERN of a store insn. */
31016 static bool
31017 is_store_insn1 (rtx pat, rtx *str_mem)
31019 if (!pat)
31020 return false;
31022 if (GET_CODE (pat) == SET)
31023 return find_mem_ref (SET_DEST (pat), str_mem);
31025 if (GET_CODE (pat) == PARALLEL)
31027 int i;
31029 for (i = 0; i < XVECLEN (pat, 0); i++)
31030 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31031 return true;
31034 return false;
31037 /* Determine if INSN stores to memory. */
31039 static bool
31040 is_store_insn (rtx insn, rtx *str_mem)
31042 if (!insn || !INSN_P (insn))
31043 return false;
31045 return is_store_insn1 (PATTERN (insn), str_mem);
31048 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31050 static bool
31051 is_power9_pairable_vec_type (enum attr_type type)
31053 switch (type)
31055 case TYPE_VECSIMPLE:
31056 case TYPE_VECCOMPLEX:
31057 case TYPE_VECDIV:
31058 case TYPE_VECCMP:
31059 case TYPE_VECPERM:
31060 case TYPE_VECFLOAT:
31061 case TYPE_VECFDIV:
31062 case TYPE_VECDOUBLE:
31063 return true;
31064 default:
31065 break;
31067 return false;
31070 /* Returns whether the dependence between INSN and NEXT is considered
31071 costly by the given target. */
31073 static bool
31074 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31076 rtx insn;
31077 rtx next;
31078 rtx load_mem, str_mem;
31080 /* If the flag is not enabled - no dependence is considered costly;
31081 allow all dependent insns in the same group.
31082 This is the most aggressive option. */
31083 if (rs6000_sched_costly_dep == no_dep_costly)
31084 return false;
31086 /* If the flag is set to 1 - a dependence is always considered costly;
31087 do not allow dependent instructions in the same group.
31088 This is the most conservative option. */
31089 if (rs6000_sched_costly_dep == all_deps_costly)
31090 return true;
31092 insn = DEP_PRO (dep);
31093 next = DEP_CON (dep);
31095 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31096 && is_load_insn (next, &load_mem)
31097 && is_store_insn (insn, &str_mem))
31098 /* Prevent load after store in the same group. */
31099 return true;
31101 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31102 && is_load_insn (next, &load_mem)
31103 && is_store_insn (insn, &str_mem)
31104 && DEP_TYPE (dep) == REG_DEP_TRUE
31105 && mem_locations_overlap(str_mem, load_mem))
31106 /* Prevent load after store in the same group if it is a true
31107 dependence. */
31108 return true;
31110 /* The flag is set to X; dependences with latency >= X are considered costly,
31111 and will not be scheduled in the same group. */
31112 if (rs6000_sched_costly_dep <= max_dep_latency
31113 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31114 return true;
31116 return false;
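/* Worked example for the latency threshold above (assuming
   -msched-costly-dep=3): a dependence of cost 4 whose producer is
   already 1 group away gives cost - distance == 3 >= 3, so the two
   insns are kept in separate dispatch groups; at distance 2 the same
   dependence is no longer treated as costly.  */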
31119 /* Return the next insn after INSN that is found before TAIL is reached,
31120 skipping any "non-active" insns - insns that will not actually occupy
31121 an issue slot. Return NULL_RTX if such an insn is not found. */
31123 static rtx_insn *
31124 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31126 if (insn == NULL_RTX || insn == tail)
31127 return NULL;
31129 while (1)
31131 insn = NEXT_INSN (insn);
31132 if (insn == NULL_RTX || insn == tail)
31133 return NULL;
31135 if (CALL_P (insn)
31136 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31137 || (NONJUMP_INSN_P (insn)
31138 && GET_CODE (PATTERN (insn)) != USE
31139 && GET_CODE (PATTERN (insn)) != CLOBBER
31140 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31141 break;
31143 return insn;
31146 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31148 static int
31149 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31151 int pos;
31152 int i;
31153 rtx_insn *tmp;
31154 enum attr_type type, type2;
31156 type = get_attr_type (last_scheduled_insn);
31158 /* Try to issue fixed point divides back-to-back in pairs so they will be
31159 routed to separate execution units and execute in parallel. */
31160 if (type == TYPE_DIV && divide_cnt == 0)
31162 /* First divide has been scheduled. */
31163 divide_cnt = 1;
31165 /* Scan the ready list looking for another divide, if found move it
31166 to the end of the list so it is chosen next. */
31167 pos = lastpos;
31168 while (pos >= 0)
31170 if (recog_memoized (ready[pos]) >= 0
31171 && get_attr_type (ready[pos]) == TYPE_DIV)
31173 tmp = ready[pos];
31174 for (i = pos; i < lastpos; i++)
31175 ready[i] = ready[i + 1];
31176 ready[lastpos] = tmp;
31177 break;
31179 pos--;
31182 else
31184 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31185 divide_cnt = 0;
31187 /* The best dispatch throughput for vector and vector load insns can be
31188 achieved by interleaving a vector and vector load such that they'll
31189 dispatch to the same superslice. If this pairing cannot be achieved
31190 then it is best to pair vector insns together and vector load insns
31191 together.
31193 To aid in this pairing, vec_pairing maintains the current state with
31194 the following values:
31196 0 : Initial state, no vecload/vector pairing has been started.
31198 1 : A vecload or vector insn has been issued and a candidate for
31199 pairing has been found and moved to the end of the ready
31200 list. */
31201 if (type == TYPE_VECLOAD)
31203 /* Issued a vecload. */
31204 if (vec_pairing == 0)
31206 int vecload_pos = -1;
31207 /* We issued a single vecload, look for a vector insn to pair it
31208 with. If one isn't found, try to pair another vecload. */
31209 pos = lastpos;
31210 while (pos >= 0)
31212 if (recog_memoized (ready[pos]) >= 0)
31214 type2 = get_attr_type (ready[pos]);
31215 if (is_power9_pairable_vec_type (type2))
31217 /* Found a vector insn to pair with, move it to the
31218 end of the ready list so it is scheduled next. */
31219 tmp = ready[pos];
31220 for (i = pos; i < lastpos; i++)
31221 ready[i] = ready[i + 1];
31222 ready[lastpos] = tmp;
31223 vec_pairing = 1;
31224 return cached_can_issue_more;
31226 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31227 /* Remember position of first vecload seen. */
31228 vecload_pos = pos;
31230 pos--;
31232 if (vecload_pos >= 0)
31234 /* Didn't find a vector to pair with but did find a vecload,
31235 move it to the end of the ready list. */
31236 tmp = ready[vecload_pos];
31237 for (i = vecload_pos; i < lastpos; i++)
31238 ready[i] = ready[i + 1];
31239 ready[lastpos] = tmp;
31240 vec_pairing = 1;
31241 return cached_can_issue_more;
31245 else if (is_power9_pairable_vec_type (type))
31247 /* Issued a vector operation. */
31248 if (vec_pairing == 0)
31250 int vec_pos = -1;
31251 /* We issued a single vector insn, look for a vecload to pair it
31252 with. If one isn't found, try to pair another vector. */
31253 pos = lastpos;
31254 while (pos >= 0)
31256 if (recog_memoized (ready[pos]) >= 0)
31258 type2 = get_attr_type (ready[pos]);
31259 if (type2 == TYPE_VECLOAD)
31261 /* Found a vecload insn to pair with, move it to the
31262 end of the ready list so it is scheduled next. */
31263 tmp = ready[pos];
31264 for (i = pos; i < lastpos; i++)
31265 ready[i] = ready[i + 1];
31266 ready[lastpos] = tmp;
31267 vec_pairing = 1;
31268 return cached_can_issue_more;
31270 else if (is_power9_pairable_vec_type (type2)
31271 && vec_pos == -1)
31272 /* Remember position of first vector insn seen. */
31273 vec_pos = pos;
31275 pos--;
31277 if (vec_pos >= 0)
31279 /* Didn't find a vecload to pair with but did find a vector
31280 insn, move it to the end of the ready list. */
31281 tmp = ready[vec_pos];
31282 for (i = vec_pos; i < lastpos; i++)
31283 ready[i] = ready[i + 1];
31284 ready[lastpos] = tmp;
31285 vec_pairing = 1;
31286 return cached_can_issue_more;
31291 /* We've either finished a vec/vecload pair, couldn't find an insn to
31292 continue the current pair, or the last insn had nothing to do
31293 with pairing. In any case, reset the state. */
31294 vec_pairing = 0;
31297 return cached_can_issue_more;
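/* Illustrative trace of the pairing state above (hypothetical ready
   list): a TYPE_VECLOAD issues while vec_pairing == 0, the scan moves
   the first pairable vector insn (say a TYPE_VECSIMPLE) to
   ready[lastpos] and sets vec_pairing = 1; once that partner issues,
   control falls through to the reset below and a new vecload/vector
   pair can be started.  */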
31300 /* We are about to begin issuing insns for this clock cycle. */
31302 static int
31303 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31304 rtx_insn **ready ATTRIBUTE_UNUSED,
31305 int *pn_ready ATTRIBUTE_UNUSED,
31306 int clock_var ATTRIBUTE_UNUSED)
31308 int n_ready = *pn_ready;
31310 if (sched_verbose)
31311 fprintf (dump, "// rs6000_sched_reorder :\n");
31313 /* Reorder the ready list, if the next insn to be chosen
31314 is a nonpipeline insn. */
31315 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31317 if (is_nonpipeline_insn (ready[n_ready - 1])
31318 && (recog_memoized (ready[n_ready - 2]) > 0))
31319 /* Simply swap first two insns. */
31320 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31323 if (rs6000_cpu == PROCESSOR_POWER6)
31324 load_store_pendulum = 0;
31326 return rs6000_issue_rate ();
31329 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31331 static int
31332 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31333 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31335 if (sched_verbose)
31336 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31338 /* For Power6, we need to handle some special cases to try and keep the
31339 store queue from overflowing and triggering expensive flushes.
31341 This code monitors how load and store instructions are being issued
31342 and skews the ready list one way or the other to increase the likelihood
31343 that a desired instruction is issued at the proper time.
31345 A couple of things are done. First, we maintain a "load_store_pendulum"
31346 to track the current state of load/store issue.
31348 - If the pendulum is at zero, then no loads or stores have been
31349 issued in the current cycle so we do nothing.
31351 - If the pendulum is 1, then a single load has been issued in this
31352 cycle and we attempt to locate another load in the ready list to
31353 issue with it.
31355 - If the pendulum is -2, then two stores have already been
31356 issued in this cycle, so we increase the priority of the first load
31357 in the ready list to increase its likelihood of being chosen first
31358 in the next cycle.
31360 - If the pendulum is -1, then a single store has been issued in this
31361 cycle and we attempt to locate another store in the ready list to
31362 issue with it, preferring a store to an adjacent memory location to
31363 facilitate store pairing in the store queue.
31365 - If the pendulum is 2, then two loads have already been
31366 issued in this cycle, so we increase the priority of the first store
31367 in the ready list to increase its likelihood of being chosen first
31368 in the next cycle.
31370 - If the pendulum < -2 or > 2, then do nothing.
31372 Note: This code covers the most common scenarios. There exist
31373 non-load/store instructions which make use of the LSU and which
31374 would need to be accounted for to strictly model the behavior
31375 of the machine. Those instructions are currently unaccounted
31376 for to help minimize the compile time overhead of this code. */
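/* Illustrative trace (hypothetical issue sequence): starting from 0,
   issuing store, store drives the pendulum to -2, so the first load on
   the ready list gets a priority bump; issuing load, load instead
   drives it to 2 and the first store is bumped, steering Power6 toward
   the preferred load/store alternation described above.  */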
31378 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31380 int pos;
31381 int i;
31382 rtx_insn *tmp;
31383 rtx load_mem, str_mem;
31385 if (is_store_insn (last_scheduled_insn, &str_mem))
31386 /* Issuing a store, swing the load_store_pendulum to the left */
31387 load_store_pendulum--;
31388 else if (is_load_insn (last_scheduled_insn, &load_mem))
31389 /* Issuing a load, swing the load_store_pendulum to the right */
31390 load_store_pendulum++;
31391 else
31392 return cached_can_issue_more;
31394 /* If the pendulum is balanced, or there is only one instruction on
31395 the ready list, then all is well, so return. */
31396 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31397 return cached_can_issue_more;
31399 if (load_store_pendulum == 1)
31401 /* A load has been issued in this cycle. Scan the ready list
31402 for another load to issue with it */
31403 pos = *pn_ready-1;
31405 while (pos >= 0)
31407 if (is_load_insn (ready[pos], &load_mem))
31409 /* Found a load. Move it to the head of the ready list,
31410 and adjust its priority so that it is more likely to
31411 stay there */
31412 tmp = ready[pos];
31413 for (i=pos; i<*pn_ready-1; i++)
31414 ready[i] = ready[i + 1];
31415 ready[*pn_ready-1] = tmp;
31417 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31418 INSN_PRIORITY (tmp)++;
31419 break;
31421 pos--;
31424 else if (load_store_pendulum == -2)
31426 /* Two stores have been issued in this cycle. Increase the
31427 priority of the first load in the ready list to favor it for
31428 issuing in the next cycle. */
31429 pos = *pn_ready-1;
31431 while (pos >= 0)
31433 if (is_load_insn (ready[pos], &load_mem)
31434 && !sel_sched_p ()
31435 && INSN_PRIORITY_KNOWN (ready[pos]))
31437 INSN_PRIORITY (ready[pos])++;
31439 /* Adjust the pendulum to account for the fact that a load
31440 was found and increased in priority. This is to prevent
31441 increasing the priority of multiple loads */
31442 load_store_pendulum--;
31444 break;
31446 pos--;
31449 else if (load_store_pendulum == -1)
31451 /* A store has been issued in this cycle. Scan the ready list for
31452 another store to issue with it, preferring a store to an adjacent
31453 memory location */
31454 int first_store_pos = -1;
31456 pos = *pn_ready-1;
31458 while (pos >= 0)
31460 if (is_store_insn (ready[pos], &str_mem))
31462 rtx str_mem2;
31463 /* Maintain the index of the first store found on the
31464 list */
31465 if (first_store_pos == -1)
31466 first_store_pos = pos;
31468 if (is_store_insn (last_scheduled_insn, &str_mem2)
31469 && adjacent_mem_locations (str_mem, str_mem2))
31471 /* Found an adjacent store. Move it to the head of the
31472 ready list, and adjust its priority so that it is
31473 more likely to stay there */
31474 tmp = ready[pos];
31475 for (i=pos; i<*pn_ready-1; i++)
31476 ready[i] = ready[i + 1];
31477 ready[*pn_ready-1] = tmp;
31479 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31480 INSN_PRIORITY (tmp)++;
31482 first_store_pos = -1;
31484 break;
31487 pos--;
31490 if (first_store_pos >= 0)
31492 /* An adjacent store wasn't found, but a non-adjacent store was,
31493 so move the non-adjacent store to the front of the ready
31494 list, and adjust its priority so that it is more likely to
31495 stay there. */
31496 tmp = ready[first_store_pos];
31497 for (i=first_store_pos; i<*pn_ready-1; i++)
31498 ready[i] = ready[i + 1];
31499 ready[*pn_ready-1] = tmp;
31500 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31501 INSN_PRIORITY (tmp)++;
31504 else if (load_store_pendulum == 2)
31506 /* Two loads have been issued in this cycle. Increase the priority
31507 of the first store in the ready list to favor it for issuing in
31508 the next cycle. */
31509 pos = *pn_ready-1;
31511 while (pos >= 0)
31513 if (is_store_insn (ready[pos], &str_mem)
31514 && !sel_sched_p ()
31515 && INSN_PRIORITY_KNOWN (ready[pos]))
31517 INSN_PRIORITY (ready[pos])++;
31519 /* Adjust the pendulum to account for the fact that a store
31520 was found and increased in priority. This is to prevent
31521 increasing the priority of multiple stores */
31522 load_store_pendulum++;
31524 break;
31526 pos--;
31531 /* Do Power9 dependent reordering if necessary. */
31532 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31533 && recog_memoized (last_scheduled_insn) >= 0)
31534 return power9_sched_reorder2 (ready, *pn_ready - 1);
31536 return cached_can_issue_more;
31539 /* Return whether the presence of INSN causes a dispatch group termination
31540 of group WHICH_GROUP.
31542 If WHICH_GROUP == current_group, this function will return true if INSN
31543 causes the termination of the current group (i.e, the dispatch group to
31544 which INSN belongs). This means that INSN will be the last insn in the
31545 group it belongs to.
31547 If WHICH_GROUP == previous_group, this function will return true if INSN
31548 causes the termination of the previous group (i.e, the dispatch group that
31549 precedes the group to which INSN belongs). This means that INSN will be
31550 the first insn in the group it belongs to. */
31552 static bool
31553 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31555 bool first, last;
31557 if (! insn)
31558 return false;
31560 first = insn_must_be_first_in_group (insn);
31561 last = insn_must_be_last_in_group (insn);
31563 if (first && last)
31564 return true;
31566 if (which_group == current_group)
31567 return last;
31568 else if (which_group == previous_group)
31569 return first;
31571 return false;
31575 static bool
31576 insn_must_be_first_in_group (rtx_insn *insn)
31578 enum attr_type type;
31580 if (!insn
31581 || NOTE_P (insn)
31582 || DEBUG_INSN_P (insn)
31583 || GET_CODE (PATTERN (insn)) == USE
31584 || GET_CODE (PATTERN (insn)) == CLOBBER)
31585 return false;
31587 switch (rs6000_cpu)
31589 case PROCESSOR_POWER5:
31590 if (is_cracked_insn (insn))
31591 return true;
31592 /* FALLTHRU */
31593 case PROCESSOR_POWER4:
31594 if (is_microcoded_insn (insn))
31595 return true;
31597 if (!rs6000_sched_groups)
31598 return false;
31600 type = get_attr_type (insn);
31602 switch (type)
31604 case TYPE_MFCR:
31605 case TYPE_MFCRF:
31606 case TYPE_MTCR:
31607 case TYPE_DELAYED_CR:
31608 case TYPE_CR_LOGICAL:
31609 case TYPE_MTJMPR:
31610 case TYPE_MFJMPR:
31611 case TYPE_DIV:
31612 case TYPE_LOAD_L:
31613 case TYPE_STORE_C:
31614 case TYPE_ISYNC:
31615 case TYPE_SYNC:
31616 return true;
31617 default:
31618 break;
31620 break;
31621 case PROCESSOR_POWER6:
31622 type = get_attr_type (insn);
31624 switch (type)
31626 case TYPE_EXTS:
31627 case TYPE_CNTLZ:
31628 case TYPE_TRAP:
31629 case TYPE_MUL:
31630 case TYPE_INSERT:
31631 case TYPE_FPCOMPARE:
31632 case TYPE_MFCR:
31633 case TYPE_MTCR:
31634 case TYPE_MFJMPR:
31635 case TYPE_MTJMPR:
31636 case TYPE_ISYNC:
31637 case TYPE_SYNC:
31638 case TYPE_LOAD_L:
31639 case TYPE_STORE_C:
31640 return true;
31641 case TYPE_SHIFT:
31642 if (get_attr_dot (insn) == DOT_NO
31643 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31644 return true;
31645 else
31646 break;
31647 case TYPE_DIV:
31648 if (get_attr_size (insn) == SIZE_32)
31649 return true;
31650 else
31651 break;
31652 case TYPE_LOAD:
31653 case TYPE_STORE:
31654 case TYPE_FPLOAD:
31655 case TYPE_FPSTORE:
31656 if (get_attr_update (insn) == UPDATE_YES)
31657 return true;
31658 else
31659 break;
31660 default:
31661 break;
31663 break;
31664 case PROCESSOR_POWER7:
31665 type = get_attr_type (insn);
31667 switch (type)
31669 case TYPE_CR_LOGICAL:
31670 case TYPE_MFCR:
31671 case TYPE_MFCRF:
31672 case TYPE_MTCR:
31673 case TYPE_DIV:
31674 case TYPE_ISYNC:
31675 case TYPE_LOAD_L:
31676 case TYPE_STORE_C:
31677 case TYPE_MFJMPR:
31678 case TYPE_MTJMPR:
31679 return true;
31680 case TYPE_MUL:
31681 case TYPE_SHIFT:
31682 case TYPE_EXTS:
31683 if (get_attr_dot (insn) == DOT_YES)
31684 return true;
31685 else
31686 break;
31687 case TYPE_LOAD:
31688 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31689 || get_attr_update (insn) == UPDATE_YES)
31690 return true;
31691 else
31692 break;
31693 case TYPE_STORE:
31694 case TYPE_FPLOAD:
31695 case TYPE_FPSTORE:
31696 if (get_attr_update (insn) == UPDATE_YES)
31697 return true;
31698 else
31699 break;
31700 default:
31701 break;
31703 break;
31704 case PROCESSOR_POWER8:
31705 type = get_attr_type (insn);
31707 switch (type)
31709 case TYPE_CR_LOGICAL:
31710 case TYPE_DELAYED_CR:
31711 case TYPE_MFCR:
31712 case TYPE_MFCRF:
31713 case TYPE_MTCR:
31714 case TYPE_SYNC:
31715 case TYPE_ISYNC:
31716 case TYPE_LOAD_L:
31717 case TYPE_STORE_C:
31718 case TYPE_VECSTORE:
31719 case TYPE_MFJMPR:
31720 case TYPE_MTJMPR:
31721 return true;
31722 case TYPE_SHIFT:
31723 case TYPE_EXTS:
31724 case TYPE_MUL:
31725 if (get_attr_dot (insn) == DOT_YES)
31726 return true;
31727 else
31728 break;
31729 case TYPE_LOAD:
31730 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31731 || get_attr_update (insn) == UPDATE_YES)
31732 return true;
31733 else
31734 break;
31735 case TYPE_STORE:
31736 if (get_attr_update (insn) == UPDATE_YES
31737 && get_attr_indexed (insn) == INDEXED_YES)
31738 return true;
31739 else
31740 break;
31741 default:
31742 break;
31744 break;
31745 default:
31746 break;
31749 return false;
31752 static bool
31753 insn_must_be_last_in_group (rtx_insn *insn)
31755 enum attr_type type;
31757 if (!insn
31758 || NOTE_P (insn)
31759 || DEBUG_INSN_P (insn)
31760 || GET_CODE (PATTERN (insn)) == USE
31761 || GET_CODE (PATTERN (insn)) == CLOBBER)
31762 return false;
31764 switch (rs6000_cpu) {
31765 case PROCESSOR_POWER4:
31766 case PROCESSOR_POWER5:
31767 if (is_microcoded_insn (insn))
31768 return true;
31770 if (is_branch_slot_insn (insn))
31771 return true;
31773 break;
31774 case PROCESSOR_POWER6:
31775 type = get_attr_type (insn);
31777 switch (type)
31779 case TYPE_EXTS:
31780 case TYPE_CNTLZ:
31781 case TYPE_TRAP:
31782 case TYPE_MUL:
31783 case TYPE_FPCOMPARE:
31784 case TYPE_MFCR:
31785 case TYPE_MTCR:
31786 case TYPE_MFJMPR:
31787 case TYPE_MTJMPR:
31788 case TYPE_ISYNC:
31789 case TYPE_SYNC:
31790 case TYPE_LOAD_L:
31791 case TYPE_STORE_C:
31792 return true;
31793 case TYPE_SHIFT:
31794 if (get_attr_dot (insn) == DOT_NO
31795 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31796 return true;
31797 else
31798 break;
31799 case TYPE_DIV:
31800 if (get_attr_size (insn) == SIZE_32)
31801 return true;
31802 else
31803 break;
31804 default:
31805 break;
31807 break;
31808 case PROCESSOR_POWER7:
31809 type = get_attr_type (insn);
31811 switch (type)
31813 case TYPE_ISYNC:
31814 case TYPE_SYNC:
31815 case TYPE_LOAD_L:
31816 case TYPE_STORE_C:
31817 return true;
31818 case TYPE_LOAD:
31819 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31820 && get_attr_update (insn) == UPDATE_YES)
31821 return true;
31822 else
31823 break;
31824 case TYPE_STORE:
31825 if (get_attr_update (insn) == UPDATE_YES
31826 && get_attr_indexed (insn) == INDEXED_YES)
31827 return true;
31828 else
31829 break;
31830 default:
31831 break;
31833 break;
31834 case PROCESSOR_POWER8:
31835 type = get_attr_type (insn);
31837 switch (type)
31839 case TYPE_MFCR:
31840 case TYPE_MTCR:
31841 case TYPE_ISYNC:
31842 case TYPE_SYNC:
31843 case TYPE_LOAD_L:
31844 case TYPE_STORE_C:
31845 return true;
31846 case TYPE_LOAD:
31847 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31848 && get_attr_update (insn) == UPDATE_YES)
31849 return true;
31850 else
31851 break;
31852 case TYPE_STORE:
31853 if (get_attr_update (insn) == UPDATE_YES
31854 && get_attr_indexed (insn) == INDEXED_YES)
31855 return true;
31856 else
31857 break;
31858 default:
31859 break;
31861 break;
31862 default:
31863 break;
31866 return false;
31869 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31870 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31872 static bool
31873 is_costly_group (rtx *group_insns, rtx next_insn)
31875 int i;
31876 int issue_rate = rs6000_issue_rate ();
31878 for (i = 0; i < issue_rate; i++)
31880 sd_iterator_def sd_it;
31881 dep_t dep;
31882 rtx insn = group_insns[i];
31884 if (!insn)
31885 continue;
31887 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31889 rtx next = DEP_CON (dep);
31891 if (next == next_insn
31892 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31893 return true;
31897 return false;
31900 /* Utility of the function redefine_groups.
31901 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31902 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31903 to keep it "far" (in a separate group) from GROUP_INSNS, according
31904 to one of the following schemes, depending on the value of the flag
31905 -minsert-sched-nops = X:
31906 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31907 in order to force NEXT_INSN into a separate group.
31908 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31909 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31910 insertion (has a group just ended, how many vacant issue slots remain in the
31911 last group, and how many dispatch groups were encountered so far). */
31913 static int
31914 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31915 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31916 int *group_count)
31918 rtx nop;
31919 bool force;
31920 int issue_rate = rs6000_issue_rate ();
31921 bool end = *group_end;
31922 int i;
31924 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31925 return can_issue_more;
31927 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31928 return can_issue_more;
31930 force = is_costly_group (group_insns, next_insn);
31931 if (!force)
31932 return can_issue_more;
31934 if (sched_verbose > 6)
31935 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31936 *group_count, can_issue_more);
31938 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31940 if (*group_end)
31941 can_issue_more = 0;
31943 /* Since only a branch can be issued in the last issue_slot, it is
31944 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31945 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31946 in this case the last nop will start a new group and the branch
31947 will be forced to the new group. */
31948 if (can_issue_more && !is_branch_slot_insn (next_insn))
31949 can_issue_more--;
31951 /* Do we have a special group ending nop? */
31952 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31953 || rs6000_cpu_attr == CPU_POWER8)
31955 nop = gen_group_ending_nop ();
31956 emit_insn_before (nop, next_insn);
31957 can_issue_more = 0;
31959 else
31960 while (can_issue_more > 0)
31962 nop = gen_nop ();
31963 emit_insn_before (nop, next_insn);
31964 can_issue_more--;
31967 *group_end = true;
31968 return 0;
31971 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31973 int n_nops = rs6000_sched_insert_nops;
31975 /* Nops can't be issued from the branch slot, so the effective
31976 issue_rate for nops is 'issue_rate - 1'. */
31977 if (can_issue_more == 0)
31978 can_issue_more = issue_rate;
31979 can_issue_more--;
31980 if (can_issue_more == 0)
31982 can_issue_more = issue_rate - 1;
31983 (*group_count)++;
31984 end = true;
31985 for (i = 0; i < issue_rate; i++)
31987 group_insns[i] = 0;
31991 while (n_nops > 0)
31993 nop = gen_nop ();
31994 emit_insn_before (nop, next_insn);
31995 if (can_issue_more == issue_rate - 1) /* new group begins */
31996 end = false;
31997 can_issue_more--;
31998 if (can_issue_more == 0)
32000 can_issue_more = issue_rate - 1;
32001 (*group_count)++;
32002 end = true;
32003 for (i = 0; i < issue_rate; i++)
32005 group_insns[i] = 0;
32008 n_nops--;
32011 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32012 can_issue_more++;
32014 /* Is next_insn going to start a new group? */
32015 *group_end
32016 = (end
32017 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32018 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32019 || (can_issue_more < issue_rate &&
32020 insn_terminates_group_p (next_insn, previous_group)));
32021 if (*group_end && end)
32022 (*group_count)--;
32024 if (sched_verbose > 6)
32025 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32026 *group_count, can_issue_more);
32027 return can_issue_more;
32030 return can_issue_more;
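/* Worked example for scheme (2) above (assuming issue_rate == 5 and
   -minsert-sched-nops=2): exactly two nops are emitted before
   NEXT_INSN; because nops cannot go in the branch slot, each one
   consumes a slot of an effective 4-slot group, and the bookkeeping
   above tracks whether the second nop has already opened a new
   group.  */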
32033 /* This function tries to synch the dispatch groups that the compiler "sees"
32034 with the dispatch groups that the processor dispatcher is expected to
32035 form in practice. It tries to achieve this synchronization by forcing the
32036 estimated processor grouping on the compiler (as opposed to the function
32037 'pad_groups' which tries to force the scheduler's grouping on the processor).
32039 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32040 examines the (estimated) dispatch groups that will be formed by the processor
32041 dispatcher. It marks these group boundaries to reflect the estimated
32042 processor grouping, overriding the grouping that the scheduler had marked.
32043 Depending on the value of the flag '-minsert-sched-nops' this function can
32044 force certain insns into separate groups or force a certain distance between
32045 them by inserting nops, for example, if there exists a "costly dependence"
32046 between the insns.
32048 The function estimates the group boundaries that the processor will form as
32049 follows: It keeps track of how many vacant issue slots are available after
32050 each insn. A subsequent insn will start a new group if one of the following
32051 4 cases applies:
32052 - no more vacant issue slots remain in the current dispatch group.
32053 - only the last issue slot, which is the branch slot, is vacant, but the next
32054 insn is not a branch.
32055 - only the last two or fewer issue slots, including the branch slot, are vacant,
32056 which means that a cracked insn (which occupies two issue slots) can't be
32057 issued in this group.
32058 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32059 start a new group. */
32061 static int
32062 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32063 rtx_insn *tail)
32065 rtx_insn *insn, *next_insn;
32066 int issue_rate;
32067 int can_issue_more;
32068 int slot, i;
32069 bool group_end;
32070 int group_count = 0;
32071 rtx *group_insns;
32073 /* Initialize. */
32074 issue_rate = rs6000_issue_rate ();
32075 group_insns = XALLOCAVEC (rtx, issue_rate);
32076 for (i = 0; i < issue_rate; i++)
32078 group_insns[i] = 0;
32080 can_issue_more = issue_rate;
32081 slot = 0;
32082 insn = get_next_active_insn (prev_head_insn, tail);
32083 group_end = false;
32085 while (insn != NULL_RTX)
32087 slot = (issue_rate - can_issue_more);
32088 group_insns[slot] = insn;
32089 can_issue_more =
32090 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32091 if (insn_terminates_group_p (insn, current_group))
32092 can_issue_more = 0;
32094 next_insn = get_next_active_insn (insn, tail);
32095 if (next_insn == NULL_RTX)
32096 return group_count + 1;
32098 /* Is next_insn going to start a new group? */
32099 group_end
32100 = (can_issue_more == 0
32101 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32102 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32103 || (can_issue_more < issue_rate &&
32104 insn_terminates_group_p (next_insn, previous_group)));
32106 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32107 next_insn, &group_end, can_issue_more,
32108 &group_count);
32110 if (group_end)
32112 group_count++;
32113 can_issue_more = 0;
32114 for (i = 0; i < issue_rate; i++)
32116 group_insns[i] = 0;
32120 if (GET_MODE (next_insn) == TImode && can_issue_more)
32121 PUT_MODE (next_insn, VOIDmode);
32122 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32123 PUT_MODE (next_insn, TImode);
32125 insn = next_insn;
32126 if (can_issue_more == 0)
32127 can_issue_more = issue_rate;
32128 } /* while */
32130 return group_count;
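/* Worked example of the boundary estimate above (assuming
   issue_rate == 5): after three insns have been placed,
   can_issue_more is 2; if the next insn is cracked, the third case
   applies (a two-slot insn cannot fit when only the last two slots,
   one of them the branch slot, remain), so group_end becomes true and
   the cracked insn starts the following group.  */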
32133 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32134 dispatch group boundaries that the scheduler had marked. Pad with nops
32135 any dispatch groups which have vacant issue slots, in order to force the
32136 scheduler's grouping on the processor dispatcher. The function
32137 returns the number of dispatch groups found. */
32139 static int
32140 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32141 rtx_insn *tail)
32143 rtx_insn *insn, *next_insn;
32144 rtx nop;
32145 int issue_rate;
32146 int can_issue_more;
32147 int group_end;
32148 int group_count = 0;
32150 /* Initialize issue_rate. */
32151 issue_rate = rs6000_issue_rate ();
32152 can_issue_more = issue_rate;
32154 insn = get_next_active_insn (prev_head_insn, tail);
32155 next_insn = get_next_active_insn (insn, tail);
32157 while (insn != NULL_RTX)
32159 can_issue_more =
32160 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32162 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32164 if (next_insn == NULL_RTX)
32165 break;
32167 if (group_end)
32169 /* If the scheduler had marked group termination at this location
32170 (between insn and next_insn), and neither insn nor next_insn will
32171 force group termination, pad the group with nops to force group
32172 termination. */
32173 if (can_issue_more
32174 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32175 && !insn_terminates_group_p (insn, current_group)
32176 && !insn_terminates_group_p (next_insn, previous_group))
32178 if (!is_branch_slot_insn (next_insn))
32179 can_issue_more--;
32181 while (can_issue_more)
32183 nop = gen_nop ();
32184 emit_insn_before (nop, next_insn);
32185 can_issue_more--;
32189 can_issue_more = issue_rate;
32190 group_count++;
32193 insn = next_insn;
32194 next_insn = get_next_active_insn (insn, tail);
32197 return group_count;
32200 /* We're beginning a new block. Initialize data structures as necessary. */
32202 static void
32203 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32204 int sched_verbose ATTRIBUTE_UNUSED,
32205 int max_ready ATTRIBUTE_UNUSED)
32207 last_scheduled_insn = NULL;
32208 load_store_pendulum = 0;
32209 divide_cnt = 0;
32210 vec_pairing = 0;
32213 /* The following function is called at the end of scheduling BB.
32214 After reload, it inserts nops to enforce insn group bundling. */
32216 static void
32217 rs6000_sched_finish (FILE *dump, int sched_verbose)
32219 int n_groups;
32221 if (sched_verbose)
32222 fprintf (dump, "=== Finishing schedule.\n");
32224 if (reload_completed && rs6000_sched_groups)
32226 /* Do not run sched_finish hook when selective scheduling enabled. */
32227 if (sel_sched_p ())
32228 return;
32230 if (rs6000_sched_insert_nops == sched_finish_none)
32231 return;
32233 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32234 n_groups = pad_groups (dump, sched_verbose,
32235 current_sched_info->prev_head,
32236 current_sched_info->next_tail);
32237 else
32238 n_groups = redefine_groups (dump, sched_verbose,
32239 current_sched_info->prev_head,
32240 current_sched_info->next_tail);
32242 if (sched_verbose >= 6)
32244 fprintf (dump, "ngroups = %d\n", n_groups);
32245 print_rtl (dump, current_sched_info->prev_head);
32246 fprintf (dump, "Done finish_sched\n");
32251 struct rs6000_sched_context
32253 short cached_can_issue_more;
32254 rtx_insn *last_scheduled_insn;
32255 int load_store_pendulum;
32256 int divide_cnt;
32257 int vec_pairing;
32260 typedef struct rs6000_sched_context rs6000_sched_context_def;
32261 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32263 /* Allocate storage for a new scheduling context. */
32264 static void *
32265 rs6000_alloc_sched_context (void)
32267 return xmalloc (sizeof (rs6000_sched_context_def));
32270 /* If CLEAN_P is true then initialize _SC with clean data;
32271 otherwise initialize it from the global context. */
32272 static void
32273 rs6000_init_sched_context (void *_sc, bool clean_p)
32275 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32277 if (clean_p)
32279 sc->cached_can_issue_more = 0;
32280 sc->last_scheduled_insn = NULL;
32281 sc->load_store_pendulum = 0;
32282 sc->divide_cnt = 0;
32283 sc->vec_pairing = 0;
32285 else
32287 sc->cached_can_issue_more = cached_can_issue_more;
32288 sc->last_scheduled_insn = last_scheduled_insn;
32289 sc->load_store_pendulum = load_store_pendulum;
32290 sc->divide_cnt = divide_cnt;
32291 sc->vec_pairing = vec_pairing;
32295 /* Sets the global scheduling context to the one pointed to by _SC. */
32296 static void
32297 rs6000_set_sched_context (void *_sc)
32299 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32301 gcc_assert (sc != NULL);
32303 cached_can_issue_more = sc->cached_can_issue_more;
32304 last_scheduled_insn = sc->last_scheduled_insn;
32305 load_store_pendulum = sc->load_store_pendulum;
32306 divide_cnt = sc->divide_cnt;
32307 vec_pairing = sc->vec_pairing;
32310 /* Free _SC. */
32311 static void
32312 rs6000_free_sched_context (void *_sc)
32314 gcc_assert (_sc != NULL);
32316 free (_sc);
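/* Sketch of how a selective-scheduling client is expected to drive the
   four hooks above (illustrative only, not compiled):  */
#if 0
static void
sched_context_lifecycle_sketch (void)
{
  void *ctx = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (ctx, true);   /* begin from a clean state */
  /* ... scheduling runs and the global state evolves ... */
  rs6000_init_sched_context (ctx, false);  /* snapshot the globals */
  rs6000_set_sched_context (ctx);          /* restore the snapshot */
  rs6000_free_sched_context (ctx);
}
#endif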
32319 static bool
32320 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32322 switch (get_attr_type (insn))
32324 case TYPE_DIV:
32325 case TYPE_SDIV:
32326 case TYPE_DDIV:
32327 case TYPE_VECDIV:
32328 case TYPE_SSQRT:
32329 case TYPE_DSQRT:
32330 return false;
32332 default:
32333 return true;
32337 /* Length in units of the trampoline for entering a nested function. */
32339 int
32340 rs6000_trampoline_size (void)
32342 int ret = 0;
32344 switch (DEFAULT_ABI)
32346 default:
32347 gcc_unreachable ();
32349 case ABI_AIX:
32350 ret = (TARGET_32BIT) ? 12 : 24;
32351 break;
32353 case ABI_ELFv2:
32354 gcc_assert (!TARGET_32BIT);
32355 ret = 32;
32356 break;
32358 case ABI_DARWIN:
32359 case ABI_V4:
32360 ret = (TARGET_32BIT) ? 40 : 48;
32361 break;
32364 return ret;
32367 /* Emit RTL insns to initialize the variable parts of a trampoline.
32368 FNADDR is an RTX for the address of the function's pure code.
32369 CXT is an RTX for the static chain value for the function. */
32371 static void
32372 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32374 int regsize = (TARGET_32BIT) ? 4 : 8;
32375 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32376 rtx ctx_reg = force_reg (Pmode, cxt);
32377 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32379 switch (DEFAULT_ABI)
32381 default:
32382 gcc_unreachable ();
32384 /* Under AIX, just build the 3-word function descriptor. */
32385 case ABI_AIX:
32387 rtx fnmem, fn_reg, toc_reg;
32389 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32390 error ("you cannot take the address of a nested function if you use "
32391 "the %qs option", "-mno-pointers-to-nested-functions");
32393 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32394 fn_reg = gen_reg_rtx (Pmode);
32395 toc_reg = gen_reg_rtx (Pmode);
32397 /* Macro to shorten the code expansions below. */
32398 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32400 m_tramp = replace_equiv_address (m_tramp, addr);
32402 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32403 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32404 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32405 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32406 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32408 # undef MEM_PLUS
32410 break;
32412 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32413 case ABI_ELFv2:
32414 case ABI_DARWIN:
32415 case ABI_V4:
32416 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32417 LCT_NORMAL, VOIDmode,
32418 addr, Pmode,
32419 GEN_INT (rs6000_trampoline_size ()), SImode,
32420 fnaddr, Pmode,
32421 ctx_reg, Pmode);
32422 break;
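/* Illustrative layout of the AIX descriptor built above (64-bit, so
   regsize == 8, matching the 24-byte rs6000_trampoline_size result):
     tramp +  0: function entry address, copied from FNADDR's descriptor
     tramp +  8: TOC pointer, copied likewise
     tramp + 16: static chain value (CXT)  */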
32427 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32428 identifier as an argument, so the front end shouldn't look it up. */
32430 static bool
32431 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32433 return is_attribute_p ("altivec", attr_id);
32436 /* Handle the "altivec" attribute. The attribute may have
32437 arguments as follows:
32439 __attribute__((altivec(vector__)))
32440 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32441 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32443 and may appear more than once (e.g., 'vector bool char') in a
32444 given declaration. */
32446 static tree
32447 rs6000_handle_altivec_attribute (tree *node,
32448 tree name ATTRIBUTE_UNUSED,
32449 tree args,
32450 int flags ATTRIBUTE_UNUSED,
32451 bool *no_add_attrs)
32453 tree type = *node, result = NULL_TREE;
32454 machine_mode mode;
32455 int unsigned_p;
32456 char altivec_type
32457 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32458 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32459 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32460 : '?');
32462 while (POINTER_TYPE_P (type)
32463 || TREE_CODE (type) == FUNCTION_TYPE
32464 || TREE_CODE (type) == METHOD_TYPE
32465 || TREE_CODE (type) == ARRAY_TYPE)
32466 type = TREE_TYPE (type);
32468 mode = TYPE_MODE (type);
32470 /* Check for invalid AltiVec type qualifiers. */
32471 if (type == long_double_type_node)
32472 error ("use of %<long double%> in AltiVec types is invalid");
32473 else if (type == boolean_type_node)
32474 error ("use of boolean types in AltiVec types is invalid");
32475 else if (TREE_CODE (type) == COMPLEX_TYPE)
32476 error ("use of %<complex%> in AltiVec types is invalid");
32477 else if (DECIMAL_FLOAT_MODE_P (mode))
32478 error ("use of decimal floating point types in AltiVec types is invalid");
32479 else if (!TARGET_VSX)
32481 if (type == long_unsigned_type_node || type == long_integer_type_node)
32483 if (TARGET_64BIT)
32484 error ("use of %<long%> in AltiVec types is invalid for "
32485 "64-bit code without %qs", "-mvsx");
32486 else if (rs6000_warn_altivec_long)
32487 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32488 "use %<int%>");
32490 else if (type == long_long_unsigned_type_node
32491 || type == long_long_integer_type_node)
32492 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32493 "-mvsx");
32494 else if (type == double_type_node)
32495 error ("use of %<double%> in AltiVec types is invalid without %qs",
32496 "-mvsx");
32499 switch (altivec_type)
32501 case 'v':
32502 unsigned_p = TYPE_UNSIGNED (type);
32503 switch (mode)
32505 case E_TImode:
32506 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32507 break;
32508 case E_DImode:
32509 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32510 break;
32511 case E_SImode:
32512 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32513 break;
32514 case E_HImode:
32515 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32516 break;
32517 case E_QImode:
32518 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32519 break;
32520 case E_SFmode: result = V4SF_type_node; break;
32521 case E_DFmode: result = V2DF_type_node; break;
32522 /* If the user says 'vector int bool', we may be handed the 'bool'
32523 attribute _before_ the 'vector' attribute, and so select the
32524 proper type in the 'b' case below. */
32525 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32526 case E_V2DImode: case E_V2DFmode:
32527 result = type;
32528 default: break;
32530 break;
32531 case 'b':
32532 switch (mode)
32534 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32535 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32536 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32537 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32538 default: break;
32540 break;
32541 case 'p':
32542 switch (mode)
32544 case E_V8HImode: result = pixel_V8HI_type_node;
32545 default: break;
32547 default: break;
32550 /* Propagate qualifiers attached to the element type
32551 onto the vector type. */
32552 if (result && result != type && TYPE_QUALS (type))
32553 result = build_qualified_type (result, TYPE_QUALS (type));
32555 *no_add_attrs = true; /* No need to hang on to the attribute. */
32557 if (result)
32558 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32560 return NULL_TREE;
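/* Illustrative uses of the attribute handled above (sketch only, not
   compiled): the context-sensitive 'vector' and 'bool' keywords expand
   to forms like these, and '__pixel' layers altivec(pixel__) over
   'unsigned short' in the same way.  */
#if 0
typedef __attribute__ ((altivec (vector__))) int sketch_v4si;        /* V4SI_type_node */
typedef __attribute__ ((altivec (bool__))) unsigned int sketch_b4si; /* bool_V4SI_type_node */
#endif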
32563 /* AltiVec defines four built-in scalar types that serve as vector
32564 elements; we must teach the compiler how to mangle them. */
32566 static const char *
32567 rs6000_mangle_type (const_tree type)
32569 type = TYPE_MAIN_VARIANT (type);
32571 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32572 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32573 return NULL;
32575 if (type == bool_char_type_node) return "U6__boolc";
32576 if (type == bool_short_type_node) return "U6__bools";
32577 if (type == pixel_type_node) return "u7__pixel";
32578 if (type == bool_int_type_node) return "U6__booli";
32579 if (type == bool_long_type_node) return "U6__booll";
32581 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32582 "g" for IBM extended double, no matter whether it is long double (using
32583 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32584 if (TARGET_FLOAT128_TYPE)
32586 if (type == ieee128_float_type_node)
32587 return "U10__float128";
32589 if (TARGET_LONG_DOUBLE_128)
32591 if (type == long_double_type_node)
32592 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32594 if (type == ibm128_float_type_node)
32595 return "g";
32599 /* Mangle IBM extended float long double as `g' (__float128) on
32600 powerpc*-linux where long-double-64 previously was the default. */
32601 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32602 && TARGET_ELF
32603 && TARGET_LONG_DOUBLE_128
32604 && !TARGET_IEEEQUAD)
32605 return "g";
32607 /* For all other types, use normal C++ mangling. */
32608 return NULL;
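/* Worked example of the mangling above: "u7__pixel" is an Itanium C++
   ABI vendor-extended type ("u" + length + name), the "U6__bool*"
   strings are vendor-extended qualifiers, and "g" is the ABI's code
   for __float128 / IBM extended double.  */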
32611 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32612 struct attribute_spec.handler. */
32614 static tree
32615 rs6000_handle_longcall_attribute (tree *node, tree name,
32616 tree args ATTRIBUTE_UNUSED,
32617 int flags ATTRIBUTE_UNUSED,
32618 bool *no_add_attrs)
32620 if (TREE_CODE (*node) != FUNCTION_TYPE
32621 && TREE_CODE (*node) != FIELD_DECL
32622 && TREE_CODE (*node) != TYPE_DECL)
32624 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32625 name);
32626 *no_add_attrs = true;
32629 return NULL_TREE;
32632 /* Set longcall attributes on all functions declared when
32633 rs6000_default_long_calls is true. */
32634 static void
32635 rs6000_set_default_type_attributes (tree type)
32637 if (rs6000_default_long_calls
32638 && (TREE_CODE (type) == FUNCTION_TYPE
32639 || TREE_CODE (type) == METHOD_TYPE))
32640 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32641 NULL_TREE,
32642 TYPE_ATTRIBUTES (type));
32644 #if TARGET_MACHO
32645 darwin_set_default_type_attributes (type);
32646 #endif
32649 /* Return a reference suitable for calling a function with the
32650 longcall attribute. */
32652 rtx
32653 rs6000_longcall_ref (rtx call_ref)
32655 const char *call_name;
32656 tree node;
32658 if (GET_CODE (call_ref) != SYMBOL_REF)
32659 return call_ref;
32661 /* System V adds '.' to the internal name, so skip any leading dots. */
32662 call_name = XSTR (call_ref, 0);
32663 if (*call_name == '.')
32665 while (*call_name == '.')
32666 call_name++;
32668 node = get_identifier (call_name);
32669 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32672 return force_reg (Pmode, call_ref);
32675 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32676 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32677 #endif
32679 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32680 struct attribute_spec.handler. */
32681 static tree
32682 rs6000_handle_struct_attribute (tree *node, tree name,
32683 tree args ATTRIBUTE_UNUSED,
32684 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32686 tree *type = NULL;
32687 if (DECL_P (*node))
32689 if (TREE_CODE (*node) == TYPE_DECL)
32690 type = &TREE_TYPE (*node);
32692 else
32693 type = node;
32695 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32696 || TREE_CODE (*type) == UNION_TYPE)))
32698 warning (OPT_Wattributes, "%qE attribute ignored", name);
32699 *no_add_attrs = true;
32702 else if ((is_attribute_p ("ms_struct", name)
32703 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32704 || ((is_attribute_p ("gcc_struct", name)
32705 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32707 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32708 name);
32709 *no_add_attrs = true;
32712 return NULL_TREE;
32715 static bool
32716 rs6000_ms_bitfield_layout_p (const_tree record_type)
32718 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32719 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32720 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32723 #ifdef USING_ELFOS_H
32725 /* A get_unnamed_section callback, used for switching to toc_section. */
32727 static void
32728 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32730 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32731 && TARGET_MINIMAL_TOC)
32733 if (!toc_initialized)
32735 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32736 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32737 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32738 fprintf (asm_out_file, "\t.tc ");
32739 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32740 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32741 fprintf (asm_out_file, "\n");
32743 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32744 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32745 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32746 fprintf (asm_out_file, " = .+32768\n");
32747 toc_initialized = 1;
32749 else
32750 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32752 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32754 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32755 if (!toc_initialized)
32757 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32758 toc_initialized = 1;
32761 else
32763 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32764 if (!toc_initialized)
32766 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32767 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32768 fprintf (asm_out_file, " = .+32768\n");
32769 toc_initialized = 1;
32774 /* Implement TARGET_ASM_INIT_SECTIONS. */
32776 static void
32777 rs6000_elf_asm_init_sections (void)
32779 toc_section
32780 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32782 sdata2_section
32783 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32784 SDATA2_SECTION_ASM_OP);
32787 /* Implement TARGET_SELECT_RTX_SECTION. */
32789 static section *
32790 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32791 unsigned HOST_WIDE_INT align)
32793 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32794 return toc_section;
32795 else
32796 return default_elf_select_rtx_section (mode, x, align);
32799 /* For a SYMBOL_REF, set generic flags and then perform some
32800 target-specific processing.
32802 When the AIX ABI is requested on a non-AIX system, replace the
32803 function name with the real name (with a leading .) rather than the
32804 function descriptor name. This saves a lot of overriding code to
32805 read the prefixes. */
32807 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32808 static void
32809 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32811 default_encode_section_info (decl, rtl, first);
32813 if (first
32814 && TREE_CODE (decl) == FUNCTION_DECL
32815 && !TARGET_AIX
32816 && DEFAULT_ABI == ABI_AIX)
32818 rtx sym_ref = XEXP (rtl, 0);
32819 size_t len = strlen (XSTR (sym_ref, 0));
32820 char *str = XALLOCAVEC (char, len + 2);
32821 str[0] = '.';
32822 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32823 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32827 static inline bool
32828 compare_section_name (const char *section, const char *templ)
32830 int len;
32832 len = strlen (templ);
32833 return (strncmp (section, templ, len) == 0
32834 && (section[len] == 0 || section[len] == '.'));
32837 bool
32838 rs6000_elf_in_small_data_p (const_tree decl)
32840 if (rs6000_sdata == SDATA_NONE)
32841 return false;
32843 /* We want to merge strings, so we never consider them small data. */
32844 if (TREE_CODE (decl) == STRING_CST)
32845 return false;
32847 /* Functions are never in the small data area. */
32848 if (TREE_CODE (decl) == FUNCTION_DECL)
32849 return false;
32851 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32853 const char *section = DECL_SECTION_NAME (decl);
32854 if (compare_section_name (section, ".sdata")
32855 || compare_section_name (section, ".sdata2")
32856 || compare_section_name (section, ".gnu.linkonce.s")
32857 || compare_section_name (section, ".sbss")
32858 || compare_section_name (section, ".sbss2")
32859 || compare_section_name (section, ".gnu.linkonce.sb")
32860 || strcmp (section, ".PPC.EMB.sdata0") == 0
32861 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32862 return true;
32864 else
32866 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32868 if (size > 0
32869 && size <= g_switch_value
32870 /* If it's not public, and we're not going to reference it there,
32871 there's no need to put it in the small data section. */
32872 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32873 return true;
32876 return false;
32879 #endif /* USING_ELFOS_H */
32881 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32883 static bool
32884 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32886 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32889 /* Do not place thread-local symbols refs in the object blocks. */
32891 static bool
32892 rs6000_use_blocks_for_decl_p (const_tree decl)
32894 return !DECL_THREAD_LOCAL_P (decl);
32897 /* Return a REG that occurs in ADDR with coefficient 1.
32898 ADDR can be effectively incremented by incrementing REG.
32900 r0 is special and we must not select it as an address
32901 register by this routine since our caller will try to
32902 increment the returned register via an "la" instruction. */
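/* e.g. for (plus (reg 9) (const_int 8)) this returns (reg 9); for
   (plus (reg 0) (reg 9)) it also returns (reg 9), skipping r0.  */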
32904 rtx
32905 find_addr_reg (rtx addr)
32907 while (GET_CODE (addr) == PLUS)
32909 if (GET_CODE (XEXP (addr, 0)) == REG
32910 && REGNO (XEXP (addr, 0)) != 0)
32911 addr = XEXP (addr, 0);
32912 else if (GET_CODE (XEXP (addr, 1)) == REG
32913 && REGNO (XEXP (addr, 1)) != 0)
32914 addr = XEXP (addr, 1);
32915 else if (CONSTANT_P (XEXP (addr, 0)))
32916 addr = XEXP (addr, 1);
32917 else if (CONSTANT_P (XEXP (addr, 1)))
32918 addr = XEXP (addr, 0);
32919 else
32920 gcc_unreachable ();
32922 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32923 return addr;
32926 void
32927 rs6000_fatal_bad_address (rtx op)
32929 fatal_insn ("bad address", op);
32932 #if TARGET_MACHO
32934 typedef struct branch_island_d {
32935 tree function_name;
32936 tree label_name;
32937 int line_number;
32938 } branch_island;
32941 static vec<branch_island, va_gc> *branch_islands;
32943 /* Remember to generate a branch island for far calls to the given
32944 function. */
32946 static void
32947 add_compiler_branch_island (tree label_name, tree function_name,
32948 int line_number)
32950 branch_island bi = {function_name, label_name, line_number};
32951 vec_safe_push (branch_islands, bi);
32954 /* Generate far-jump branch islands for everything recorded in
32955 branch_islands. Invoked immediately after the last instruction of
32956 the epilogue has been emitted; the branch islands must be appended
32957 to, and contiguous with, the function body. Mach-O stubs are
32958 generated in machopic_output_stub(). */
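/* Sketch of the non-PIC island emitted below for a far call to "foo"
   via label "L42" ("foo" and "L42" are made-up names):

       L42: lis   r12,hi16(foo)
            ori   r12,r12,lo16(foo)
            mtctr r12
            bctr
*/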
32960 static void
32961 macho_branch_islands (void)
32963 char tmp_buf[512];
32965 while (!vec_safe_is_empty (branch_islands))
32967 branch_island *bi = &branch_islands->last ();
32968 const char *label = IDENTIFIER_POINTER (bi->label_name);
32969 const char *name = IDENTIFIER_POINTER (bi->function_name);
32970 char name_buf[512];
32971 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32972 if (name[0] == '*' || name[0] == '&')
32973 strcpy (name_buf, name+1);
32974 else
32976 name_buf[0] = '_';
32977 strcpy (name_buf+1, name);
32979 strcpy (tmp_buf, "\n");
32980 strcat (tmp_buf, label);
32981 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32982 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32983 dbxout_stabd (N_SLINE, bi->line_number);
32984 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32985 if (flag_pic)
32987 if (TARGET_LINK_STACK)
32989 char name[32];
32990 get_ppc476_thunk_name (name);
32991 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32992 strcat (tmp_buf, name);
32993 strcat (tmp_buf, "\n");
32994 strcat (tmp_buf, label);
32995 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32997 else
32999 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33000 strcat (tmp_buf, label);
33001 strcat (tmp_buf, "_pic\n");
33002 strcat (tmp_buf, label);
33003 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33006 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33007 strcat (tmp_buf, name_buf);
33008 strcat (tmp_buf, " - ");
33009 strcat (tmp_buf, label);
33010 strcat (tmp_buf, "_pic)\n");
33012 strcat (tmp_buf, "\tmtlr r0\n");
33014 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33015 strcat (tmp_buf, name_buf);
33016 strcat (tmp_buf, " - ");
33017 strcat (tmp_buf, label);
33018 strcat (tmp_buf, "_pic)\n");
33020 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33022 else
33024 strcat (tmp_buf, ":\nlis r12,hi16(");
33025 strcat (tmp_buf, name_buf);
33026 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33027 strcat (tmp_buf, name_buf);
33028 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33030 output_asm_insn (tmp_buf, 0);
33031 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33032 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33033 dbxout_stabd (N_SLINE, bi->line_number);
33034 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33035 branch_islands->pop ();
33039 /* NO_PREVIOUS_DEF checks whether the function name is already present
33040 in the list of recorded branch islands. */
33042 static int
33043 no_previous_def (tree function_name)
33045 branch_island *bi;
33046 unsigned ix;
33048 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33049 if (function_name == bi->function_name)
33050 return 0;
33051 return 1;
33054 /* GET_PREV_LABEL gets the label name from the previous definition of
33055 the function. */
33057 static tree
33058 get_prev_label (tree function_name)
33060 branch_island *bi;
33061 unsigned ix;
33063 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33064 if (function_name == bi->function_name)
33065 return bi->label_name;
33066 return NULL_TREE;
33069 /* INSN is either a function call or a millicode call. It may have an
33070 unconditional jump in its delay slot.
33072 CALL_DEST is the routine we are calling. */
33074 char *
33075 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33076 int cookie_operand_number)
33078 static char buf[256];
33079 if (darwin_emit_branch_islands
33080 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33081 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33083 tree labelname;
33084 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33086 if (no_previous_def (funname))
33088 rtx label_rtx = gen_label_rtx ();
33089 char *label_buf, temp_buf[256];
33090 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33091 CODE_LABEL_NUMBER (label_rtx));
33092 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33093 labelname = get_identifier (label_buf);
33094 add_compiler_branch_island (labelname, funname, insn_line (insn));
33096 else
33097 labelname = get_prev_label (funname);
33099 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33100 instruction will reach 'foo', otherwise link as 'bl L42'".
33101 "L42" should be a 'branch island', that will do a far jump to
33102 'foo'. Branch islands are generated in
33103 macho_branch_islands(). */
33104 sprintf (buf, "jbsr %%z%d,%.246s",
33105 dest_operand_number, IDENTIFIER_POINTER (labelname));
33107 else
33108 sprintf (buf, "bl %%z%d", dest_operand_number);
33109 return buf;
33112 /* Generate PIC and indirect symbol stubs. */
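/* Sketch of the non-PIC (flag_pic != 2) 32-bit stub emitted below,
   using made-up names for the stub and lazy-pointer labels of a
   symbol "foo":

       stub:      .indirect_symbol _foo
                  lis   r11,ha16(lazy_ptr)
                  lwzu  r12,lo16(lazy_ptr)(r11)
                  mtctr r12
                  bctr
       lazy_ptr:  .indirect_symbol _foo
                  .long dyld_stub_binding_helper
*/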
33114 void
33115 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33117 unsigned int length;
33118 char *symbol_name, *lazy_ptr_name;
33119 char *local_label_0;
33120 static int label = 0;
33122 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33123 symb = (*targetm.strip_name_encoding) (symb);
33126 length = strlen (symb);
33127 symbol_name = XALLOCAVEC (char, length + 32);
33128 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33130 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33131 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33133 if (flag_pic == 2)
33134 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33135 else
33136 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33138 if (flag_pic == 2)
33140 fprintf (file, "\t.align 5\n");
33142 fprintf (file, "%s:\n", stub);
33143 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33145 label++;
33146 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33147 sprintf (local_label_0, "\"L%011d$spb\"", label);
33149 fprintf (file, "\tmflr r0\n");
33150 if (TARGET_LINK_STACK)
33152 char name[32];
33153 get_ppc476_thunk_name (name);
33154 fprintf (file, "\tbl %s\n", name);
33155 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33157 else
33159 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33160 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33162 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33163 lazy_ptr_name, local_label_0);
33164 fprintf (file, "\tmtlr r0\n");
33165 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33166 (TARGET_64BIT ? "ldu" : "lwzu"),
33167 lazy_ptr_name, local_label_0);
33168 fprintf (file, "\tmtctr r12\n");
33169 fprintf (file, "\tbctr\n");
33171 else
33173 fprintf (file, "\t.align 4\n");
33175 fprintf (file, "%s:\n", stub);
33176 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33178 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33179 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33180 (TARGET_64BIT ? "ldu" : "lwzu"),
33181 lazy_ptr_name);
33182 fprintf (file, "\tmtctr r12\n");
33183 fprintf (file, "\tbctr\n");
33186 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33187 fprintf (file, "%s:\n", lazy_ptr_name);
33188 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33189 fprintf (file, "%sdyld_stub_binding_helper\n",
33190 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33193 /* Legitimize PIC addresses. If the address is already
33194 position-independent, we return ORIG. Newly generated
33195 position-independent addresses go into a reg. This is REG if
33196 nonzero; otherwise we allocate register(s) as necessary. */
33198 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
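/* SMALL_INT (X) is true iff X fits in a signed 16-bit immediate,
   i.e. -0x8000 <= INTVAL (X) <= 0x7fff.  */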
33200 rtx
33201 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33202 rtx reg)
33204 rtx base, offset;
33206 if (reg == NULL && !reload_completed)
33207 reg = gen_reg_rtx (Pmode);
33209 if (GET_CODE (orig) == CONST)
33211 rtx reg_temp;
33213 if (GET_CODE (XEXP (orig, 0)) == PLUS
33214 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33215 return orig;
33217 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33219 /* Use a different reg for the intermediate value, as
33220 it will be marked UNCHANGING. */
33221 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33222 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33223 Pmode, reg_temp);
33224 offset =
33225 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33226 Pmode, reg);
33228 if (GET_CODE (offset) == CONST_INT)
33230 if (SMALL_INT (offset))
33231 return plus_constant (Pmode, base, INTVAL (offset));
33232 else if (!reload_completed)
33233 offset = force_reg (Pmode, offset);
33234 else
33236 rtx mem = force_const_mem (Pmode, orig);
33237 return machopic_legitimize_pic_address (mem, Pmode, reg);
33240 return gen_rtx_PLUS (Pmode, base, offset);
33243 /* Fall back on generic machopic code. */
33244 return machopic_legitimize_pic_address (orig, mode, reg);
33247 /* Output a .machine directive for the Darwin assembler, and call
33248 the generic start_file routine. */
33250 static void
33251 rs6000_darwin_file_start (void)
33253 static const struct
33255 const char *arg;
33256 const char *name;
33257 HOST_WIDE_INT if_set;
33258 } mapping[] = {
33259 { "ppc64", "ppc64", MASK_64BIT },
33260 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33261 { "power4", "ppc970", 0 },
33262 { "G5", "ppc970", 0 },
33263 { "7450", "ppc7450", 0 },
33264 { "7400", "ppc7400", MASK_ALTIVEC },
33265 { "G4", "ppc7400", 0 },
33266 { "750", "ppc750", 0 },
33267 { "740", "ppc750", 0 },
33268 { "G3", "ppc750", 0 },
33269 { "604e", "ppc604e", 0 },
33270 { "604", "ppc604", 0 },
33271 { "603e", "ppc603", 0 },
33272 { "603", "ppc603", 0 },
33273 { "601", "ppc601", 0 },
33274 { NULL, "ppc", 0 } };
33275 const char *cpu_id = "";
33276 size_t i;
33278 rs6000_file_start ();
33279 darwin_file_start ();
33281 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33283 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33284 cpu_id = rs6000_default_cpu;
33286 if (global_options_set.x_rs6000_cpu_index)
33287 cpu_id = processor_target_table[rs6000_cpu_index].name;
33289 /* Look through the mapping array. Pick the first name that either
33290 matches the argument, has a bit set in IF_SET that is also set
33291 in the target flags, or has a NULL name. */
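/* e.g. -mcpu=G5 matches the "G5" entry and emits ".machine ppc970";
   an unlisted 64-bit configuration stops at "ppc64" via its
   MASK_64BIT if_set bit. */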
33293 i = 0;
33294 while (mapping[i].arg != NULL
33295 && strcmp (mapping[i].arg, cpu_id) != 0
33296 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33297 i++;
33299 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33302 #endif /* TARGET_MACHO */
33304 #if TARGET_ELF
33305 static int
33306 rs6000_elf_reloc_rw_mask (void)
33308 if (flag_pic)
33309 return 3;
33310 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33311 return 2;
33312 else
33313 return 0;
33316 /* Record an element in the table of global constructors. SYMBOL is
33317 a SYMBOL_REF of the function to be called; PRIORITY is a number
33318 between 0 and MAX_INIT_PRIORITY.
33320 This differs from default_named_section_asm_out_constructor in
33321 that we have special handling for -mrelocatable. */
33323 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33324 static void
33325 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33327 const char *section = ".ctors";
33328 char buf[18];
33330 if (priority != DEFAULT_INIT_PRIORITY)
33332 sprintf (buf, ".ctors.%.5u",
33333 /* Invert the numbering so the linker puts us in the proper
33334 order; constructors are run from right to left, and the
33335 linker sorts in increasing order. */
33336 MAX_INIT_PRIORITY - priority);
33337 section = buf;
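/* e.g. assuming MAX_INIT_PRIORITY is 65535, priority 100 lands in
   ".ctors.65435" and priority 200 in ".ctors.65335"; the linker
   places .ctors.65335 first, and right-to-left execution then runs
   the priority-100 constructor before the priority-200 one. */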
33340 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33341 assemble_align (POINTER_SIZE);
33343 if (DEFAULT_ABI == ABI_V4
33344 && (TARGET_RELOCATABLE || flag_pic > 1))
33346 fputs ("\t.long (", asm_out_file);
33347 output_addr_const (asm_out_file, symbol);
33348 fputs (")@fixup\n", asm_out_file);
33350 else
33351 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33354 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33355 static void
33356 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33358 const char *section = ".dtors";
33359 char buf[18];
33361 if (priority != DEFAULT_INIT_PRIORITY)
33363 sprintf (buf, ".dtors.%.5u",
33364 /* Invert the numbering so the linker puts us in the proper
33365 order; constructors are run from right to left, and the
33366 linker sorts in increasing order. */
33367 MAX_INIT_PRIORITY - priority);
33368 section = buf;
33371 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33372 assemble_align (POINTER_SIZE);
33374 if (DEFAULT_ABI == ABI_V4
33375 && (TARGET_RELOCATABLE || flag_pic > 1))
33377 fputs ("\t.long (", asm_out_file);
33378 output_addr_const (asm_out_file, symbol);
33379 fputs (")@fixup\n", asm_out_file);
33381 else
33382 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33385 void
33386 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33388 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33390 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33391 ASM_OUTPUT_LABEL (file, name);
33392 fputs (DOUBLE_INT_ASM_OP, file);
33393 rs6000_output_function_entry (file, name);
33394 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33395 if (DOT_SYMBOLS)
33397 fputs ("\t.size\t", file);
33398 assemble_name (file, name);
33399 fputs (",24\n\t.type\t.", file);
33400 assemble_name (file, name);
33401 fputs (",@function\n", file);
33402 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33404 fputs ("\t.globl\t.", file);
33405 assemble_name (file, name);
33406 putc ('\n', file);
33409 else
33410 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33411 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33412 rs6000_output_function_entry (file, name);
33413 fputs (":\n", file);
33414 return;
33417 int uses_toc;
33418 if (DEFAULT_ABI == ABI_V4
33419 && (TARGET_RELOCATABLE || flag_pic > 1)
33420 && !TARGET_SECURE_PLT
33421 && (!constant_pool_empty_p () || crtl->profile)
33422 && (uses_toc = uses_TOC ()))
33424 char buf[256];
33426 if (uses_toc == 2)
33427 switch_to_other_text_partition ();
33428 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33430 fprintf (file, "\t.long ");
33431 assemble_name (file, toc_label_name);
33432 need_toc_init = 1;
33433 putc ('-', file);
33434 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33435 assemble_name (file, buf);
33436 putc ('\n', file);
33437 if (uses_toc == 2)
33438 switch_to_other_text_partition ();
33441 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33442 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33444 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33446 char buf[256];
33448 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33450 fprintf (file, "\t.quad .TOC.-");
33451 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33452 assemble_name (file, buf);
33453 putc ('\n', file);
33456 if (DEFAULT_ABI == ABI_AIX)
33458 const char *desc_name, *orig_name;
33460 orig_name = (*targetm.strip_name_encoding) (name);
33461 desc_name = orig_name;
33462 while (*desc_name == '.')
33463 desc_name++;
33465 if (TREE_PUBLIC (decl))
33466 fprintf (file, "\t.globl %s\n", desc_name);
33468 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33469 fprintf (file, "%s:\n", desc_name);
33470 fprintf (file, "\t.long %s\n", orig_name);
33471 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33472 fputs ("\t.long 0\n", file);
33473 fprintf (file, "\t.previous\n");
33475 ASM_OUTPUT_LABEL (file, name);
33478 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33479 static void
33480 rs6000_elf_file_end (void)
33482 #ifdef HAVE_AS_GNU_ATTRIBUTE
33483 /* ??? The value emitted depends on options active at file end.
33484 Assume anyone using #pragma or attributes that might change
33485 options knows what they are doing. */
33486 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33487 && rs6000_passes_float)
33489 int fp;
33491 if (TARGET_DF_FPR)
33492 fp = 1;
33493 else if (TARGET_SF_FPR)
33494 fp = 3;
33495 else
33496 fp = 2;
33497 if (rs6000_passes_long_double)
33499 if (!TARGET_LONG_DOUBLE_128)
33500 fp |= 2 * 4;
33501 else if (TARGET_IEEEQUAD)
33502 fp |= 3 * 4;
33503 else
33504 fp |= 1 * 4;
33506 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
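/* e.g. hard-float double with 128-bit IBM long double gives
   fp == 1 | 1 * 4 == 5, i.e. ".gnu_attribute 4, 5". */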
33508 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33510 if (rs6000_passes_vector)
33511 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33512 (TARGET_ALTIVEC_ABI ? 2 : 1));
33513 if (rs6000_returns_struct)
33514 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33515 aix_struct_return ? 2 : 1);
33517 #endif
33518 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33519 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33520 file_end_indicate_exec_stack ();
33521 #endif
33523 if (flag_split_stack)
33524 file_end_indicate_split_stack ();
33526 if (cpu_builtin_p)
33528 /* We have expanded a CPU builtin, so we need to emit a reference to
33529 the special symbol that LIBC uses to declare it supports the
33530 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33531 switch_to_section (data_section);
33532 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33533 fprintf (asm_out_file, "\t%s %s\n",
33534 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33537 #endif
33539 #if TARGET_XCOFF
33541 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33542 #define HAVE_XCOFF_DWARF_EXTRAS 0
33543 #endif
33545 static enum unwind_info_type
33546 rs6000_xcoff_debug_unwind_info (void)
33548 return UI_NONE;
33551 static void
33552 rs6000_xcoff_asm_output_anchor (rtx symbol)
33554 char buffer[100];
33556 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33557 SYMBOL_REF_BLOCK_OFFSET (symbol));
33558 fprintf (asm_out_file, "%s", SET_ASM_OP);
33559 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33560 fprintf (asm_out_file, ",");
33561 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33562 fprintf (asm_out_file, "\n");
33565 static void
33566 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33568 fputs (GLOBAL_ASM_OP, stream);
33569 RS6000_OUTPUT_BASENAME (stream, name);
33570 putc ('\n', stream);
33573 /* A get_unnamed_section callback, used for read-only sections.
33574 DIRECTIVE points to the section string variable. */
33576 static void
33577 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33579 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33580 *(const char *const *) directive,
33581 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33584 /* Likewise for read-write sections. */
33586 static void
33587 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33589 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33590 *(const char *const *) directive,
33591 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33594 static void
33595 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33597 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33598 *(const char *const *) directive,
33599 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33602 /* A get_unnamed_section callback, used for switching to toc_section. */
33604 static void
33605 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33607 if (TARGET_MINIMAL_TOC)
33609 /* toc_section is always selected at least once from
33610 rs6000_xcoff_file_start, so this is guaranteed to be
33611 defined exactly once in each file. */
33612 if (!toc_initialized)
33614 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33615 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33616 toc_initialized = 1;
33618 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33619 (TARGET_32BIT ? "" : ",3"));
33621 else
33622 fputs ("\t.toc\n", asm_out_file);
33625 /* Implement TARGET_ASM_INIT_SECTIONS. */
33627 static void
33628 rs6000_xcoff_asm_init_sections (void)
33630 read_only_data_section
33631 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33632 &xcoff_read_only_section_name);
33634 private_data_section
33635 = get_unnamed_section (SECTION_WRITE,
33636 rs6000_xcoff_output_readwrite_section_asm_op,
33637 &xcoff_private_data_section_name);
33639 tls_data_section
33640 = get_unnamed_section (SECTION_TLS,
33641 rs6000_xcoff_output_tls_section_asm_op,
33642 &xcoff_tls_data_section_name);
33644 tls_private_data_section
33645 = get_unnamed_section (SECTION_TLS,
33646 rs6000_xcoff_output_tls_section_asm_op,
33647 &xcoff_private_data_section_name);
33649 read_only_private_data_section
33650 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33651 &xcoff_private_data_section_name);
33653 toc_section
33654 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33656 readonly_data_section = read_only_data_section;
33659 static int
33660 rs6000_xcoff_reloc_rw_mask (void)
33662 return 3;
33665 static void
33666 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33667 tree decl ATTRIBUTE_UNUSED)
33669 int smclass;
33670 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33672 if (flags & SECTION_EXCLUDE)
33673 smclass = 4;
33674 else if (flags & SECTION_DEBUG)
33676 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33677 return;
33679 else if (flags & SECTION_CODE)
33680 smclass = 0;
33681 else if (flags & SECTION_TLS)
33682 smclass = 3;
33683 else if (flags & SECTION_WRITE)
33684 smclass = 2;
33685 else
33686 smclass = 1;
33688 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33689 (flags & SECTION_CODE) ? "." : "",
33690 name, suffix[smclass], flags & SECTION_ENTSIZE);
33693 #define IN_NAMED_SECTION(DECL) \
33694 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33695 && DECL_SECTION_NAME (DECL) != NULL)
33697 static section *
33698 rs6000_xcoff_select_section (tree decl, int reloc,
33699 unsigned HOST_WIDE_INT align)
33701 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33702 named section. */
33703 if (align > BIGGEST_ALIGNMENT)
33705 resolve_unique_section (decl, reloc, true);
33706 if (IN_NAMED_SECTION (decl))
33707 return get_named_section (decl, NULL, reloc);
33710 if (decl_readonly_section (decl, reloc))
33712 if (TREE_PUBLIC (decl))
33713 return read_only_data_section;
33714 else
33715 return read_only_private_data_section;
33717 else
33719 #if HAVE_AS_TLS
33720 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33722 if (TREE_PUBLIC (decl))
33723 return tls_data_section;
33724 else if (bss_initializer_p (decl))
33726 /* Convert to COMMON to emit in BSS. */
33727 DECL_COMMON (decl) = 1;
33728 return tls_comm_section;
33730 else
33731 return tls_private_data_section;
33733 else
33734 #endif
33735 if (TREE_PUBLIC (decl))
33736 return data_section;
33737 else
33738 return private_data_section;
33742 static void
33743 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33745 const char *name;
33747 /* Use select_section for private data and uninitialized data with
33748 alignment <= BIGGEST_ALIGNMENT. */
33749 if (!TREE_PUBLIC (decl)
33750 || DECL_COMMON (decl)
33751 || (DECL_INITIAL (decl) == NULL_TREE
33752 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33753 || DECL_INITIAL (decl) == error_mark_node
33754 || (flag_zero_initialized_in_bss
33755 && initializer_zerop (DECL_INITIAL (decl))))
33756 return;
33758 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33759 name = (*targetm.strip_name_encoding) (name);
33760 set_decl_section_name (decl, name);
33763 /* Select section for constant in constant pool.
33765 On RS/6000, all constants are in the private read-only data area.
33766 However, if this is being placed in the TOC it must be output as a
33767 toc entry. */
33769 static section *
33770 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33771 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33773 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33774 return toc_section;
33775 else
33776 return read_only_private_data_section;
33779 /* Remove any trailing [DS] or the like from the symbol name. */
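/* e.g. "foo[DS]" becomes "foo"; a leading '*' is also dropped.  */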
33781 static const char *
33782 rs6000_xcoff_strip_name_encoding (const char *name)
33784 size_t len;
33785 if (*name == '*')
33786 name++;
33787 len = strlen (name);
33788 if (name[len - 1] == ']')
33789 return ggc_alloc_string (name, len - 4);
33790 else
33791 return name;
33794 /* Section attributes. AIX is always PIC. */
33796 static unsigned int
33797 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33799 unsigned int align;
33800 unsigned int flags = default_section_type_flags (decl, name, reloc);
33802 /* Align to at least UNIT size. */
33803 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33804 align = MIN_UNITS_PER_WORD;
33805 else
33806 /* Increase alignment of large objects if not already stricter. */
33807 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33808 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33809 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33811 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33814 /* Output at beginning of assembler file.
33816 Initialize the section names for the RS/6000 at this point.
33818 Specify filename, including full path, to assembler.
33820 We want to go into the TOC section so at least one .toc will be emitted.
33821 Also, in order to output proper .bs/.es pairs, we need at least one static
33822 [RW] section emitted.
33824 Finally, declare mcount when profiling to make the assembler happy. */
33826 static void
33827 rs6000_xcoff_file_start (void)
33829 rs6000_gen_section_name (&xcoff_bss_section_name,
33830 main_input_filename, ".bss_");
33831 rs6000_gen_section_name (&xcoff_private_data_section_name,
33832 main_input_filename, ".rw_");
33833 rs6000_gen_section_name (&xcoff_read_only_section_name,
33834 main_input_filename, ".ro_");
33835 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33836 main_input_filename, ".tls_");
33837 rs6000_gen_section_name (&xcoff_tbss_section_name,
33838 main_input_filename, ".tbss_[UL]");
33840 fputs ("\t.file\t", asm_out_file);
33841 output_quoted_string (asm_out_file, main_input_filename);
33842 fputc ('\n', asm_out_file);
33843 if (write_symbols != NO_DEBUG)
33844 switch_to_section (private_data_section);
33845 switch_to_section (toc_section);
33846 switch_to_section (text_section);
33847 if (profile_flag)
33848 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33849 rs6000_file_start ();
33852 /* Output at end of assembler file.
33853 On the RS/6000, referencing data should automatically pull in text. */
33855 static void
33856 rs6000_xcoff_file_end (void)
33858 switch_to_section (text_section);
33859 fputs ("_section_.text:\n", asm_out_file);
33860 switch_to_section (data_section);
33861 fputs (TARGET_32BIT
33862 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33863 asm_out_file);
33866 struct declare_alias_data
33868 FILE *file;
33869 bool function_descriptor;
33872 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33874 static bool
33875 rs6000_declare_alias (struct symtab_node *n, void *d)
33877 struct declare_alias_data *data = (struct declare_alias_data *)d;
33878 /* The main symbol is output specially, because the varasm machinery does
33879 part of the job for us - we do not need to declare .globl/.lglobl and such. */
33880 if (!n->alias || n->weakref)
33881 return false;
33883 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33884 return false;
33886 /* Prevent assemble_alias from trying to use .set pseudo operation
33887 that does not behave as expected by the middle-end. */
33888 TREE_ASM_WRITTEN (n->decl) = true;
33890 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33891 char *buffer = (char *) alloca (strlen (name) + 2);
33892 char *p;
33893 int dollar_inside = 0;
33895 strcpy (buffer, name);
33896 p = strchr (buffer, '$');
33897 while (p) {
33898 *p = '_';
33899 dollar_inside++;
33900 p = strchr (p + 1, '$');
33902 if (TREE_PUBLIC (n->decl))
33904 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33906 if (dollar_inside) {
33907 if (data->function_descriptor)
33908 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33909 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33911 if (data->function_descriptor)
33913 fputs ("\t.globl .", data->file);
33914 RS6000_OUTPUT_BASENAME (data->file, buffer);
33915 putc ('\n', data->file);
33917 fputs ("\t.globl ", data->file);
33918 RS6000_OUTPUT_BASENAME (data->file, buffer);
33919 putc ('\n', data->file);
33921 #ifdef ASM_WEAKEN_DECL
33922 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33923 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33924 #endif
33926 else
33928 if (dollar_inside)
33930 if (data->function_descriptor)
33931 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33932 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33934 if (data->function_descriptor)
33936 fputs ("\t.lglobl .", data->file);
33937 RS6000_OUTPUT_BASENAME (data->file, buffer);
33938 putc ('\n', data->file);
33940 fputs ("\t.lglobl ", data->file);
33941 RS6000_OUTPUT_BASENAME (data->file, buffer);
33942 putc ('\n', data->file);
33944 if (data->function_descriptor)
33945 fputs (".", data->file);
33946 RS6000_OUTPUT_BASENAME (data->file, buffer);
33947 fputs (":\n", data->file);
33948 return false;
33952 #ifdef HAVE_GAS_HIDDEN
33953 /* Helper function to calculate visibility of a DECL
33954 and return the value as a const string. */
33956 static const char *
33957 rs6000_xcoff_visibility (tree decl)
33959 static const char * const visibility_types[] = {
33960 "", ",protected", ",hidden", ",internal"
33963 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33965 if (TREE_CODE (decl) == FUNCTION_DECL
33966 && cgraph_node::get (decl)
33967 && cgraph_node::get (decl)->instrumentation_clone
33968 && cgraph_node::get (decl)->instrumented_version)
33969 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33971 return visibility_types[vis];
33973 #endif
33976 /* This macro produces the initial definition of a function name.
33977 On the RS/6000, we need to place an extra '.' in the function name and
33978 output the function descriptor.
33979 Dollar signs are converted to underscores.
33981 The csect for the function will have already been created when
33982 text_section was selected. We do have to go back to that csect, however.
33984 The third and fourth parameters to the .function pseudo-op (16 and 044)
33985 are placeholders which no longer have any use.
33987 Because the AIX assembler's .set command has unexpected semantics, we output
33988 all aliases as alternative labels in front of the definition. */
33990 void
33991 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33993 char *buffer = (char *) alloca (strlen (name) + 1);
33994 char *p;
33995 int dollar_inside = 0;
33996 struct declare_alias_data data = {file, false};
33998 strcpy (buffer, name);
33999 p = strchr (buffer, '$');
34000 while (p) {
34001 *p = '_';
34002 dollar_inside++;
34003 p = strchr (p + 1, '$');
34005 if (TREE_PUBLIC (decl))
34007 if (!RS6000_WEAK || !DECL_WEAK (decl))
34009 if (dollar_inside) {
34010 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34011 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34013 fputs ("\t.globl .", file);
34014 RS6000_OUTPUT_BASENAME (file, buffer);
34015 #ifdef HAVE_GAS_HIDDEN
34016 fputs (rs6000_xcoff_visibility (decl), file);
34017 #endif
34018 putc ('\n', file);
34021 else
34023 if (dollar_inside) {
34024 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34025 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34027 fputs ("\t.lglobl .", file);
34028 RS6000_OUTPUT_BASENAME (file, buffer);
34029 putc ('\n', file);
34031 fputs ("\t.csect ", file);
34032 RS6000_OUTPUT_BASENAME (file, buffer);
34033 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34034 RS6000_OUTPUT_BASENAME (file, buffer);
34035 fputs (":\n", file);
34036 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34037 &data, true);
34038 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34039 RS6000_OUTPUT_BASENAME (file, buffer);
34040 fputs (", TOC[tc0], 0\n", file);
34041 in_section = NULL;
34042 switch_to_section (function_section (decl));
34043 putc ('.', file);
34044 RS6000_OUTPUT_BASENAME (file, buffer);
34045 fputs (":\n", file);
34046 data.function_descriptor = true;
34047 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34048 &data, true);
34049 if (!DECL_IGNORED_P (decl))
34051 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34052 xcoffout_declare_function (file, decl, buffer);
34053 else if (write_symbols == DWARF2_DEBUG)
34055 name = (*targetm.strip_name_encoding) (name);
34056 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34059 return;
34063 /* Output assembly language to globalize a symbol from a DECL,
34064 possibly with visibility. */
34066 void
34067 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34069 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34070 fputs (GLOBAL_ASM_OP, stream);
34071 RS6000_OUTPUT_BASENAME (stream, name);
34072 #ifdef HAVE_GAS_HIDDEN
34073 fputs (rs6000_xcoff_visibility (decl), stream);
34074 #endif
34075 putc ('\n', stream);
34078 /* Output assembly language to define a symbol as COMMON from a DECL,
34079 possibly with visibility. */
34081 void
34082 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34083 tree decl ATTRIBUTE_UNUSED,
34084 const char *name,
34085 unsigned HOST_WIDE_INT size,
34086 unsigned HOST_WIDE_INT align)
34088 unsigned HOST_WIDE_INT align2 = 2;
34090 if (align > 32)
34091 align2 = floor_log2 (align / BITS_PER_UNIT);
34092 else if (size > 4)
34093 align2 = 3;
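/* e.g. a 16-byte object with 128-bit alignment gets align2 == 4
   (log2 of its 16-byte alignment); an 8-byte object with default
   alignment gets align2 == 3. */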
34095 fputs (COMMON_ASM_OP, stream);
34096 RS6000_OUTPUT_BASENAME (stream, name);
34098 fprintf (stream,
34099 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34100 size, align2);
34102 #ifdef HAVE_GAS_HIDDEN
34103 fputs (rs6000_xcoff_visibility (decl), stream);
34104 #endif
34105 putc ('\n', stream);
34108 /* This macro produces the initial definition of an object (variable) name.
34109 Because the AIX assembler's .set command has unexpected semantics, we output
34110 all aliases as alternative labels in front of the definition. */
34112 void
34113 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34115 struct declare_alias_data data = {file, false};
34116 RS6000_OUTPUT_BASENAME (file, name);
34117 fputs (":\n", file);
34118 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34119 &data, true);
34122 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34124 void
34125 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34127 fputs (integer_asm_op (size, FALSE), file);
34128 assemble_name (file, label);
34129 fputs ("-$", file);
34132 /* Output a symbol offset relative to the dbase for the current object.
34133 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34134 signed offsets.
34136 __gcc_unwind_dbase is embedded in all executables/libraries through
34137 libgcc/config/rs6000/crtdbase.S. */
34139 void
34140 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34142 fputs (integer_asm_op (size, FALSE), file);
34143 assemble_name (file, label);
34144 fputs("-__gcc_unwind_dbase", file);
34147 #ifdef HAVE_AS_TLS
34148 static void
34149 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34151 rtx symbol;
34152 int flags;
34153 const char *symname;
34155 default_encode_section_info (decl, rtl, first);
34157 /* Careful not to prod global register variables. */
34158 if (!MEM_P (rtl))
34159 return;
34160 symbol = XEXP (rtl, 0);
34161 if (GET_CODE (symbol) != SYMBOL_REF)
34162 return;
34164 flags = SYMBOL_REF_FLAGS (symbol);
34166 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34167 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34169 SYMBOL_REF_FLAGS (symbol) = flags;
34171 /* Append mapping class to extern decls. */
34172 symname = XSTR (symbol, 0);
34173 if (decl /* sync condition with assemble_external () */
34174 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34175 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34176 || TREE_CODE (decl) == FUNCTION_DECL)
34177 && symname[strlen (symname) - 1] != ']')
34179 char *newname = (char *) alloca (strlen (symname) + 5);
34180 strcpy (newname, symname);
34181 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34182 ? "[DS]" : "[UA]"));
34183 XSTR (symbol, 0) = ggc_strdup (newname);
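/* e.g. an extern function "bar" is renamed "bar[DS]", and an extern
   non-TLS variable "v" becomes "v[UA]". */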
34186 #endif /* HAVE_AS_TLS */
34187 #endif /* TARGET_XCOFF */
34189 void
34190 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34191 const char *name, const char *val)
34193 fputs ("\t.weak\t", stream);
34194 RS6000_OUTPUT_BASENAME (stream, name);
34195 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34196 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34198 if (TARGET_XCOFF)
34199 fputs ("[DS]", stream);
34200 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34201 if (TARGET_XCOFF)
34202 fputs (rs6000_xcoff_visibility (decl), stream);
34203 #endif
34204 fputs ("\n\t.weak\t.", stream);
34205 RS6000_OUTPUT_BASENAME (stream, name);
34207 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34208 if (TARGET_XCOFF)
34209 fputs (rs6000_xcoff_visibility (decl), stream);
34210 #endif
34211 fputc ('\n', stream);
34212 if (val)
34214 #ifdef ASM_OUTPUT_DEF
34215 ASM_OUTPUT_DEF (stream, name, val);
34216 #endif
34217 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34218 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34220 fputs ("\t.set\t.", stream);
34221 RS6000_OUTPUT_BASENAME (stream, name);
34222 fputs (",.", stream);
34223 RS6000_OUTPUT_BASENAME (stream, val);
34224 fputc ('\n', stream);
34230 /* Return true if INSN should not be copied. */
34232 static bool
34233 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34235 return recog_memoized (insn) >= 0
34236 && get_attr_cannot_copy (insn);
34239 /* Compute a (partial) cost for rtx X. Return true if the complete
34240 cost has been computed, and false if subexpressions should be
34241 scanned. In either case, *TOTAL contains the cost result. */
34243 static bool
34244 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34245 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34247 int code = GET_CODE (x);
34249 switch (code)
34251 /* On the RS/6000, if it is valid in the insn, it is free. */
34252 case CONST_INT:
34253 if (((outer_code == SET
34254 || outer_code == PLUS
34255 || outer_code == MINUS)
34256 && (satisfies_constraint_I (x)
34257 || satisfies_constraint_L (x)))
34258 || (outer_code == AND
34259 && (satisfies_constraint_K (x)
34260 || (mode == SImode
34261 ? satisfies_constraint_L (x)
34262 : satisfies_constraint_J (x))))
34263 || ((outer_code == IOR || outer_code == XOR)
34264 && (satisfies_constraint_K (x)
34265 || (mode == SImode
34266 ? satisfies_constraint_L (x)
34267 : satisfies_constraint_J (x))))
34268 || outer_code == ASHIFT
34269 || outer_code == ASHIFTRT
34270 || outer_code == LSHIFTRT
34271 || outer_code == ROTATE
34272 || outer_code == ROTATERT
34273 || outer_code == ZERO_EXTRACT
34274 || (outer_code == MULT
34275 && satisfies_constraint_I (x))
34276 || ((outer_code == DIV || outer_code == UDIV
34277 || outer_code == MOD || outer_code == UMOD)
34278 && exact_log2 (INTVAL (x)) >= 0)
34279 || (outer_code == COMPARE
34280 && (satisfies_constraint_I (x)
34281 || satisfies_constraint_K (x)))
34282 || ((outer_code == EQ || outer_code == NE)
34283 && (satisfies_constraint_I (x)
34284 || satisfies_constraint_K (x)
34285 || (mode == SImode
34286 ? satisfies_constraint_L (x)
34287 : satisfies_constraint_J (x))))
34288 || (outer_code == GTU
34289 && satisfies_constraint_I (x))
34290 || (outer_code == LTU
34291 && satisfies_constraint_P (x)))
34293 *total = 0;
34294 return true;
34296 else if ((outer_code == PLUS
34297 && reg_or_add_cint_operand (x, VOIDmode))
34298 || (outer_code == MINUS
34299 && reg_or_sub_cint_operand (x, VOIDmode))
34300 || ((outer_code == SET
34301 || outer_code == IOR
34302 || outer_code == XOR)
34303 && (INTVAL (x)
34304 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34306 *total = COSTS_N_INSNS (1);
34307 return true;
34309 /* FALLTHRU */
34311 case CONST_DOUBLE:
34312 case CONST_WIDE_INT:
34313 case CONST:
34314 case HIGH:
34315 case SYMBOL_REF:
34316 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34317 return true;
34319 case MEM:
34320 /* When optimizing for size, MEM should be slightly more expensive
34321 than generating the address, e.g., (plus (reg) (const)).
34322 L1 cache latency is about two instructions. */
34323 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34324 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34325 *total += COSTS_N_INSNS (100);
34326 return true;
34328 case LABEL_REF:
34329 *total = 0;
34330 return true;
34332 case PLUS:
34333 case MINUS:
34334 if (FLOAT_MODE_P (mode))
34335 *total = rs6000_cost->fp;
34336 else
34337 *total = COSTS_N_INSNS (1);
34338 return false;
34340 case MULT:
34341 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34342 && satisfies_constraint_I (XEXP (x, 1)))
34344 if (INTVAL (XEXP (x, 1)) >= -256
34345 && INTVAL (XEXP (x, 1)) <= 255)
34346 *total = rs6000_cost->mulsi_const9;
34347 else
34348 *total = rs6000_cost->mulsi_const;
34350 else if (mode == SFmode)
34351 *total = rs6000_cost->fp;
34352 else if (FLOAT_MODE_P (mode))
34353 *total = rs6000_cost->dmul;
34354 else if (mode == DImode)
34355 *total = rs6000_cost->muldi;
34356 else
34357 *total = rs6000_cost->mulsi;
34358 return false;
34360 case FMA:
34361 if (mode == SFmode)
34362 *total = rs6000_cost->fp;
34363 else
34364 *total = rs6000_cost->dmul;
34365 break;
34367 case DIV:
34368 case MOD:
34369 if (FLOAT_MODE_P (mode))
34371 *total = mode == DFmode ? rs6000_cost->ddiv
34372 : rs6000_cost->sdiv;
34373 return false;
34375 /* FALLTHRU */
34377 case UDIV:
34378 case UMOD:
34379 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34380 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34382 if (code == DIV || code == MOD)
34383 /* Shift, addze */
34384 *total = COSTS_N_INSNS (2);
34385 else
34386 /* Shift */
34387 *total = COSTS_N_INSNS (1);
34389 else
34391 if (GET_MODE (XEXP (x, 1)) == DImode)
34392 *total = rs6000_cost->divdi;
34393 else
34394 *total = rs6000_cost->divsi;
34396 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34397 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34398 *total += COSTS_N_INSNS (2);
34399 return false;
34401 case CTZ:
34402 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34403 return false;
34405 case FFS:
34406 *total = COSTS_N_INSNS (4);
34407 return false;
34409 case POPCOUNT:
34410 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34411 return false;
34413 case PARITY:
34414 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34415 return false;
34417 case NOT:
34418 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34419 *total = 0;
34420 else
34421 *total = COSTS_N_INSNS (1);
34422 return false;
34424 case AND:
34425 if (CONST_INT_P (XEXP (x, 1)))
34427 rtx left = XEXP (x, 0);
34428 rtx_code left_code = GET_CODE (left);
34430 /* rotate-and-mask: 1 insn. */
34431 if ((left_code == ROTATE
34432 || left_code == ASHIFT
34433 || left_code == LSHIFTRT)
34434 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34436 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34437 if (!CONST_INT_P (XEXP (left, 1)))
34438 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34439 *total += COSTS_N_INSNS (1);
34440 return true;
34443 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34444 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34445 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34446 || (val & 0xffff) == val
34447 || (val & 0xffff0000) == val
34448 || ((val & 0xffff) == 0 && mode == SImode))
34450 *total = rtx_cost (left, mode, AND, 0, speed);
34451 *total += COSTS_N_INSNS (1);
34452 return true;
34455 /* 2 insns. */
34456 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34458 *total = rtx_cost (left, mode, AND, 0, speed);
34459 *total += COSTS_N_INSNS (2);
34460 return true;
34464 *total = COSTS_N_INSNS (1);
34465 return false;
34467 case IOR:
34468 /* FIXME */
34469 *total = COSTS_N_INSNS (1);
34470 return true;
34472 case CLZ:
34473 case XOR:
34474 case ZERO_EXTRACT:
34475 *total = COSTS_N_INSNS (1);
34476 return false;
34478 case ASHIFT:
34479 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34480 the sign extend and shift separately within the insn. */
34481 if (TARGET_EXTSWSLI && mode == DImode
34482 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34483 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34485 *total = 0;
34486 return false;
34488 /* fall through */
34490 case ASHIFTRT:
34491 case LSHIFTRT:
34492 case ROTATE:
34493 case ROTATERT:
34494 /* Handle mul_highpart. */
34495 if (outer_code == TRUNCATE
34496 && GET_CODE (XEXP (x, 0)) == MULT)
34498 if (mode == DImode)
34499 *total = rs6000_cost->muldi;
34500 else
34501 *total = rs6000_cost->mulsi;
34502 return true;
34504 else if (outer_code == AND)
34505 *total = 0;
34506 else
34507 *total = COSTS_N_INSNS (1);
34508 return false;
34510 case SIGN_EXTEND:
34511 case ZERO_EXTEND:
34512 if (GET_CODE (XEXP (x, 0)) == MEM)
34513 *total = 0;
34514 else
34515 *total = COSTS_N_INSNS (1);
34516 return false;
34518 case COMPARE:
34519 case NEG:
34520 case ABS:
34521 if (!FLOAT_MODE_P (mode))
34523 *total = COSTS_N_INSNS (1);
34524 return false;
34526 /* FALLTHRU */
34528 case FLOAT:
34529 case UNSIGNED_FLOAT:
34530 case FIX:
34531 case UNSIGNED_FIX:
34532 case FLOAT_TRUNCATE:
34533 *total = rs6000_cost->fp;
34534 return false;
34536 case FLOAT_EXTEND:
34537 if (mode == DFmode)
34538 *total = rs6000_cost->sfdf_convert;
34539 else
34540 *total = rs6000_cost->fp;
34541 return false;
34543 case UNSPEC:
34544 switch (XINT (x, 1))
34546 case UNSPEC_FRSP:
34547 *total = rs6000_cost->fp;
34548 return true;
34550 default:
34551 break;
34553 break;
34555 case CALL:
34556 case IF_THEN_ELSE:
34557 if (!speed)
34559 *total = COSTS_N_INSNS (1);
34560 return true;
34562 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34564 *total = rs6000_cost->fp;
34565 return false;
34567 break;
34569 case NE:
34570 case EQ:
34571 case GTU:
34572 case LTU:
34573 /* Carry bit requires mode == Pmode.
34574 NEG or PLUS already counted so only add one. */
34575 if (mode == Pmode
34576 && (outer_code == NEG || outer_code == PLUS))
34578 *total = COSTS_N_INSNS (1);
34579 return true;
34581 if (outer_code == SET)
34583 if (XEXP (x, 1) == const0_rtx)
34585 if (TARGET_ISEL && !TARGET_MFCRF)
34586 *total = COSTS_N_INSNS (8);
34587 else
34588 *total = COSTS_N_INSNS (2);
34589 return true;
34591 else
34593 *total = COSTS_N_INSNS (3);
34594 return false;
34597 /* FALLTHRU */
34599 case GT:
34600 case LT:
34601 case UNORDERED:
34602 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34604 if (TARGET_ISEL && !TARGET_MFCRF)
34605 *total = COSTS_N_INSNS (8);
34606 else
34607 *total = COSTS_N_INSNS (2);
34608 return true;
34610 /* CC COMPARE. */
34611 if (outer_code == COMPARE)
34613 *total = 0;
34614 return true;
34616 break;
34618 default:
34619 break;
34622 return false;
34625 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34627 static bool
34628 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34629 int opno, int *total, bool speed)
34631 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34633 fprintf (stderr,
34634 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34635 "opno = %d, total = %d, speed = %s, x:\n",
34636 ret ? "complete" : "scan inner",
34637 GET_MODE_NAME (mode),
34638 GET_RTX_NAME (outer_code),
34639 opno,
34640 *total,
34641 speed ? "true" : "false");
34643 debug_rtx (x);
34645 return ret;
34648 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34650 static int
34651 rs6000_debug_address_cost (rtx x, machine_mode mode,
34652 addr_space_t as, bool speed)
34654 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34656 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34657 ret, speed ? "true" : "false");
34658 debug_rtx (x);
34660 return ret;
34664 /* A C expression returning the cost of moving data from a register of
34665 class FROM to one of class TO. */
34667 static int
34668 rs6000_register_move_cost (machine_mode mode,
34669 reg_class_t from, reg_class_t to)
34671 int ret;
34673 if (TARGET_DEBUG_COST)
34674 dbg_cost_ctrl++;
34676 /* Moves from/to GENERAL_REGS. */
34677 if (reg_classes_intersect_p (to, GENERAL_REGS)
34678 || reg_classes_intersect_p (from, GENERAL_REGS))
34680 reg_class_t rclass = from;
34682 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34683 rclass = to;
34685 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34686 ret = (rs6000_memory_move_cost (mode, rclass, false)
34687 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34689 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34690 shift. */
34691 else if (rclass == CR_REGS)
34692 ret = 4;
34694 /* For those processors that have slow LR/CTR moves, make them more
34695 expensive than memory in order to bias spills to memory. */
34696 else if ((rs6000_cpu == PROCESSOR_POWER6
34697 || rs6000_cpu == PROCESSOR_POWER7
34698 || rs6000_cpu == PROCESSOR_POWER8
34699 || rs6000_cpu == PROCESSOR_POWER9)
34700 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34701 ret = 6 * hard_regno_nregs (0, mode);
34703 else
34704 /* A move will cost one instruction per GPR moved. */
34705 ret = 2 * hard_regno_nregs (0, mode);
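/* (In the two GPR cases above, hard_regno_nregs (0, mode) gives the
   number of GPRs MODE occupies; register 0 stands in for any GPR.) */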
34708 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34709 else if (VECTOR_MEM_VSX_P (mode)
34710 && reg_classes_intersect_p (to, VSX_REGS)
34711 && reg_classes_intersect_p (from, VSX_REGS))
34712 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34714 /* Moving between two similar registers is just one instruction. */
34715 else if (reg_classes_intersect_p (to, from))
34716 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34718 /* Everything else has to go through GENERAL_REGS. */
34719 else
34720 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34721 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34723 if (TARGET_DEBUG_COST)
34725 if (dbg_cost_ctrl == 1)
34726 fprintf (stderr,
34727 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34728 ret, GET_MODE_NAME (mode), reg_class_names[from],
34729 reg_class_names[to]);
34730 dbg_cost_ctrl--;
34733 return ret;
34736 /* A C expression returning the cost of moving data of MODE between a
34737 register and memory. */
34739 static int
34740 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34741 bool in ATTRIBUTE_UNUSED)
34743 int ret;
34745 if (TARGET_DEBUG_COST)
34746 dbg_cost_ctrl++;
34748 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34749 ret = 4 * hard_regno_nregs (0, mode);
34750 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34751 || reg_classes_intersect_p (rclass, VSX_REGS)))
34752 ret = 4 * hard_regno_nregs (32, mode);
34753 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34754 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34755 else
34756 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34758 if (TARGET_DEBUG_COST)
34760 if (dbg_cost_ctrl == 1)
34761 fprintf (stderr,
34762 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34763 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34764 dbg_cost_ctrl--;
34767 return ret;
34770 /* Returns a code for a target-specific builtin that implements
34771 reciprocal of the function, or NULL_TREE if not available. */
34773 static tree
34774 rs6000_builtin_reciprocal (tree fndecl)
34776 switch (DECL_FUNCTION_CODE (fndecl))
34778 case VSX_BUILTIN_XVSQRTDP:
34779 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34780 return NULL_TREE;
34782 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34784 case VSX_BUILTIN_XVSQRTSP:
34785 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34786 return NULL_TREE;
34788 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34790 default:
34791 return NULL_TREE;
34795 /* Load up a constant. If the mode is a vector mode, splat the value across
34796 all of the vector elements. */
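/* e.g. for V2DFmode and dconst1 this returns a register holding
   { 1.0, 1.0 }.  */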
34798 static rtx
34799 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34801 rtx reg;
34803 if (mode == SFmode || mode == DFmode)
34805 rtx d = const_double_from_real_value (dconst, mode);
34806 reg = force_reg (mode, d);
34808 else if (mode == V4SFmode)
34810 rtx d = const_double_from_real_value (dconst, SFmode);
34811 rtvec v = gen_rtvec (4, d, d, d, d);
34812 reg = gen_reg_rtx (mode);
34813 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34815 else if (mode == V2DFmode)
34817 rtx d = const_double_from_real_value (dconst, DFmode);
34818 rtvec v = gen_rtvec (2, d, d);
34819 reg = gen_reg_rtx (mode);
34820 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34822 else
34823 gcc_unreachable ();
34825 return reg;
34828 /* Generate an FMA instruction. */
34830 static void
34831 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34833 machine_mode mode = GET_MODE (target);
34834 rtx dst;
34836 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34837 gcc_assert (dst != NULL);
34839 if (dst != target)
34840 emit_move_insn (target, dst);
34843 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34845 static void
34846 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34848 machine_mode mode = GET_MODE (dst);
34849 rtx r;
34851 /* This is a tad more complicated, since the fnma_optab is for
34852 a different expression: fma(-m1, m2, a), which is the same
34853 thing except in the case of signed zeros.
34855 Fortunately we know that if FMA is supported that FNMSUB is
34856 also supported in the ISA. Just expand it directly. */
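/* In exact arithmetic -fma (m1, m2, -a) == a - m1*m2 == -(m1*m2 - a),
   matching fnmsub; fma (-m1, m2, a) differs only for signed zeros. */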
34858 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34860 r = gen_rtx_NEG (mode, a);
34861 r = gen_rtx_FMA (mode, m1, m2, r);
34862 r = gen_rtx_NEG (mode, r);
34863 emit_insn (gen_rtx_SET (dst, r));
34866 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34867 add a reg_note saying that this was a division. Support both scalar and
34868 vector divide. Assumes no trapping math and finite arguments. */
34870 void
34871 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34873 machine_mode mode = GET_MODE (dst);
34874 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34875 int i;
34877 /* Low precision estimates guarantee 5 bits of accuracy. High
34878 precision estimates guarantee 14 bits of accuracy. SFmode
34879 requires 23 bits of accuracy. DFmode requires 52 bits of
34880 accuracy. Each pass at least doubles the accuracy, leading
34881 to the following. */
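/* Concretely: a 14-bit estimate reaches 28 bits after one pass (enough
   for SFmode's 23) and 56 bits after two (enough for DFmode's 52); a
   5-bit estimate needs three passes for SFmode (5 -> 10 -> 20 -> 40
   bits) and a fourth pass for DFmode (80 bits).  */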
34882 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34883 if (mode == DFmode || mode == V2DFmode)
34884 passes++;
34886 enum insn_code code = optab_handler (smul_optab, mode);
34887 insn_gen_fn gen_mul = GEN_FCN (code);
34889 gcc_assert (code != CODE_FOR_nothing);
34891 one = rs6000_load_constant_and_splat (mode, dconst1);
34893 /* x0 = 1./d estimate */
34894 x0 = gen_reg_rtx (mode);
34895 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34896 UNSPEC_FRES)));
34898 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
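/* Writing e_i = 1 - d * x_i, this is x_(i+1) = x_i + e_i * x_i, and
   the new error is e_(i+1) = 1 - d * x_(i+1) = e_i - (1 - e_i) * e_i
   = e_i * e_i, so each additional pass below squares the error term
   at the cost of a single multiply.  */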
34899 if (passes > 1) {
34901 /* e0 = 1. - d * x0 */
34902 e0 = gen_reg_rtx (mode);
34903 rs6000_emit_nmsub (e0, d, x0, one);
34905 /* x1 = x0 + e0 * x0 */
34906 x1 = gen_reg_rtx (mode);
34907 rs6000_emit_madd (x1, e0, x0, x0);
34909 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34910 ++i, xprev = xnext, eprev = enext) {
34912 /* enext = eprev * eprev */
34913 enext = gen_reg_rtx (mode);
34914 emit_insn (gen_mul (enext, eprev, eprev));
34916 /* xnext = xprev + enext * xprev */
34917 xnext = gen_reg_rtx (mode);
34918 rs6000_emit_madd (xnext, enext, xprev, xprev);
34921 } else
34922 xprev = x0;
34924 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
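/* With u = n * x and v = n - d * u, the result v * x + u expands to
   n * x * (1 - d * x) + n * x = n * x * (2 - d * x), i.e. the final
   Newton-Raphson step with the multiplication by N folded in.  */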
34926 /* u = n * xprev */
34927 u = gen_reg_rtx (mode);
34928 emit_insn (gen_mul (u, n, xprev));
34930 /* v = n - (d * u) */
34931 v = gen_reg_rtx (mode);
34932 rs6000_emit_nmsub (v, d, u, n);
34934 /* dst = (v * xprev) + u */
34935 rs6000_emit_madd (dst, v, xprev, u);
34937 if (note_p)
34938 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34941 /* Goldschmidt's Algorithm for single/double-precision floating point
34942 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34944 void
34945 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34947 machine_mode mode = GET_MODE (src);
34948 rtx e = gen_reg_rtx (mode);
34949 rtx g = gen_reg_rtx (mode);
34950 rtx h = gen_reg_rtx (mode);
34952 /* Low precision estimates guarantee 5 bits of accuracy. High
34953 precision estimates guarantee 14 bits of accuracy. SFmode
34954 requires 23 bits of accuracy. DFmode requires 52 bits of
34955 accuracy. Each pass at least doubles the accuracy, leading
34956 to the following. */
34957 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34958 if (mode == DFmode || mode == V2DFmode)
34959 passes++;
34961 int i;
34962 rtx mhalf;
34963 enum insn_code code = optab_handler (smul_optab, mode);
34964 insn_gen_fn gen_mul = GEN_FCN (code);
34966 gcc_assert (code != CODE_FOR_nothing);
34968 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34970 /* e = rsqrt estimate */
34971 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34972 UNSPEC_RSQRT)));
34974 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
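/* The rsqrt estimate of 0.0 is +Inf, and multiplying it by SRC == 0.0
   below would produce a NaN; clamping the estimate to 0.0 when SRC is
   not greater than zero makes g = h = 0.0, which yields the correct
   result sqrt(0.0) == 0.0.  */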
34975 if (!recip)
34977 rtx zero = force_reg (mode, CONST0_RTX (mode));
34979 if (mode == SFmode)
34981 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34982 e, zero, mode, 0);
34983 if (target != e)
34984 emit_move_insn (e, target);
34986 else
34988 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34989 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34993 /* g = sqrt estimate. */
34994 emit_insn (gen_mul (g, e, src));
34995 /* h = 1/(2*sqrt) estimate. */
34996 emit_insn (gen_mul (h, e, mhalf));
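/* Goldschmidt invariant: if g = sqrt(src) * (1 + e) and
   h = (1 + e) / (2 * sqrt(src)) share a relative error e, then
   t = 1/2 - g * h = -e - e*e/2, and the updates g' = g + g * t,
   h' = h + h * t reduce the error to order e*e, roughly doubling
   the number of correct bits per pass.  */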
34998 if (recip)
35000 if (passes == 1)
35002 rtx t = gen_reg_rtx (mode);
35003 rs6000_emit_nmsub (t, g, h, mhalf);
35004 /* Apply correction directly to 1/rsqrt estimate. */
35005 rs6000_emit_madd (dst, e, t, e);
35007 else
35009 for (i = 0; i < passes; i++)
35011 rtx t1 = gen_reg_rtx (mode);
35012 rtx g1 = gen_reg_rtx (mode);
35013 rtx h1 = gen_reg_rtx (mode);
35015 rs6000_emit_nmsub (t1, g, h, mhalf);
35016 rs6000_emit_madd (g1, g, t1, g);
35017 rs6000_emit_madd (h1, h, t1, h);
35019 g = g1;
35020 h = h1;
35022 /* Multiply by 2 for 1/rsqrt. */
35023 emit_insn (gen_add3_insn (dst, h, h));
35026 else
35028 rtx t = gen_reg_rtx (mode);
35029 rs6000_emit_nmsub (t, g, h, mhalf);
35030 rs6000_emit_madd (dst, g, t, g);
35033 return;
35036 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35037 (Power7) targets. DST is the target, and SRC is the argument operand. */
35039 void
35040 rs6000_emit_popcount (rtx dst, rtx src)
35042 machine_mode mode = GET_MODE (dst);
35043 rtx tmp1, tmp2;
35045 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35046 if (TARGET_POPCNTD)
35048 if (mode == SImode)
35049 emit_insn (gen_popcntdsi2 (dst, src));
35050 else
35051 emit_insn (gen_popcntddi2 (dst, src));
35052 return;
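/* Otherwise use popcntb, which counts the bits within each byte
   separately.  Multiplying the byte counts by 0x01...01 accumulates
   their sum into the most significant byte, which the final shift
   extracts.  E.g. src = 0xffff0001 has byte counts 0x08080001, whose
   product with 0x01010101 carries 0x11 == 17 in the top byte -- the
   popcount of src.  */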
35055 tmp1 = gen_reg_rtx (mode);
35057 if (mode == SImode)
35059 emit_insn (gen_popcntbsi2 (tmp1, src));
35060 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35061 NULL_RTX, 0);
35062 tmp2 = force_reg (SImode, tmp2);
35063 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35065 else
35067 emit_insn (gen_popcntbdi2 (tmp1, src));
35068 tmp2 = expand_mult (DImode, tmp1,
35069 GEN_INT ((HOST_WIDE_INT)
35070 0x01010101 << 32 | 0x01010101),
35071 NULL_RTX, 0);
35072 tmp2 = force_reg (DImode, tmp2);
35073 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35078 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35079 target, and SRC is the argument operand. */
35081 void
35082 rs6000_emit_parity (rtx dst, rtx src)
35084 machine_mode mode = GET_MODE (dst);
35085 rtx tmp;
35087 tmp = gen_reg_rtx (mode);
35089 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35090 if (TARGET_CMPB)
35092 if (mode == SImode)
35094 emit_insn (gen_popcntbsi2 (tmp, src));
35095 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35097 else
35099 emit_insn (gen_popcntbdi2 (tmp, src));
35100 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35102 return;
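/* Otherwise derive the parity from the popcntb byte counts: the
   parity of SRC is the XOR of the low bits of the per-byte counts,
   so either sum the bytes with the multiply trick used in
   rs6000_emit_popcount or fold them together with shift/XOR pairs,
   whichever the cost model prefers, then mask the result with 1.  */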
35105 if (mode == SImode)
35107 /* Is mult+shift >= shift+xor+shift+xor? */
35108 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35110 rtx tmp1, tmp2, tmp3, tmp4;
35112 tmp1 = gen_reg_rtx (SImode);
35113 emit_insn (gen_popcntbsi2 (tmp1, src));
35115 tmp2 = gen_reg_rtx (SImode);
35116 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35117 tmp3 = gen_reg_rtx (SImode);
35118 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35120 tmp4 = gen_reg_rtx (SImode);
35121 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35122 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35124 else
35125 rs6000_emit_popcount (tmp, src);
35126 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35128 else
35130 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35131 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35133 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35135 tmp1 = gen_reg_rtx (DImode);
35136 emit_insn (gen_popcntbdi2 (tmp1, src));
35138 tmp2 = gen_reg_rtx (DImode);
35139 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35140 tmp3 = gen_reg_rtx (DImode);
35141 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35143 tmp4 = gen_reg_rtx (DImode);
35144 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35145 tmp5 = gen_reg_rtx (DImode);
35146 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35148 tmp6 = gen_reg_rtx (DImode);
35149 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35150 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35152 else
35153 rs6000_emit_popcount (tmp, src);
35154 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35158 /* Expand an Altivec constant permutation for little endian mode.
35159 There are two issues: First, the two input operands must be
35160 swapped so that together they form a double-wide array in LE
35161 order. Second, the vperm instruction has surprising behavior
35162 in LE mode: it interprets the elements of the source vectors
35163 in BE mode ("left to right") and interprets the elements of
35164 the destination vector in LE mode ("right to left"). To
35165 correct for this, we must subtract each element of the permute
35166 control vector from 31.
35168 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35169 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35170 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35171 serve as the permute control vector. Then, in BE mode,
35173 vperm 9,10,11,12
35175 places the desired result in vr9. However, in LE mode the
35176 vector contents will be
35178 vr10 = 00000003 00000002 00000001 00000000
35179 vr11 = 00000007 00000006 00000005 00000004
35181 The result of the vperm using the same permute control vector is
35183 vr9 = 05000000 07000000 01000000 03000000
35185 That is, the leftmost 4 bytes of vr10 are interpreted as the
35186 source for the rightmost 4 bytes of vr9, and so on.
35188 If we change the permute control vector to
35190 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35192 and issue
35194 vperm 9,11,10,12
35196 we get the desired
35198 vr9 = 00000006 00000004 00000002 00000000. */
35200 void
35201 altivec_expand_vec_perm_const_le (rtx operands[4])
35203 unsigned int i;
35204 rtx perm[16];
35205 rtx constv, unspec;
35206 rtx target = operands[0];
35207 rtx op0 = operands[1];
35208 rtx op1 = operands[2];
35209 rtx sel = operands[3];
35211 /* Unpack and adjust the constant selector. */
35212 for (i = 0; i < 16; ++i)
35214 rtx e = XVECEXP (sel, 0, i);
35215 unsigned int elt = 31 - (INTVAL (e) & 31);
35216 perm[i] = GEN_INT (elt);
35219 /* Expand to a permute, swapping the inputs and using the
35220 adjusted selector. */
35221 if (!REG_P (op0))
35222 op0 = force_reg (V16QImode, op0);
35223 if (!REG_P (op1))
35224 op1 = force_reg (V16QImode, op1);
35226 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35227 constv = force_reg (V16QImode, constv);
35228 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35229 UNSPEC_VPERM);
35230 if (!REG_P (target))
35232 rtx tmp = gen_reg_rtx (V16QImode);
35233 emit_move_insn (tmp, unspec);
35234 unspec = tmp;
35237 emit_move_insn (target, unspec);
35240 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35241 permute control vector. But here it's not a constant, so we must
35242 generate a vector NAND or NOR to do the adjustment. */
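/* This works because vperm examines only the low 5 bits of each
   selector byte, and for 0 <= x <= 31 we have 31 - x == ~x (mod 32),
   so a bitwise complement performs the same subtract-from-31
   adjustment as in the constant case above.  */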
35244 void
35245 altivec_expand_vec_perm_le (rtx operands[4])
35247 rtx notx, iorx, unspec;
35248 rtx target = operands[0];
35249 rtx op0 = operands[1];
35250 rtx op1 = operands[2];
35251 rtx sel = operands[3];
35252 rtx tmp = target;
35253 rtx norreg = gen_reg_rtx (V16QImode);
35254 machine_mode mode = GET_MODE (target);
35256 /* Get everything in regs so the pattern matches. */
35257 if (!REG_P (op0))
35258 op0 = force_reg (mode, op0);
35259 if (!REG_P (op1))
35260 op1 = force_reg (mode, op1);
35261 if (!REG_P (sel))
35262 sel = force_reg (V16QImode, sel);
35263 if (!REG_P (target))
35264 tmp = gen_reg_rtx (mode);
35266 if (TARGET_P9_VECTOR)
35268 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35269 UNSPEC_VPERMR);
35271 else
35273 /* Invert the selector with a VNAND if available, else a VNOR.
35274 The VNAND is preferred for future fusion opportunities. */
35275 notx = gen_rtx_NOT (V16QImode, sel);
35276 iorx = (TARGET_P8_VECTOR
35277 ? gen_rtx_IOR (V16QImode, notx, notx)
35278 : gen_rtx_AND (V16QImode, notx, notx));
35279 emit_insn (gen_rtx_SET (norreg, iorx));
35281 /* Permute with operands reversed and adjusted selector. */
35282 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35283 UNSPEC_VPERM);
35286 /* Copy into target, possibly by way of a register. */
35287 if (!REG_P (target))
35289 emit_move_insn (tmp, unspec);
35290 unspec = tmp;
35293 emit_move_insn (target, unspec);
35296 /* Expand an Altivec constant permutation. Return true if we match
35297 an efficient implementation; false to fall back to VPERM. */
35299 bool
35300 altivec_expand_vec_perm_const (rtx operands[4])
35302 struct altivec_perm_insn {
35303 HOST_WIDE_INT mask;
35304 enum insn_code impl;
35305 unsigned char perm[16];
35307 static const struct altivec_perm_insn patterns[] = {
35308 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35309 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35310 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35311 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35312 { OPTION_MASK_ALTIVEC,
35313 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35314 : CODE_FOR_altivec_vmrglb_direct),
35315 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35316 { OPTION_MASK_ALTIVEC,
35317 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35318 : CODE_FOR_altivec_vmrglh_direct),
35319 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35320 { OPTION_MASK_ALTIVEC,
35321 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35322 : CODE_FOR_altivec_vmrglw_direct),
35323 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35324 { OPTION_MASK_ALTIVEC,
35325 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35326 : CODE_FOR_altivec_vmrghb_direct),
35327 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35328 { OPTION_MASK_ALTIVEC,
35329 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35330 : CODE_FOR_altivec_vmrghh_direct),
35331 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35332 { OPTION_MASK_ALTIVEC,
35333 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35334 : CODE_FOR_altivec_vmrghw_direct),
35335 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35336 { OPTION_MASK_P8_VECTOR,
35337 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35338 : CODE_FOR_p8_vmrgow_v4sf_direct),
35339 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35340 { OPTION_MASK_P8_VECTOR,
35341 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35342 : CODE_FOR_p8_vmrgew_v4sf_direct),
35343 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
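/* Each perm array above gives, for every byte of the result, the index
   of the byte selected from the 32-byte concatenation op0:op1, so
   matching the unpacked selector against a pattern identifies a
   single-instruction implementation.  */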
35346 unsigned int i, j, elt, which;
35347 unsigned char perm[16];
35348 rtx target, op0, op1, sel, x;
35349 bool one_vec;
35351 target = operands[0];
35352 op0 = operands[1];
35353 op1 = operands[2];
35354 sel = operands[3];
35356 /* Unpack the constant selector. */
35357 for (i = which = 0; i < 16; ++i)
35359 rtx e = XVECEXP (sel, 0, i);
35360 elt = INTVAL (e) & 31;
35361 which |= (elt < 16 ? 1 : 2);
35362 perm[i] = elt;
35365 /* Simplify the constant selector based on operands. */
35366 switch (which)
35368 default:
35369 gcc_unreachable ();
35371 case 3:
35372 one_vec = false;
35373 if (!rtx_equal_p (op0, op1))
35374 break;
35375 /* FALLTHRU */
35377 case 2:
35378 for (i = 0; i < 16; ++i)
35379 perm[i] &= 15;
35380 op0 = op1;
35381 one_vec = true;
35382 break;
35384 case 1:
35385 op1 = op0;
35386 one_vec = true;
35387 break;
35390 /* Look for splat patterns. */
35391 if (one_vec)
35393 elt = perm[0];
35395 for (i = 0; i < 16; ++i)
35396 if (perm[i] != elt)
35397 break;
35398 if (i == 16)
35400 if (!BYTES_BIG_ENDIAN)
35401 elt = 15 - elt;
35402 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35403 return true;
35406 if (elt % 2 == 0)
35408 for (i = 0; i < 16; i += 2)
35409 if (perm[i] != elt || perm[i + 1] != elt + 1)
35410 break;
35411 if (i == 16)
35413 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35414 x = gen_reg_rtx (V8HImode);
35415 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35416 GEN_INT (field)));
35417 emit_move_insn (target, gen_lowpart (V16QImode, x));
35418 return true;
35422 if (elt % 4 == 0)
35424 for (i = 0; i < 16; i += 4)
35425 if (perm[i] != elt
35426 || perm[i + 1] != elt + 1
35427 || perm[i + 2] != elt + 2
35428 || perm[i + 3] != elt + 3)
35429 break;
35430 if (i == 16)
35432 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35433 x = gen_reg_rtx (V4SImode);
35434 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35435 GEN_INT (field)));
35436 emit_move_insn (target, gen_lowpart (V16QImode, x));
35437 return true;
35442 /* Look for merge and pack patterns. */
35443 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35445 bool swapped;
35447 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35448 continue;
35450 elt = patterns[j].perm[0];
35451 if (perm[0] == elt)
35452 swapped = false;
35453 else if (perm[0] == elt + 16)
35454 swapped = true;
35455 else
35456 continue;
35457 for (i = 1; i < 16; ++i)
35459 elt = patterns[j].perm[i];
35460 if (swapped)
35461 elt = (elt >= 16 ? elt - 16 : elt + 16);
35462 else if (one_vec && elt >= 16)
35463 elt -= 16;
35464 if (perm[i] != elt)
35465 break;
35467 if (i == 16)
35469 enum insn_code icode = patterns[j].impl;
35470 machine_mode omode = insn_data[icode].operand[0].mode;
35471 machine_mode imode = insn_data[icode].operand[1].mode;
35473 /* For little-endian, don't use vpkuwum and vpkuhum if the
35474 underlying vector type is not V4SI and V8HI, respectively.
35475 For example, using vpkuwum with a V8HI picks up the even
35476 halfwords (BE numbering) when the even halfwords (LE
35477 numbering) are what we need. */
35478 if (!BYTES_BIG_ENDIAN
35479 && icode == CODE_FOR_altivec_vpkuwum_direct
35480 && ((GET_CODE (op0) == REG
35481 && GET_MODE (op0) != V4SImode)
35482 || (GET_CODE (op0) == SUBREG
35483 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35484 continue;
35485 if (!BYTES_BIG_ENDIAN
35486 && icode == CODE_FOR_altivec_vpkuhum_direct
35487 && ((GET_CODE (op0) == REG
35488 && GET_MODE (op0) != V8HImode)
35489 || (GET_CODE (op0) == SUBREG
35490 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35491 continue;
35493 /* For little-endian, the two input operands must be swapped
35494 (or swapped back) to ensure proper right-to-left numbering
35495 from 0 to 2N-1. */
35496 if (swapped ^ !BYTES_BIG_ENDIAN)
35497 std::swap (op0, op1);
35498 if (imode != V16QImode)
35500 op0 = gen_lowpart (imode, op0);
35501 op1 = gen_lowpart (imode, op1);
35503 if (omode == V16QImode)
35504 x = target;
35505 else
35506 x = gen_reg_rtx (omode);
35507 emit_insn (GEN_FCN (icode) (x, op0, op1));
35508 if (omode != V16QImode)
35509 emit_move_insn (target, gen_lowpart (V16QImode, x));
35510 return true;
35514 if (!BYTES_BIG_ENDIAN)
35516 altivec_expand_vec_perm_const_le (operands);
35517 return true;
35520 return false;
35523 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35524 Return true if we match an efficient implementation. */
35526 static bool
35527 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35528 unsigned char perm0, unsigned char perm1)
35530 rtx x;
35532 /* If both selectors come from the same operand, fold to single op. */
35533 if ((perm0 & 2) == (perm1 & 2))
35535 if (perm0 & 2)
35536 op0 = op1;
35537 else
35538 op1 = op0;
35540 /* If both operands are equal, fold to simpler permutation. */
35541 if (rtx_equal_p (op0, op1))
35543 perm0 = perm0 & 1;
35544 perm1 = (perm1 & 1) + 2;
35546 /* If the first selector comes from the second operand, swap. */
35547 else if (perm0 & 2)
35549 if (perm1 & 2)
35550 return false;
35551 perm0 -= 2;
35552 perm1 += 2;
35553 std::swap (op0, op1);
35555 /* If the second selector does not come from the second operand, fail. */
35556 else if ((perm1 & 2) == 0)
35557 return false;
35559 /* Success! */
35560 if (target != NULL)
35562 machine_mode vmode, dmode;
35563 rtvec v;
35565 vmode = GET_MODE (target);
35566 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35567 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35568 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35569 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35570 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35571 emit_insn (gen_rtx_SET (target, x));
35573 return true;
35576 bool
35577 rs6000_expand_vec_perm_const (rtx operands[4])
35579 rtx target, op0, op1, sel;
35580 unsigned char perm0, perm1;
35582 target = operands[0];
35583 op0 = operands[1];
35584 op1 = operands[2];
35585 sel = operands[3];
35587 /* Unpack the constant selector. */
35588 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35589 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35591 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35594 /* Test whether a constant permutation is supported. */
35596 static bool
35597 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35598 const unsigned char *sel)
35600 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35601 if (TARGET_ALTIVEC)
35602 return true;
35604 /* Check for ps_merge* or evmerge* insns. */
35605 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35607 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35608 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35609 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35612 return false;
35615 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35617 static void
35618 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35619 machine_mode vmode, unsigned nelt, rtx perm[])
35621 machine_mode imode;
35622 rtx x;
35624 imode = vmode;
35625 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35626 imode = mode_for_int_vector (vmode).require ();
35628 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35629 x = expand_vec_perm (vmode, op0, op1, x, target);
35630 if (x != target)
35631 emit_move_insn (target, x);
35634 /* Expand an extract even operation. */
35636 void
35637 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35639 machine_mode vmode = GET_MODE (target);
35640 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35641 rtx perm[16];
35643 for (i = 0; i < nelt; i++)
35644 perm[i] = GEN_INT (i * 2);
35646 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35649 /* Expand a vector interleave operation. */
35651 void
35652 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35654 machine_mode vmode = GET_MODE (target);
35655 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35656 rtx perm[16];
35658 high = (highp ? 0 : nelt / 2);
35659 for (i = 0; i < nelt / 2; i++)
35661 perm[i * 2] = GEN_INT (i + high);
35662 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35665 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35668 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35669 void
35670 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35672 HOST_WIDE_INT hwi_scale (scale);
35673 REAL_VALUE_TYPE r_pow;
35674 rtvec v = rtvec_alloc (2);
35675 rtx elt;
35676 rtx scale_vec = gen_reg_rtx (V2DFmode);
35677 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35678 elt = const_double_from_real_value (r_pow, DFmode);
35679 RTVEC_ELT (v, 0) = elt;
35680 RTVEC_ELT (v, 1) = elt;
35681 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35682 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35685 /* Return an RTX representing where to find the function value of a
35686 function returning MODE. */
35687 static rtx
35688 rs6000_complex_function_value (machine_mode mode)
35690 unsigned int regno;
35691 rtx r1, r2;
35692 machine_mode inner = GET_MODE_INNER (mode);
35693 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35695 if (TARGET_FLOAT128_TYPE
35696 && (mode == KCmode
35697 || (mode == TCmode && TARGET_IEEEQUAD)))
35698 regno = ALTIVEC_ARG_RETURN;
35700 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35701 regno = FP_ARG_RETURN;
35703 else
35705 regno = GP_ARG_RETURN;
35707 /* 32-bit is OK since it'll go in r3/r4. */
35708 if (TARGET_32BIT && inner_bytes >= 4)
35709 return gen_rtx_REG (mode, regno);
35712 if (inner_bytes >= 8)
35713 return gen_rtx_REG (mode, regno);
35715 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35716 const0_rtx);
35717 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35718 GEN_INT (inner_bytes));
35719 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35722 /* Return an rtx describing a return value of MODE as a PARALLEL
35723 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35724 stride REG_STRIDE. */
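/* For example, splitting a DImode value for the 32-bit ABI passes
   N_ELTS = 2, ELT_MODE = SImode, REGNO = GP_ARG_RETURN and
   REG_STRIDE = 1, producing a PARALLEL that places the two halves
   in r3 and r4 at byte offsets 0 and 4.  */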
35726 static rtx
35727 rs6000_parallel_return (machine_mode mode,
35728 int n_elts, machine_mode elt_mode,
35729 unsigned int regno, unsigned int reg_stride)
35731 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35733 int i;
35734 for (i = 0; i < n_elts; i++)
35736 rtx r = gen_rtx_REG (elt_mode, regno);
35737 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35738 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35739 regno += reg_stride;
35742 return par;
35745 /* Target hook for TARGET_FUNCTION_VALUE.
35747 An integer value is in r3 and a floating-point value is in fp1,
35748 unless -msoft-float. */
35750 static rtx
35751 rs6000_function_value (const_tree valtype,
35752 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35753 bool outgoing ATTRIBUTE_UNUSED)
35755 machine_mode mode;
35756 unsigned int regno;
35757 machine_mode elt_mode;
35758 int n_elts;
35760 /* Special handling for structs in darwin64. */
35761 if (TARGET_MACHO
35762 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35764 CUMULATIVE_ARGS valcum;
35765 rtx valret;
35767 valcum.words = 0;
35768 valcum.fregno = FP_ARG_MIN_REG;
35769 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35770 /* Do a trial code generation as if this were going to be passed as
35771 an argument; if any part goes in memory, we return NULL. */
35772 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35773 if (valret)
35774 return valret;
35775 /* Otherwise fall through to standard ABI rules. */
35778 mode = TYPE_MODE (valtype);
35780 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35781 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35783 int first_reg, n_regs;
35785 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35787 /* _Decimal128 must use even/odd register pairs. */
35788 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35789 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35791 else
35793 first_reg = ALTIVEC_ARG_RETURN;
35794 n_regs = 1;
35797 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35800 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35801 if (TARGET_32BIT && TARGET_POWERPC64)
35802 switch (mode)
35804 default:
35805 break;
35806 case E_DImode:
35807 case E_SCmode:
35808 case E_DCmode:
35809 case E_TCmode:
35810 int count = GET_MODE_SIZE (mode) / 4;
35811 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35814 if ((INTEGRAL_TYPE_P (valtype)
35815 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35816 || POINTER_TYPE_P (valtype))
35817 mode = TARGET_32BIT ? SImode : DImode;
35819 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35820 /* _Decimal128 must use an even/odd register pair. */
35821 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35822 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35823 && !FLOAT128_VECTOR_P (mode)
35824 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35825 regno = FP_ARG_RETURN;
35826 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35827 && targetm.calls.split_complex_arg)
35828 return rs6000_complex_function_value (mode);
35829 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35830 return register is used in both cases, and we won't see V2DImode/V2DFmode
35831 for pure altivec, combine the two cases. */
35832 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35833 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35834 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35835 regno = ALTIVEC_ARG_RETURN;
35836 else
35837 regno = GP_ARG_RETURN;
35839 return gen_rtx_REG (mode, regno);
35842 /* Define how to find the value returned by a library function
35843 assuming the value has mode MODE. */
35844 rtx
35845 rs6000_libcall_value (machine_mode mode)
35847 unsigned int regno;
35849 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
35850 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35851 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35853 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35854 /* _Decimal128 must use an even/odd register pair. */
35855 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35856 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35857 && TARGET_HARD_FLOAT
35858 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35859 regno = FP_ARG_RETURN;
35860 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35861 return register is used in both cases, and we won't see V2DImode/V2DFmode
35862 for pure altivec, combine the two cases. */
35863 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35864 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35865 regno = ALTIVEC_ARG_RETURN;
35866 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35867 return rs6000_complex_function_value (mode);
35868 else
35869 regno = GP_ARG_RETURN;
35871 return gen_rtx_REG (mode, regno);
35874 /* Compute register pressure classes. We implement the target hook to avoid
35875 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35876 lead to incorrect estimates of the number of available registers and
35877 therefore increased register pressure/spill. */
35878 static int
35879 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35881 int n;
35883 n = 0;
35884 pressure_classes[n++] = GENERAL_REGS;
35885 if (TARGET_VSX)
35886 pressure_classes[n++] = VSX_REGS;
35887 else
35889 if (TARGET_ALTIVEC)
35890 pressure_classes[n++] = ALTIVEC_REGS;
35891 if (TARGET_HARD_FLOAT)
35892 pressure_classes[n++] = FLOAT_REGS;
35894 pressure_classes[n++] = CR_REGS;
35895 pressure_classes[n++] = SPECIAL_REGS;
35897 return n;
35900 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35901 Frame pointer elimination is automatically handled.
35903 For the RS/6000, if frame pointer elimination is being done, we would like
35904 to convert ap into fp, not sp.
35906 We need r30 if -mminimal-toc was specified, and there are constant pool
35907 references. */
35909 static bool
35910 rs6000_can_eliminate (const int from, const int to)
35912 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35913 ? ! frame_pointer_needed
35914 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35915 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35916 || constant_pool_empty_p ()
35917 : true);
35920 /* Define the offset between two registers, FROM to be eliminated and its
35921 replacement TO, at the start of a routine. */
35922 HOST_WIDE_INT
35923 rs6000_initial_elimination_offset (int from, int to)
35925 rs6000_stack_t *info = rs6000_stack_info ();
35926 HOST_WIDE_INT offset;
35928 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35929 offset = info->push_p ? 0 : -info->total_size;
35930 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35932 offset = info->push_p ? 0 : -info->total_size;
35933 if (FRAME_GROWS_DOWNWARD)
35934 offset += info->fixed_size + info->vars_size + info->parm_size;
35936 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35937 offset = FRAME_GROWS_DOWNWARD
35938 ? info->fixed_size + info->vars_size + info->parm_size
35939 : 0;
35940 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35941 offset = info->total_size;
35942 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35943 offset = info->push_p ? info->total_size : 0;
35944 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35945 offset = 0;
35946 else
35947 gcc_unreachable ();
35949 return offset;
35952 /* Fill in sizes of registers used by unwinder. */
35954 static void
35955 rs6000_init_dwarf_reg_sizes_extra (tree address)
35957 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35959 int i;
35960 machine_mode mode = TYPE_MODE (char_type_node);
35961 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35962 rtx mem = gen_rtx_MEM (BLKmode, addr);
35963 rtx value = gen_int_mode (16, mode);
35965 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35966 The unwinder still needs to know the size of Altivec registers. */
35968 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35970 int column = DWARF_REG_TO_UNWIND_COLUMN
35971 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35972 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35974 emit_move_insn (adjust_address (mem, mode, offset), value);
35979 /* Map internal gcc register numbers to debug format register numbers.
35980 FORMAT specifies the type of debug register number to use:
35981 0 -- debug information, except for frame-related sections
35982 1 -- DWARF .debug_frame section
35983 2 -- DWARF .eh_frame section */
35985 unsigned int
35986 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35988 /* Except for the above, we use the internal number for non-DWARF
35989 debug information, and also for .eh_frame. */
35990 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35991 return regno;
35993 /* On some platforms, we use the standard DWARF register
35994 numbering for .debug_info and .debug_frame. */
35995 #ifdef RS6000_USE_DWARF_NUMBERING
35996 if (regno <= 63)
35997 return regno;
35998 if (regno == LR_REGNO)
35999 return 108;
36000 if (regno == CTR_REGNO)
36001 return 109;
36002 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36003 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36004 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36005 to the DWARF reg for CR. */
36006 if (format == 1 && regno == CR2_REGNO)
36007 return 64;
36008 if (CR_REGNO_P (regno))
36009 return regno - CR0_REGNO + 86;
36010 if (regno == CA_REGNO)
36011 return 101; /* XER */
36012 if (ALTIVEC_REGNO_P (regno))
36013 return regno - FIRST_ALTIVEC_REGNO + 1124;
36014 if (regno == VRSAVE_REGNO)
36015 return 356;
36016 if (regno == VSCR_REGNO)
36017 return 67;
36018 #endif
36019 return regno;
36022 /* target hook eh_return_filter_mode */
36023 static scalar_int_mode
36024 rs6000_eh_return_filter_mode (void)
36026 return TARGET_32BIT ? SImode : word_mode;
36029 /* Target hook for scalar_mode_supported_p. */
36030 static bool
36031 rs6000_scalar_mode_supported_p (scalar_mode mode)
36033 /* -m32 does not support TImode. This is the default, from
36034 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36035 same ABI as for -m32. But default_scalar_mode_supported_p allows
36036 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36037 for -mpowerpc64. */
36038 if (TARGET_32BIT && mode == TImode)
36039 return false;
36041 if (DECIMAL_FLOAT_MODE_P (mode))
36042 return default_decimal_float_supported_p ();
36043 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36044 return true;
36045 else
36046 return default_scalar_mode_supported_p (mode);
36049 /* Target hook for vector_mode_supported_p. */
36050 static bool
36051 rs6000_vector_mode_supported_p (machine_mode mode)
36054 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36055 return true;
36057 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36058 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36059 double-double. */
36060 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36061 return true;
36063 else
36064 return false;
36067 /* Target hook for floatn_mode. */
36068 static opt_scalar_float_mode
36069 rs6000_floatn_mode (int n, bool extended)
36071 if (extended)
36073 switch (n)
36075 case 32:
36076 return DFmode;
36078 case 64:
36079 if (TARGET_FLOAT128_TYPE)
36080 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36081 else
36082 return opt_scalar_float_mode ();
36084 case 128:
36085 return opt_scalar_float_mode ();
36087 default:
36088 /* Those are the only valid _FloatNx types. */
36089 gcc_unreachable ();
36092 else
36094 switch (n)
36096 case 32:
36097 return SFmode;
36099 case 64:
36100 return DFmode;
36102 case 128:
36103 if (TARGET_FLOAT128_TYPE)
36104 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36105 else
36106 return opt_scalar_float_mode ();
36108 default:
36109 return opt_scalar_float_mode ();
36115 /* Target hook for c_mode_for_suffix. */
36116 static machine_mode
36117 rs6000_c_mode_for_suffix (char suffix)
36119 if (TARGET_FLOAT128_TYPE)
36121 if (suffix == 'q' || suffix == 'Q')
36122 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36124 /* At the moment, we are not defining a suffix for IBM extended double.
36125 If/when the default for -mabi=ieeelongdouble is changed, and we want
36126 to support __ibm128 constants in legacy library code, we may need to
36127 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36128 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36129 __float80 constants. */
36132 return VOIDmode;
36135 /* Target hook for invalid_arg_for_unprototyped_fn. */
36136 static const char *
36137 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36139 return (!rs6000_darwin64_abi
36140 && typelist == 0
36141 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36142 && (funcdecl == NULL_TREE
36143 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36144 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36145 ? N_("AltiVec argument passed to unprototyped function")
36146 : NULL;
36149 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36150 setup by using __stack_chk_fail_local hidden function instead of
36151 calling __stack_chk_fail directly. Otherwise it is better to call
36152 __stack_chk_fail directly. */
36154 static tree ATTRIBUTE_UNUSED
36155 rs6000_stack_protect_fail (void)
36157 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36158 ? default_hidden_stack_protect_fail ()
36159 : default_external_stack_protect_fail ();
36162 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
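/* libsanitizer computes the shadow address of A as (A >> 3) + offset,
   so the constant below places the shadow region at 1 << 41 for
   64-bit and at 1 << 29 for 32-bit targets.  */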
36164 #if TARGET_ELF
36165 static unsigned HOST_WIDE_INT
36166 rs6000_asan_shadow_offset (void)
36168 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36170 #endif
36172 /* Mask options that we want to support inside of attribute((target)) and
36173 #pragma GCC target operations. Note, we do not include things like
36174 64/32-bit, endianness, hard/soft floating point, etc. that would have
36175 different calling sequences. */
36177 struct rs6000_opt_mask {
36178 const char *name; /* option name */
36179 HOST_WIDE_INT mask; /* mask to set */
36180 bool invert; /* invert sense of mask */
36181 bool valid_target; /* option is a target option */
36184 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36186 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36187 { "cmpb", OPTION_MASK_CMPB, false, true },
36188 { "crypto", OPTION_MASK_CRYPTO, false, true },
36189 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36190 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36191 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36192 false, true },
36193 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36194 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36195 { "fprnd", OPTION_MASK_FPRND, false, true },
36196 { "hard-dfp", OPTION_MASK_DFP, false, true },
36197 { "htm", OPTION_MASK_HTM, false, true },
36198 { "isel", OPTION_MASK_ISEL, false, true },
36199 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36200 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36201 { "modulo", OPTION_MASK_MODULO, false, true },
36202 { "mulhw", OPTION_MASK_MULHW, false, true },
36203 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36204 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36205 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36206 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36207 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36208 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36209 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36210 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36211 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36212 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36213 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36214 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36215 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36216 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36217 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36218 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36219 { "string", OPTION_MASK_STRING, false, true },
36220 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36221 { "update", OPTION_MASK_NO_UPDATE, true , true },
36222 { "vsx", OPTION_MASK_VSX, false, true },
36223 #ifdef OPTION_MASK_64BIT
36224 #if TARGET_AIX_OS
36225 { "aix64", OPTION_MASK_64BIT, false, false },
36226 { "aix32", OPTION_MASK_64BIT, true, false },
36227 #else
36228 { "64", OPTION_MASK_64BIT, false, false },
36229 { "32", OPTION_MASK_64BIT, true, false },
36230 #endif
36231 #endif
36232 #ifdef OPTION_MASK_EABI
36233 { "eabi", OPTION_MASK_EABI, false, false },
36234 #endif
36235 #ifdef OPTION_MASK_LITTLE_ENDIAN
36236 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36237 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36238 #endif
36239 #ifdef OPTION_MASK_RELOCATABLE
36240 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36241 #endif
36242 #ifdef OPTION_MASK_STRICT_ALIGN
36243 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36244 #endif
36245 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36246 { "string", OPTION_MASK_STRING, false, false },
36249 /* Builtin mask mapping for printing the flags. */
36250 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36252 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36253 { "vsx", RS6000_BTM_VSX, false, false },
36254 { "paired", RS6000_BTM_PAIRED, false, false },
36255 { "fre", RS6000_BTM_FRE, false, false },
36256 { "fres", RS6000_BTM_FRES, false, false },
36257 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36258 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36259 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36260 { "cell", RS6000_BTM_CELL, false, false },
36261 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36262 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36263 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36264 { "crypto", RS6000_BTM_CRYPTO, false, false },
36265 { "htm", RS6000_BTM_HTM, false, false },
36266 { "hard-dfp", RS6000_BTM_DFP, false, false },
36267 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36268 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36269 { "float128", RS6000_BTM_FLOAT128, false, false },
36270 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36273 /* Option variables that we want to support inside attribute((target)) and
36274 #pragma GCC target operations. */
36276 struct rs6000_opt_var {
36277 const char *name; /* option name */
36278 size_t global_offset; /* offset of the option in global_options. */
36279 size_t target_offset; /* offset of the option in target options. */
36282 static struct rs6000_opt_var const rs6000_opt_vars[] =
36284 { "friz",
36285 offsetof (struct gcc_options, x_TARGET_FRIZ),
36286 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36287 { "avoid-indexed-addresses",
36288 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36289 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36290 { "paired",
36291 offsetof (struct gcc_options, x_rs6000_paired_float),
36292 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36293 { "longcall",
36294 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36295 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36296 { "optimize-swaps",
36297 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36298 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36299 { "allow-movmisalign",
36300 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36301 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36302 { "sched-groups",
36303 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36304 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36305 { "always-hint",
36306 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36307 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36308 { "align-branch-targets",
36309 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36310 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36311 { "tls-markers",
36312 offsetof (struct gcc_options, x_tls_markers),
36313 offsetof (struct cl_target_option, x_tls_markers), },
36314 { "sched-prolog",
36315 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36316 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36317 { "sched-epilog",
36318 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36319 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36322 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36323 parsing. Return true if there were no errors. */
36325 static bool
36326 rs6000_inner_target_options (tree args, bool attr_p)
36328 bool ret = true;
36330 if (args == NULL_TREE)
36331 ;
36333 else if (TREE_CODE (args) == STRING_CST)
36335 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36336 char *q;
36338 while ((q = strtok (p, ",")) != NULL)
36340 bool error_p = false;
36341 bool not_valid_p = false;
36342 const char *cpu_opt = NULL;
36344 p = NULL;
36345 if (strncmp (q, "cpu=", 4) == 0)
36347 int cpu_index = rs6000_cpu_name_lookup (q+4);
36348 if (cpu_index >= 0)
36349 rs6000_cpu_index = cpu_index;
36350 else
36352 error_p = true;
36353 cpu_opt = q+4;
36356 else if (strncmp (q, "tune=", 5) == 0)
36358 int tune_index = rs6000_cpu_name_lookup (q+5);
36359 if (tune_index >= 0)
36360 rs6000_tune_index = tune_index;
36361 else
36363 error_p = true;
36364 cpu_opt = q+5;
36367 else
36369 size_t i;
36370 bool invert = false;
36371 char *r = q;
36373 error_p = true;
36374 if (strncmp (r, "no-", 3) == 0)
36376 invert = true;
36377 r += 3;
36380 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36381 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36383 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36385 if (!rs6000_opt_masks[i].valid_target)
36386 not_valid_p = true;
36387 else
36389 error_p = false;
36390 rs6000_isa_flags_explicit |= mask;
36392 /* VSX needs altivec, so -mvsx automagically sets
36393 altivec and disables -mavoid-indexed-addresses. */
36394 if (!invert)
36396 if (mask == OPTION_MASK_VSX)
36398 mask |= OPTION_MASK_ALTIVEC;
36399 TARGET_AVOID_XFORM = 0;
36403 if (rs6000_opt_masks[i].invert)
36404 invert = !invert;
36406 if (invert)
36407 rs6000_isa_flags &= ~mask;
36408 else
36409 rs6000_isa_flags |= mask;
36411 break;
36414 if (error_p && !not_valid_p)
36416 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36417 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36419 size_t j = rs6000_opt_vars[i].global_offset;
36420 *((int *) ((char *)&global_options + j)) = !invert;
36421 error_p = false;
36422 not_valid_p = false;
36423 break;
36428 if (error_p)
36430 const char *eprefix, *esuffix;
36432 ret = false;
36433 if (attr_p)
36435 eprefix = "__attribute__((__target__(";
36436 esuffix = ")))";
36438 else
36440 eprefix = "#pragma GCC target ";
36441 esuffix = "";
36444 if (cpu_opt)
36445 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36446 q, esuffix);
36447 else if (not_valid_p)
36448 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36449 else
36450 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36455 else if (TREE_CODE (args) == TREE_LIST)
36457 do
36459 tree value = TREE_VALUE (args);
36460 if (value)
36462 bool ret2 = rs6000_inner_target_options (value, attr_p);
36463 if (!ret2)
36464 ret = false;
36466 args = TREE_CHAIN (args);
36468 while (args != NULL_TREE);
36471 else
36473 error ("attribute %<target%> argument not a string");
36474 return false;
36477 return ret;
36480 /* Print out the target options as a list for -mdebug=target. */
36482 static void
36483 rs6000_debug_target_options (tree args, const char *prefix)
36485 if (args == NULL_TREE)
36486 fprintf (stderr, "%s<NULL>", prefix);
36488 else if (TREE_CODE (args) == STRING_CST)
36490 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36491 char *q;
36493 while ((q = strtok (p, ",")) != NULL)
36495 p = NULL;
36496 fprintf (stderr, "%s\"%s\"", prefix, q);
36497 prefix = ", ";
36501 else if (TREE_CODE (args) == TREE_LIST)
36503 do
36505 tree value = TREE_VALUE (args);
36506 if (value)
36508 rs6000_debug_target_options (value, prefix);
36509 prefix = ", ";
36511 args = TREE_CHAIN (args);
36513 while (args != NULL_TREE);
36516 else
36517 gcc_unreachable ();
36519 return;
36523 /* Hook to validate attribute((target("..."))). */
36525 static bool
36526 rs6000_valid_attribute_p (tree fndecl,
36527 tree ARG_UNUSED (name),
36528 tree args,
36529 int flags)
36531 struct cl_target_option cur_target;
36532 bool ret;
36533 tree old_optimize = build_optimization_node (&global_options);
36534 tree new_target, new_optimize;
36535 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36537 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36539 if (TARGET_DEBUG_TARGET)
36541 tree tname = DECL_NAME (fndecl);
36542 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36543 if (tname)
36544 fprintf (stderr, "function: %.*s\n",
36545 (int) IDENTIFIER_LENGTH (tname),
36546 IDENTIFIER_POINTER (tname));
36547 else
36548 fprintf (stderr, "function: unknown\n");
36550 fprintf (stderr, "args:");
36551 rs6000_debug_target_options (args, " ");
36552 fprintf (stderr, "\n");
36554 if (flags)
36555 fprintf (stderr, "flags: 0x%x\n", flags);
36557 fprintf (stderr, "--------------------\n");
36560 /* attribute((target("default"))) does nothing, beyond
36561 affecting multi-versioning. */
36562 if (TREE_VALUE (args)
36563 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36564 && TREE_CHAIN (args) == NULL_TREE
36565 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36566 return true;
36568 old_optimize = build_optimization_node (&global_options);
36569 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36571 /* If the function changed the optimization levels as well as setting target
36572 options, start with the optimizations specified. */
36573 if (func_optimize && func_optimize != old_optimize)
36574 cl_optimization_restore (&global_options,
36575 TREE_OPTIMIZATION (func_optimize));
36577 /* The target attributes may also change some optimization flags, so update
36578 the optimization options if necessary. */
36579 cl_target_option_save (&cur_target, &global_options);
36580 rs6000_cpu_index = rs6000_tune_index = -1;
36581 ret = rs6000_inner_target_options (args, true);
36583 /* Set up any additional state. */
36584 if (ret)
36586 ret = rs6000_option_override_internal (false);
36587 new_target = build_target_option_node (&global_options);
36589 else
36590 new_target = NULL;
36592 new_optimize = build_optimization_node (&global_options);
36594 if (!new_target)
36595 ret = false;
36597 else if (fndecl)
36599 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36601 if (old_optimize != new_optimize)
36602 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36605 cl_target_option_restore (&global_options, &cur_target);
36607 if (old_optimize != new_optimize)
36608 cl_optimization_restore (&global_options,
36609 TREE_OPTIMIZATION (old_optimize));
36611 return ret;
36615 /* Hook to validate the current #pragma GCC target and set the state, and
36616 update the macros based on what was changed. If ARGS is NULL, then
36617 POP_TARGET is used to reset the options. */
36619 bool
36620 rs6000_pragma_target_parse (tree args, tree pop_target)
36622 tree prev_tree = build_target_option_node (&global_options);
36623 tree cur_tree;
36624 struct cl_target_option *prev_opt, *cur_opt;
36625 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36626 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36628 if (TARGET_DEBUG_TARGET)
36630 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36631 fprintf (stderr, "args:");
36632 rs6000_debug_target_options (args, " ");
36633 fprintf (stderr, "\n");
36635 if (pop_target)
36637 fprintf (stderr, "pop_target:\n");
36638 debug_tree (pop_target);
36640 else
36641 fprintf (stderr, "pop_target: <NULL>\n");
36643 fprintf (stderr, "--------------------\n");
36646 if (! args)
36648 cur_tree = ((pop_target)
36649 ? pop_target
36650 : target_option_default_node);
36651 cl_target_option_restore (&global_options,
36652 TREE_TARGET_OPTION (cur_tree));
36654 else
36656 rs6000_cpu_index = rs6000_tune_index = -1;
36657 if (!rs6000_inner_target_options (args, false)
36658 || !rs6000_option_override_internal (false)
36659 || (cur_tree = build_target_option_node (&global_options))
36660 == NULL_TREE)
36662 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36663 fprintf (stderr, "invalid pragma\n");
36665 return false;
36669 target_option_current_node = cur_tree;
36671 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36672 change the macros that are defined. */
36673 if (rs6000_target_modify_macros_ptr)
36675 prev_opt = TREE_TARGET_OPTION (prev_tree);
36676 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36677 prev_flags = prev_opt->x_rs6000_isa_flags;
36679 cur_opt = TREE_TARGET_OPTION (cur_tree);
36680 cur_flags = cur_opt->x_rs6000_isa_flags;
36681 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36683 diff_bumask = (prev_bumask ^ cur_bumask);
36684 diff_flags = (prev_flags ^ cur_flags);
36686 if ((diff_flags != 0) || (diff_bumask != 0))
36688 /* Delete old macros. */
36689 rs6000_target_modify_macros_ptr (false,
36690 prev_flags & diff_flags,
36691 prev_bumask & diff_bumask);
36693 /* Define new macros. */
36694 rs6000_target_modify_macros_ptr (true,
36695 cur_flags & diff_flags,
36696 cur_bumask & diff_bumask);
36700 return true;
36704 /* Remember the last target of rs6000_set_current_function. */
36705 static GTY(()) tree rs6000_previous_fndecl;
36707 /* Restore target's globals from NEW_TREE and invalidate the
36708 rs6000_previous_fndecl cache. */
36710 static void
36711 rs6000_activate_target_options (tree new_tree)
36713 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36714 if (TREE_TARGET_GLOBALS (new_tree))
36715 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36716 else if (new_tree == target_option_default_node)
36717 restore_target_globals (&default_target_globals);
36718 else
36719 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36720 rs6000_previous_fndecl = NULL_TREE;
36723 /* Establish appropriate back-end context for processing the function
36724 FNDECL. The argument might be NULL to indicate processing at top
36725 level, outside of any function scope. */
36726 static void
36727 rs6000_set_current_function (tree fndecl)
36729 if (TARGET_DEBUG_TARGET)
36731 fprintf (stderr, "\n==================== rs6000_set_current_function");
36733 if (fndecl)
36734 fprintf (stderr, ", fndecl %s (%p)",
36735 (DECL_NAME (fndecl)
36736 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36737 : "<unknown>"), (void *)fndecl);
36739 if (rs6000_previous_fndecl)
36740 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36742 fprintf (stderr, "\n");
36745 /* Only change the context if the function changes. This hook is called
36746 several times in the course of compiling a function, and we don't want to
36747 slow things down too much or call target_reinit when it isn't safe. */
36748 if (fndecl == rs6000_previous_fndecl)
36749 return;
36751 tree old_tree;
36752 if (rs6000_previous_fndecl == NULL_TREE)
36753 old_tree = target_option_current_node;
36754 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36755 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36756 else
36757 old_tree = target_option_default_node;
36759 tree new_tree;
36760 if (fndecl == NULL_TREE)
36762 if (old_tree != target_option_current_node)
36763 new_tree = target_option_current_node;
36764 else
36765 new_tree = NULL_TREE;
36767 else
36769 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36770 if (new_tree == NULL_TREE)
36771 new_tree = target_option_default_node;
36774 if (TARGET_DEBUG_TARGET)
36776 if (new_tree)
36778 fprintf (stderr, "\nnew fndecl target specific options:\n");
36779 debug_tree (new_tree);
36782 if (old_tree)
36784 fprintf (stderr, "\nold fndecl target specific options:\n");
36785 debug_tree (old_tree);
36788 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36789 fprintf (stderr, "--------------------\n");
36792 if (new_tree && old_tree != new_tree)
36793 rs6000_activate_target_options (new_tree);
36795 if (fndecl)
36796 rs6000_previous_fndecl = fndecl;
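/* Illustrative example (not from this file): the hook above has real work to
   do when per-function target attributes change the subtarget, e.g.

       __attribute__ ((target ("cpu=power9")))
       void fast_path (void);     // compiled with ISA 3.0 flags

       void generic_path (void);  // compiled with the command-line flags
*/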
36800 /* Save the current options. */
36802 static void
36803 rs6000_function_specific_save (struct cl_target_option *ptr,
36804 struct gcc_options *opts)
36806 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36807 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36810 /* Restore the current options. */
36812 static void
36813 rs6000_function_specific_restore (struct gcc_options *opts,
36814 struct cl_target_option *ptr)
36817 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36818 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36819 (void) rs6000_option_override_internal (false);
36822 /* Print the current options. */
36824 static void
36825 rs6000_function_specific_print (FILE *file, int indent,
36826 struct cl_target_option *ptr)
36828 rs6000_print_isa_options (file, indent, "Isa options set",
36829 ptr->x_rs6000_isa_flags);
36831 rs6000_print_isa_options (file, indent, "Isa options explicit",
36832 ptr->x_rs6000_isa_flags_explicit);
36835 /* Helper function to print the current isa or misc options on a line. */
36837 static void
36838 rs6000_print_options_internal (FILE *file,
36839 int indent,
36840 const char *string,
36841 HOST_WIDE_INT flags,
36842 const char *prefix,
36843 const struct rs6000_opt_mask *opts,
36844 size_t num_elements)
36846 size_t i;
36847 size_t start_column = 0;
36848 size_t cur_column;
36849 size_t max_column = 120;
36850 size_t prefix_len = strlen (prefix);
36851 size_t comma_len = 0;
36852 const char *comma = "";
36854 if (indent)
36855 start_column += fprintf (file, "%*s", indent, "");
36857 if (!flags)
36859 fprintf (file, DEBUG_FMT_S, string, "<none>");
36860 return;
36863 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36865 /* Print the various mask options. */
36866 cur_column = start_column;
36867 for (i = 0; i < num_elements; i++)
36869 bool invert = opts[i].invert;
36870 const char *name = opts[i].name;
36871 const char *no_str = "";
36872 HOST_WIDE_INT mask = opts[i].mask;
36873 size_t len = comma_len + prefix_len + strlen (name);
36875 if (!invert)
36877 if ((flags & mask) == 0)
36879 no_str = "no-";
36880 len += sizeof ("no-") - 1;
36883 flags &= ~mask;
36886 else
36888 if ((flags & mask) != 0)
36890 no_str = "no-";
36891 len += sizeof ("no-") - 1;
36894 flags |= mask;
36897 cur_column += len;
36898 if (cur_column > max_column)
36900 fprintf (file, ", \\\n%*s", (int)start_column, "");
36901 cur_column = start_column + len;
36902 comma = "";
36905 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36906 comma = ", ";
36907 comma_len = sizeof (", ") - 1;
36910 fputs ("\n", file);
36913 /* Helper function to print the current isa options on a line. */
36915 static void
36916 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36917 HOST_WIDE_INT flags)
36919 rs6000_print_options_internal (file, indent, string, flags, "-m",
36920 &rs6000_opt_masks[0],
36921 ARRAY_SIZE (rs6000_opt_masks));
36924 static void
36925 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36926 HOST_WIDE_INT flags)
36928 rs6000_print_options_internal (file, indent, string, flags, "",
36929 &rs6000_builtin_mask_names[0],
36930 ARRAY_SIZE (rs6000_builtin_mask_names));
36933 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36934 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36935 -mupper-regs-df, etc.).
36937 If the user used -mno-power8-vector, we need to turn off all of the implicit
36938 ISA 2.07 and 3.0 options that relate to the vector unit.
36940 If the user used -mno-power9-vector, we need to turn off all of the implicit
36941 ISA 3.0 options that relate to the vector unit.
36943 This function does not handle explicit options such as the user specifying
36944 -mdirect-move. These are handled in rs6000_option_override_internal, and
36945 the appropriate error is given if needed.
36947 We return a mask of all of the implicit options that should not be enabled
36948 by default. */
36950 static HOST_WIDE_INT
36951 rs6000_disable_incompatible_switches (void)
36953 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36954 size_t i, j;
36956 static const struct {
36957 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36958 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36959 const char *const name; /* name of the switch. */
36960 } flags[] = {
36961 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36962 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36963 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36966 for (i = 0; i < ARRAY_SIZE (flags); i++)
36968 HOST_WIDE_INT no_flag = flags[i].no_flag;
36970 if ((rs6000_isa_flags & no_flag) == 0
36971 && (rs6000_isa_flags_explicit & no_flag) != 0)
36973 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36974 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36975 & rs6000_isa_flags
36976 & dep_flags);
36978 if (set_flags)
36980 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36981 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36983 set_flags &= ~rs6000_opt_masks[j].mask;
36984 error ("%<-mno-%s%> turns off %<-m%s%>",
36985 flags[i].name,
36986 rs6000_opt_masks[j].name);
36989 gcc_assert (!set_flags);
36992 rs6000_isa_flags &= ~dep_flags;
36993 ignore_masks |= no_flag | dep_flags;
36997 return ignore_masks;
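/* Worked example (illustrative): with -mcpu=power9 -mno-vsx, VSX is
   explicitly off, so every implicit option in OTHER_VSX_VECTOR_MASKS
   (power8-vector, power9-vector, direct-move, etc.) is cleared and added to
   the returned ignore mask.  If one of those options was itself enabled
   explicitly, the loop above diagnoses it, e.g.
       error: '-mno-vsx' turns off '-mpower8-vector'  */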
37001 /* Helper function for printing the function name when debugging. */
37003 static const char *
37004 get_decl_name (tree fn)
37006 tree name;
37008 if (!fn)
37009 return "<null>";
37011 name = DECL_NAME (fn);
37012 if (!name)
37013 return "<no-name>";
37015 return IDENTIFIER_POINTER (name);
37018 /* Return the clone id of the target we are compiling code for in a target
37019 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37020 the priority list for the target clones (ordered from lowest to
37021 highest). */
37023 static int
37024 rs6000_clone_priority (tree fndecl)
37026 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37027 HOST_WIDE_INT isa_masks;
37028 int ret = CLONE_DEFAULT;
37029 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37030 const char *attrs_str = NULL;
37032 attrs = TREE_VALUE (TREE_VALUE (attrs));
37033 attrs_str = TREE_STRING_POINTER (attrs);
37035 /* Return priority zero for default function. Return the ISA needed for the
37036 function if it is not the default. */
37037 if (strcmp (attrs_str, "default") != 0)
37039 if (fn_opts == NULL_TREE)
37040 fn_opts = target_option_default_node;
37042 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37043 isa_masks = rs6000_isa_flags;
37044 else
37045 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37047 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37048 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37049 break;
37052 if (TARGET_DEBUG_TARGET)
37053 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37054 get_decl_name (fndecl), ret);
37056 return ret;
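/* Example (hedged): with clone versions ("cpu=power9", "default"), the
   power9 version carries an ISA 3.0 flag bit in its isa_masks and so gets
   the highest non-zero priority, while "default" stays at CLONE_DEFAULT (0);
   the loop above simply picks the highest clone whose mask bits are set.  */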
37059 /* This compares the priority of target features in function DECL1 and DECL2.
37060 It returns positive value if DECL1 is higher priority, negative value if
37061 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37062 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37064 static int
37065 rs6000_compare_version_priority (tree decl1, tree decl2)
37067 int priority1 = rs6000_clone_priority (decl1);
37068 int priority2 = rs6000_clone_priority (decl2);
37069 int ret = priority1 - priority2;
37071 if (TARGET_DEBUG_TARGET)
37072 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37073 get_decl_name (decl1), get_decl_name (decl2), ret);
37075 return ret;
37078 /* Make a dispatcher declaration for the multi-versioned function DECL.
37079 Calls to DECL function will be replaced with calls to the dispatcher
37080 by the front-end. Returns the decl of the dispatcher function. */
37082 static tree
37083 rs6000_get_function_versions_dispatcher (void *decl)
37085 tree fn = (tree) decl;
37086 struct cgraph_node *node = NULL;
37087 struct cgraph_node *default_node = NULL;
37088 struct cgraph_function_version_info *node_v = NULL;
37089 struct cgraph_function_version_info *first_v = NULL;
37091 tree dispatch_decl = NULL;
37093 struct cgraph_function_version_info *default_version_info = NULL;
37094 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37096 if (TARGET_DEBUG_TARGET)
37097 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37098 get_decl_name (fn));
37100 node = cgraph_node::get (fn);
37101 gcc_assert (node != NULL);
37103 node_v = node->function_version ();
37104 gcc_assert (node_v != NULL);
37106 if (node_v->dispatcher_resolver != NULL)
37107 return node_v->dispatcher_resolver;
37109 /* Find the default version and make it the first node. */
37110 first_v = node_v;
37111 /* Go to the beginning of the chain. */
37112 while (first_v->prev != NULL)
37113 first_v = first_v->prev;
37115 default_version_info = first_v;
37116 while (default_version_info != NULL)
37118 const tree decl2 = default_version_info->this_node->decl;
37119 if (is_function_default_version (decl2))
37120 break;
37121 default_version_info = default_version_info->next;
37124 /* If there is no default node, just return NULL. */
37125 if (default_version_info == NULL)
37126 return NULL;
37128 /* Make default info the first node. */
37129 if (first_v != default_version_info)
37131 default_version_info->prev->next = default_version_info->next;
37132 if (default_version_info->next)
37133 default_version_info->next->prev = default_version_info->prev;
37134 first_v->prev = default_version_info;
37135 default_version_info->next = first_v;
37136 default_version_info->prev = NULL;
37139 default_node = default_version_info->this_node;
37141 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37142 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37143 "target_clones attribute needs GLIBC (2.23 and newer) that "
37144 "exports hardware capability bits");
37145 #else
37147 if (targetm.has_ifunc_p ())
37149 struct cgraph_function_version_info *it_v = NULL;
37150 struct cgraph_node *dispatcher_node = NULL;
37151 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37153 /* Right now, the dispatching is done via ifunc. */
37154 dispatch_decl = make_dispatcher_decl (default_node->decl);
37156 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37157 gcc_assert (dispatcher_node != NULL);
37158 dispatcher_node->dispatcher_function = 1;
37159 dispatcher_version_info
37160 = dispatcher_node->insert_new_function_version ();
37161 dispatcher_version_info->next = default_version_info;
37162 dispatcher_node->definition = 1;
37164 /* Set the dispatcher for all the versions. */
37165 it_v = default_version_info;
37166 while (it_v != NULL)
37168 it_v->dispatcher_resolver = dispatch_decl;
37169 it_v = it_v->next;
37172 else
37174 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37175 "multiversioning needs ifunc which is not supported "
37176 "on this target");
37178 #endif
37180 return dispatch_decl;
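/* Usage sketch (attribute syntax as documented for PowerPC target_clones;
   illustrative, not from this file):

       __attribute__ ((target_clones ("cpu=power9", "default")))
       long mod_func (long a, long b) { return (a % b) + b; }

   Calls to mod_func are routed through the ifunc dispatcher created here;
   glibc 2.23+ supplies the HWCAP bits in the TCB that the resolver tests. */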
37183 /* Make the resolver function decl to dispatch the versions of a multi-
37184 versioned function, DEFAULT_DECL. Create an empty basic block in the
37185 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37186 function. */
37188 static tree
37189 make_resolver_func (const tree default_decl,
37190 const tree dispatch_decl,
37191 basic_block *empty_bb)
37193 /* Make the resolver function static. The resolver function returns
37194 void *. */
37195 tree decl_name = clone_function_name (default_decl, "resolver");
37196 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37197 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37198 tree decl = build_fn_decl (resolver_name, type);
37199 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37201 DECL_NAME (decl) = decl_name;
37202 TREE_USED (decl) = 1;
37203 DECL_ARTIFICIAL (decl) = 1;
37204 DECL_IGNORED_P (decl) = 0;
37205 TREE_PUBLIC (decl) = 0;
37206 DECL_UNINLINABLE (decl) = 1;
37208 /* Resolver is not external, body is generated. */
37209 DECL_EXTERNAL (decl) = 0;
37210 DECL_EXTERNAL (dispatch_decl) = 0;
37212 DECL_CONTEXT (decl) = NULL_TREE;
37213 DECL_INITIAL (decl) = make_node (BLOCK);
37214 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37216 /* Build result decl and add to function_decl. */
37217 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37218 DECL_ARTIFICIAL (t) = 1;
37219 DECL_IGNORED_P (t) = 1;
37220 DECL_RESULT (decl) = t;
37222 gimplify_function_tree (decl);
37223 push_cfun (DECL_STRUCT_FUNCTION (decl));
37224 *empty_bb = init_lowered_empty_function (decl, false,
37225 profile_count::uninitialized ());
37227 cgraph_node::add_new_function (decl, true);
37228 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37230 pop_cfun ();
37232 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37233 DECL_ATTRIBUTES (dispatch_decl)
37234 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37236 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37238 return decl;
37241 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37242 return a pointer to VERSION_DECL if we are running on a machine that
37243 supports the index CLONE_ISA hardware architecture bits. This function will
37244 be called during version dispatch to decide which function version to
37245 execute. It returns the basic block at the end, to which more conditions
37246 can be added. */
37248 static basic_block
37249 add_condition_to_bb (tree function_decl, tree version_decl,
37250 int clone_isa, basic_block new_bb)
37252 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37254 gcc_assert (new_bb != NULL);
37255 gimple_seq gseq = bb_seq (new_bb);
37258 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37259 build_fold_addr_expr (version_decl));
37260 tree result_var = create_tmp_var (ptr_type_node);
37261 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37262 gimple *return_stmt = gimple_build_return (result_var);
37264 if (clone_isa == CLONE_DEFAULT)
37266 gimple_seq_add_stmt (&gseq, convert_stmt);
37267 gimple_seq_add_stmt (&gseq, return_stmt);
37268 set_bb_seq (new_bb, gseq);
37269 gimple_set_bb (convert_stmt, new_bb);
37270 gimple_set_bb (return_stmt, new_bb);
37271 pop_cfun ();
37272 return new_bb;
37275 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37276 tree cond_var = create_tmp_var (bool_int_type_node);
37277 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37278 const char *arg_str = rs6000_clone_map[clone_isa].name;
37279 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37280 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37281 gimple_call_set_lhs (call_cond_stmt, cond_var);
37283 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37284 gimple_set_bb (call_cond_stmt, new_bb);
37285 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37287 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37288 NULL_TREE, NULL_TREE);
37289 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37290 gimple_set_bb (if_else_stmt, new_bb);
37291 gimple_seq_add_stmt (&gseq, if_else_stmt);
37293 gimple_seq_add_stmt (&gseq, convert_stmt);
37294 gimple_seq_add_stmt (&gseq, return_stmt);
37295 set_bb_seq (new_bb, gseq);
37297 basic_block bb1 = new_bb;
37298 edge e12 = split_block (bb1, if_else_stmt);
37299 basic_block bb2 = e12->dest;
37300 e12->flags &= ~EDGE_FALLTHRU;
37301 e12->flags |= EDGE_TRUE_VALUE;
37303 edge e23 = split_block (bb2, return_stmt);
37304 gimple_set_bb (convert_stmt, bb2);
37305 gimple_set_bb (return_stmt, bb2);
37307 basic_block bb3 = e23->dest;
37308 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37310 remove_edge (e23);
37311 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37313 pop_cfun ();
37314 return bb3;
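/* Sketch of what one call to this function adds (illustrative; "arch_3_00"
   is assumed to be the __builtin_cpu_supports string for the ISA 3.0 clone):

     bb1: cond = __builtin_cpu_supports ("arch_3_00");
          if (cond != 0) goto bb2; else goto bb3;
     bb2: return (void *) power9_clone;       // edge to EXIT_BLOCK
     bb3: fall-through block for the next version's test (returned).  */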
37317 /* This function generates the dispatch function for multi-versioned functions.
37318 DISPATCH_DECL is the function which will contain the dispatch logic.
37319 FNDECLS are the function choices for dispatch, and is a tree chain.
37320 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37321 code is generated. */
37323 static int
37324 dispatch_function_versions (tree dispatch_decl,
37325 void *fndecls_p,
37326 basic_block *empty_bb)
37328 int ix;
37329 tree ele;
37330 vec<tree> *fndecls;
37331 tree clones[CLONE_MAX];
37333 if (TARGET_DEBUG_TARGET)
37334 fputs ("dispatch_function_versions, top\n", stderr);
37336 gcc_assert (dispatch_decl != NULL
37337 && fndecls_p != NULL
37338 && empty_bb != NULL);
37340 /* fndecls_p is actually a vector. */
37341 fndecls = static_cast<vec<tree> *> (fndecls_p);
37343 /* At least one more version other than the default. */
37344 gcc_assert (fndecls->length () >= 2);
37346 /* The first version in the vector is the default decl. */
37347 memset ((void *) clones, '\0', sizeof (clones));
37348 clones[CLONE_DEFAULT] = (*fndecls)[0];
37350 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37351 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37352 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37353 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37354 to insert the code here to do the call. */
37356 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37358 int priority = rs6000_clone_priority (ele);
37359 if (!clones[priority])
37360 clones[priority] = ele;
37363 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37364 if (clones[ix])
37366 if (TARGET_DEBUG_TARGET)
37367 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37368 ix, get_decl_name (clones[ix]));
37370 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37371 *empty_bb);
37374 return 0;
37377 /* Generate the dispatching code body to dispatch multi-versioned function
37378 DECL. The target hook is called to process the "target" attributes and
37379 provide the code to dispatch the right function at run-time. NODE points
37380 to the dispatcher decl whose body will be created. */
37382 static tree
37383 rs6000_generate_version_dispatcher_body (void *node_p)
37385 tree resolver;
37386 basic_block empty_bb;
37387 struct cgraph_node *node = (cgraph_node *) node_p;
37388 struct cgraph_function_version_info *ninfo = node->function_version ();
37390 if (ninfo->dispatcher_resolver)
37391 return ninfo->dispatcher_resolver;
37393 /* node is going to be an alias, so remove the finalized bit. */
37394 node->definition = false;
37396 /* The first version in the chain corresponds to the default version. */
37397 ninfo->dispatcher_resolver = resolver
37398 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37400 if (TARGET_DEBUG_TARGET)
37401 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37402 get_decl_name (resolver));
37404 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37405 auto_vec<tree, 2> fn_ver_vec;
37407 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37408 vinfo;
37409 vinfo = vinfo->next)
37411 struct cgraph_node *version = vinfo->this_node;
37412 /* Check for virtual functions here again, as by this time it should
37413 have been determined if this function needs a vtable index or
37414 not. This happens for methods in derived classes that override
37415 virtual methods in base classes but are not explicitly marked as
37416 virtual. */
37417 if (DECL_VINDEX (version->decl))
37418 sorry ("Virtual function multiversioning not supported");
37420 fn_ver_vec.safe_push (version->decl);
37423 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37424 cgraph_edge::rebuild_edges ();
37425 pop_cfun ();
37426 return resolver;
37430 /* Hook to determine if one function can safely inline another. */
37432 static bool
37433 rs6000_can_inline_p (tree caller, tree callee)
37435 bool ret = false;
37436 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37437 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37439 /* If callee has no option attributes, then it is ok to inline. */
37440 if (!callee_tree)
37441 ret = true;
37443 /* If caller has no option attributes, but callee does then it is not ok to
37444 inline. */
37445 else if (!caller_tree)
37446 ret = false;
37448 else
37450 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37451 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37453 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37454 can inline an altivec function but a non-vsx function can't inline a
37455 vsx function. */
37456 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37457 == callee_opts->x_rs6000_isa_flags)
37458 ret = true;
37461 if (TARGET_DEBUG_TARGET)
37462 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37463 get_decl_name (caller), get_decl_name (callee),
37464 (ret ? "can" : "cannot"));
37466 return ret;
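/* Example (illustrative): a caller compiled with target ("cpu=power9") has
   the AltiVec flag bit set, so it may inline a callee marked
   target ("altivec"); the reverse direction fails the subset test above and
   the inline is refused.  */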
37469 /* Allocate a stack temp and fixup the address so it meets the particular
37470 memory requirements (either offsettable or REG+REG addressing). */
37472 rtx
37473 rs6000_allocate_stack_temp (machine_mode mode,
37474 bool offsettable_p,
37475 bool reg_reg_p)
37477 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37478 rtx addr = XEXP (stack, 0);
37479 int strict_p = reload_completed;
37481 if (!legitimate_indirect_address_p (addr, strict_p))
37483 if (offsettable_p
37484 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37485 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37487 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37488 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37491 return stack;
37494 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37495 to such a form to deal with memory reference instructions like STFIWX that
37496 only take reg+reg addressing. */
37498 rtx
37499 rs6000_address_for_fpconvert (rtx x)
37501 rtx addr;
37503 gcc_assert (MEM_P (x));
37504 addr = XEXP (x, 0);
37505 if (! legitimate_indirect_address_p (addr, reload_completed)
37506 && ! legitimate_indexed_address_p (addr, reload_completed))
37508 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37510 rtx reg = XEXP (addr, 0);
37511 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37512 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37513 gcc_assert (REG_P (reg));
37514 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37515 addr = reg;
37517 else if (GET_CODE (addr) == PRE_MODIFY)
37519 rtx reg = XEXP (addr, 0);
37520 rtx expr = XEXP (addr, 1);
37521 gcc_assert (REG_P (reg));
37522 gcc_assert (GET_CODE (expr) == PLUS);
37523 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37524 addr = reg;
37527 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37530 return x;
37533 /* Given a memory reference, if it is not in the form for altivec memory
37534 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37535 convert to the altivec format. */
37537 rtx
37538 rs6000_address_for_altivec (rtx x)
37540 gcc_assert (MEM_P (x));
37541 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37543 rtx addr = XEXP (x, 0);
37545 if (!legitimate_indexed_address_p (addr, reload_completed)
37546 && !legitimate_indirect_address_p (addr, reload_completed))
37547 addr = copy_to_mode_reg (Pmode, addr);
37549 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37550 x = change_address (x, GET_MODE (x), addr);
37553 return x;
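/* Note (illustrative): the AND with -16 clears the low four address bits,
   matching what the lvx/stvx hardware does; e.g. an address of 0x1008 is
   treated as 0x1000 for the 16-byte AltiVec access.  */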
37556 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37558 On the RS/6000, all integer constants are acceptable, though most won't be
37559 valid for particular insns. Only easy FP constants are acceptable. */
37561 static bool
37562 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37564 if (TARGET_ELF && tls_referenced_p (x))
37565 return false;
37567 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37568 || GET_MODE (x) == VOIDmode
37569 || (TARGET_POWERPC64 && mode == DImode)
37570 || easy_fp_constant (x, mode)
37571 || easy_vector_constant (x, mode));
37575 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37577 static bool
37578 chain_already_loaded (rtx_insn *last)
37580 for (; last != NULL; last = PREV_INSN (last))
37582 if (NONJUMP_INSN_P (last))
37584 rtx patt = PATTERN (last);
37586 if (GET_CODE (patt) == SET)
37588 rtx lhs = XEXP (patt, 0);
37590 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37591 return true;
37595 return false;
37598 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37600 void
37601 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37603 const bool direct_call_p
37604 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37605 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37606 rtx toc_load = NULL_RTX;
37607 rtx toc_restore = NULL_RTX;
37608 rtx func_addr;
37609 rtx abi_reg = NULL_RTX;
37610 rtx call[4];
37611 int n_call;
37612 rtx insn;
37614 /* Handle longcall attributes. */
37615 if (INTVAL (cookie) & CALL_LONG)
37616 func_desc = rs6000_longcall_ref (func_desc);
37618 /* Handle indirect calls. */
37619 if (GET_CODE (func_desc) != SYMBOL_REF
37620 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37622 /* Save the TOC into its reserved slot before the call,
37623 and prepare to restore it after the call. */
37624 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37625 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37626 rtx stack_toc_mem = gen_frame_mem (Pmode,
37627 gen_rtx_PLUS (Pmode, stack_ptr,
37628 stack_toc_offset));
37629 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37630 gen_rtvec (1, stack_toc_offset),
37631 UNSPEC_TOCSLOT);
37632 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37634 /* Can we optimize saving the TOC in the prologue or
37635 do we need to do it at every call? */
37636 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37637 cfun->machine->save_toc_in_prologue = true;
37638 else
37640 MEM_VOLATILE_P (stack_toc_mem) = 1;
37641 emit_move_insn (stack_toc_mem, toc_reg);
37644 if (DEFAULT_ABI == ABI_ELFv2)
37646 /* A function pointer in the ELFv2 ABI is just a plain address, but
37647 the ABI requires it to be loaded into r12 before the call. */
37648 func_addr = gen_rtx_REG (Pmode, 12);
37649 emit_move_insn (func_addr, func_desc);
37650 abi_reg = func_addr;
37652 else
37654 /* A function pointer under AIX is a pointer to a data area whose
37655 first word contains the actual address of the function, whose
37656 second word contains a pointer to its TOC, and whose third word
37657 contains a value to place in the static chain register (r11).
37658 Note that if we load the static chain, our "trampoline" need
37659 not have any executable code. */
37661 /* Load up address of the actual function. */
37662 func_desc = force_reg (Pmode, func_desc);
37663 func_addr = gen_reg_rtx (Pmode);
37664 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37666 /* Prepare to load the TOC of the called function. Note that the
37667 TOC load must happen immediately before the actual call so
37668 that unwinding the TOC registers works correctly. See the
37669 comment in frob_update_context. */
37670 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37671 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37672 gen_rtx_PLUS (Pmode, func_desc,
37673 func_toc_offset));
37674 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37676 /* If we have a static chain, load it up. But, if the call was
37677 originally direct, the 3rd word has not been written since no
37678 trampoline has been built, so we ought not to load it, lest we
37679 overwrite a static chain value. */
37680 if (!direct_call_p
37681 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37682 && !chain_already_loaded (get_current_sequence ()->next->last))
37684 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37685 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37686 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37687 gen_rtx_PLUS (Pmode, func_desc,
37688 func_sc_offset));
37689 emit_move_insn (sc_reg, func_sc_mem);
37690 abi_reg = sc_reg;
37694 else
37696 /* Direct calls use the TOC: for local calls, the callee will
37697 assume the TOC register is set; for non-local calls, the
37698 PLT stub needs the TOC register. */
37699 abi_reg = toc_reg;
37700 func_addr = func_desc;
37703 /* Create the call. */
37704 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37705 if (value != NULL_RTX)
37706 call[0] = gen_rtx_SET (value, call[0]);
37707 n_call = 1;
37709 if (toc_load)
37710 call[n_call++] = toc_load;
37711 if (toc_restore)
37712 call[n_call++] = toc_restore;
37714 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37716 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37717 insn = emit_call_insn (insn);
37719 /* Mention all registers defined by the ABI to hold information
37720 as uses in CALL_INSN_FUNCTION_USAGE. */
37721 if (abi_reg)
37722 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
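/* Rough shape of the 64-bit AIX indirect-call sequence built here (hedged;
   exact registers and the TOC save offset depend on the ABI and on
   RS6000_TOC_SAVE_SLOT):

       std   r2,40(r1)     # save our TOC in the reserved stack slot
       ld    r11,16(desc)  # static chain word, when it must be loaded
       ld    r2,8(desc)    # callee's TOC (the toc_load USE)
       ld    r0,0(desc)    # actual function address
       mtctr r0
       bctrl
       ld    r2,40(r1)     # toc_restore after the call returns
*/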
37725 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37727 void
37728 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37730 rtx call[2];
37731 rtx insn;
37733 gcc_assert (INTVAL (cookie) == 0);
37735 /* Create the call. */
37736 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37737 if (value != NULL_RTX)
37738 call[0] = gen_rtx_SET (value, call[0]);
37740 call[1] = simple_return_rtx;
37742 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37743 insn = emit_call_insn (insn);
37745 /* Note use of the TOC register. */
37746 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37749 /* Return whether we need to always update the saved TOC pointer when we update
37750 the stack pointer. */
37752 static bool
37753 rs6000_save_toc_in_prologue_p (void)
37755 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37758 #ifdef HAVE_GAS_HIDDEN
37759 # define USE_HIDDEN_LINKONCE 1
37760 #else
37761 # define USE_HIDDEN_LINKONCE 0
37762 #endif
37764 /* Fills in the label name that should be used for a 476 link stack thunk. */
37766 void
37767 get_ppc476_thunk_name (char name[32])
37769 gcc_assert (TARGET_LINK_STACK);
37771 if (USE_HIDDEN_LINKONCE)
37772 sprintf (name, "__ppc476.get_thunk");
37773 else
37774 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37777 /* This function emits the simple thunk routine that is used to preserve
37778 the link stack on the 476 cpu. */
37780 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37781 static void
37782 rs6000_code_end (void)
37784 char name[32];
37785 tree decl;
37787 if (!TARGET_LINK_STACK)
37788 return;
37790 get_ppc476_thunk_name (name);
37792 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37793 build_function_type_list (void_type_node, NULL_TREE));
37794 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37795 NULL_TREE, void_type_node);
37796 TREE_PUBLIC (decl) = 1;
37797 TREE_STATIC (decl) = 1;
37799 #if RS6000_WEAK
37800 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37802 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37803 targetm.asm_out.unique_section (decl, 0);
37804 switch_to_section (get_named_section (decl, NULL, 0));
37805 DECL_WEAK (decl) = 1;
37806 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37807 targetm.asm_out.globalize_label (asm_out_file, name);
37808 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37809 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37811 else
37812 #endif
37814 switch_to_section (text_section);
37815 ASM_OUTPUT_LABEL (asm_out_file, name);
37818 DECL_INITIAL (decl) = make_node (BLOCK);
37819 current_function_decl = decl;
37820 allocate_struct_function (decl, false);
37821 init_function_start (decl);
37822 first_function_block_is_cold = false;
37823 /* Make sure unwind info is emitted for the thunk if needed. */
37824 final_start_function (emit_barrier (), asm_out_file, 1);
37826 fputs ("\tblr\n", asm_out_file);
37828 final_end_function ();
37829 init_insn_lengths ();
37830 free_after_compilation (cfun);
37831 set_cfun (NULL);
37832 current_function_decl = NULL;
37835 /* Add r30 to hard reg set if the prologue sets it up and it is not
37836 pic_offset_table_rtx. */
37838 static void
37839 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37841 if (!TARGET_SINGLE_PIC_BASE
37842 && TARGET_TOC
37843 && TARGET_MINIMAL_TOC
37844 && !constant_pool_empty_p ())
37845 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37846 if (cfun->machine->split_stack_argp_used)
37847 add_to_hard_reg_set (&set->set, Pmode, 12);
37849 /* Make sure the hard reg set doesn't include r2, which was possibly added
37850 via PIC_OFFSET_TABLE_REGNUM. */
37851 if (TARGET_TOC)
37852 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37856 /* Helper function for rs6000_split_logical to emit a logical instruction after
37857 splitting the operation into single GPR registers.
37859 DEST is the destination register.
37860 OP1 and OP2 are the input source registers.
37861 CODE is the base operation (AND, IOR, XOR, NOT).
37862 MODE is the machine mode.
37863 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37864 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37865 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37867 static void
37868 rs6000_split_logical_inner (rtx dest,
37869 rtx op1,
37870 rtx op2,
37871 enum rtx_code code,
37872 machine_mode mode,
37873 bool complement_final_p,
37874 bool complement_op1_p,
37875 bool complement_op2_p)
37877 rtx bool_rtx;
37879 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37880 if (op2 && GET_CODE (op2) == CONST_INT
37881 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37882 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37884 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37885 HOST_WIDE_INT value = INTVAL (op2) & mask;
37887 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37888 if (code == AND)
37890 if (value == 0)
37892 emit_insn (gen_rtx_SET (dest, const0_rtx));
37893 return;
37896 else if (value == mask)
37898 if (!rtx_equal_p (dest, op1))
37899 emit_insn (gen_rtx_SET (dest, op1));
37900 return;
37904 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37905 into separate ORI/ORIS or XORI/XORIS instructions. */
37906 else if (code == IOR || code == XOR)
37908 if (value == 0)
37910 if (!rtx_equal_p (dest, op1))
37911 emit_insn (gen_rtx_SET (dest, op1));
37912 return;
37917 if (code == AND && mode == SImode
37918 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37920 emit_insn (gen_andsi3 (dest, op1, op2));
37921 return;
37924 if (complement_op1_p)
37925 op1 = gen_rtx_NOT (mode, op1);
37927 if (complement_op2_p)
37928 op2 = gen_rtx_NOT (mode, op2);
37930 /* For canonical RTL, if only one arm is inverted it is the first. */
37931 if (!complement_op1_p && complement_op2_p)
37932 std::swap (op1, op2);
37934 bool_rtx = ((code == NOT)
37935 ? gen_rtx_NOT (mode, op1)
37936 : gen_rtx_fmt_ee (code, mode, op1, op2));
37938 if (complement_final_p)
37939 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37941 emit_insn (gen_rtx_SET (dest, bool_rtx));
37944 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37945 operations are split immediately during RTL generation to allow for more
37946 optimizations of the AND/IOR/XOR.
37948 OPERANDS is an array containing the destination and two input operands.
37949 CODE is the base operation (AND, IOR, XOR, NOT).
37950 MODE is the machine mode.
37951 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37952 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37953 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37957 static void
37958 rs6000_split_logical_di (rtx operands[3],
37959 enum rtx_code code,
37960 bool complement_final_p,
37961 bool complement_op1_p,
37962 bool complement_op2_p)
37964 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37965 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37966 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37967 enum hi_lo { hi = 0, lo = 1 };
37968 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37969 size_t i;
37971 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37972 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37973 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37974 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37976 if (code == NOT)
37977 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37978 else
37980 if (GET_CODE (operands[2]) != CONST_INT)
37982 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37983 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37985 else
37987 HOST_WIDE_INT value = INTVAL (operands[2]);
37988 HOST_WIDE_INT value_hi_lo[2];
37990 gcc_assert (!complement_final_p);
37991 gcc_assert (!complement_op1_p);
37992 gcc_assert (!complement_op2_p);
37994 value_hi_lo[hi] = value >> 32;
37995 value_hi_lo[lo] = value & lower_32bits;
37997 for (i = 0; i < 2; i++)
37999 HOST_WIDE_INT sub_value = value_hi_lo[i];
38001 if (sub_value & sign_bit)
38002 sub_value |= upper_32bits;
38004 op2_hi_lo[i] = GEN_INT (sub_value);
38006 /* If this is an AND instruction, check to see if we need to load
38007 the value in a register. */
38008 if (code == AND && sub_value != -1 && sub_value != 0
38009 && !and_operand (op2_hi_lo[i], SImode))
38010 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38015 for (i = 0; i < 2; i++)
38017 /* Split large IOR/XOR operations. */
38018 if ((code == IOR || code == XOR)
38019 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38020 && !complement_final_p
38021 && !complement_op1_p
38022 && !complement_op2_p
38023 && !logical_const_operand (op2_hi_lo[i], SImode))
38025 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38026 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38027 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38028 rtx tmp = gen_reg_rtx (SImode);
38030 /* Make sure the constant is sign extended. */
38031 if ((hi_16bits & sign_bit) != 0)
38032 hi_16bits |= upper_32bits;
38034 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38035 code, SImode, false, false, false);
38037 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38038 code, SImode, false, false, false);
38040 else
38041 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38042 code, SImode, complement_final_p,
38043 complement_op1_p, complement_op2_p);
38046 return;
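/* Example (illustrative): an SImode half whose constant is not a single
   16-bit logical immediate is split in two, e.g.
       (xor:SI r3 0x12345678)  ->  xoris r3,r3,0x1234 ; xori r3,r3,0x5678  */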
38049 /* Split the insns that make up boolean operations operating on multiple GPR
38050 registers. The boolean MD patterns ensure that the inputs either are
38051 exactly the same as the output registers, or there is no overlap.
38053 OPERANDS is an array containing the destination and two input operands.
38054 CODE is the base operation (AND, IOR, XOR, NOT).
38055 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38056 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38057 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38059 void
38060 rs6000_split_logical (rtx operands[3],
38061 enum rtx_code code,
38062 bool complement_final_p,
38063 bool complement_op1_p,
38064 bool complement_op2_p)
38066 machine_mode mode = GET_MODE (operands[0]);
38067 machine_mode sub_mode;
38068 rtx op0, op1, op2;
38069 int sub_size, regno0, regno1, nregs, i;
38071 /* If this is DImode, use the specialized version that can run before
38072 register allocation. */
38073 if (mode == DImode && !TARGET_POWERPC64)
38075 rs6000_split_logical_di (operands, code, complement_final_p,
38076 complement_op1_p, complement_op2_p);
38077 return;
38080 op0 = operands[0];
38081 op1 = operands[1];
38082 op2 = (code == NOT) ? NULL_RTX : operands[2];
38083 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38084 sub_size = GET_MODE_SIZE (sub_mode);
38085 regno0 = REGNO (op0);
38086 regno1 = REGNO (op1);
38088 gcc_assert (reload_completed);
38089 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38090 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38092 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38093 gcc_assert (nregs > 1);
38095 if (op2 && REG_P (op2))
38096 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38098 for (i = 0; i < nregs; i++)
38100 int offset = i * sub_size;
38101 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38102 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38103 rtx sub_op2 = ((code == NOT)
38104 ? NULL_RTX
38105 : simplify_subreg (sub_mode, op2, mode, offset));
38107 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38108 complement_final_p, complement_op1_p,
38109 complement_op2_p);
38112 return;
38116 /* Return true if the peephole2 can combine an addis instruction with a
38117 D-form load whose offset addressing allows the pair to be fused together
38118 on a power8. */
38120 bool
38121 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38122 rtx addis_value, /* addis value. */
38123 rtx target, /* target register that is loaded. */
38124 rtx mem) /* bottom part of the memory addr. */
38126 rtx addr;
38127 rtx base_reg;
38129 /* Validate arguments. */
38130 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38131 return false;
38133 if (!base_reg_operand (target, GET_MODE (target)))
38134 return false;
38136 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38137 return false;
38139 /* Allow sign/zero extension. */
38140 if (GET_CODE (mem) == ZERO_EXTEND
38141 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38142 mem = XEXP (mem, 0);
38144 if (!MEM_P (mem))
38145 return false;
38147 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38148 return false;
38150 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38151 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38152 return false;
38154 /* Validate that the register used to load the high value is either the
38155 register being loaded, or we can safely replace its use.
38157 This function is only called from the peephole2 pass and we assume that
38158 there are 2 instructions in the peephole (addis and load), so we want to
38159 check that the target register was not used in the memory address and that
38160 the register holding the addis result is dead after the peephole.
38161 if (REGNO (addis_reg) != REGNO (target))
38163 if (reg_mentioned_p (target, mem))
38164 return false;
38166 if (!peep2_reg_dead_p (2, addis_reg))
38167 return false;
38169 /* If the target register being loaded is the stack pointer, we must
38170 avoid loading any other value into it, even temporarily. */
38171 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38172 return false;
38175 base_reg = XEXP (addr, 0);
38176 return REGNO (addis_reg) == REGNO (base_reg);
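/* Example of the fused pair this peephole looks for (illustrative):
       addis 9,2,.LC0@toc@ha      # addis_reg / addis_value
       lwz   9,.LC0@toc@l(9)      # load into the same register
   Power8 fuses the pair when the addis result feeds the load's base.  */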
38179 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38180 sequence. We adjust the addis register to use the target register. If the
38181 load sign extends, we adjust the code to do the zero extending load, and an
38182 explicit sign extension later since the fusion only covers zero extending
38183 loads.
38185 The operands are:
38186 operands[0] register set with addis (to be replaced with target)
38187 operands[1] value set via addis
38188 operands[2] target register being loaded
38189 operands[3] D-form memory reference using operands[0]. */
38191 void
38192 expand_fusion_gpr_load (rtx *operands)
38194 rtx addis_value = operands[1];
38195 rtx target = operands[2];
38196 rtx orig_mem = operands[3];
38197 rtx new_addr, new_mem, orig_addr, offset;
38198 enum rtx_code plus_or_lo_sum;
38199 machine_mode target_mode = GET_MODE (target);
38200 machine_mode extend_mode = target_mode;
38201 machine_mode ptr_mode = Pmode;
38202 enum rtx_code extend = UNKNOWN;
38204 if (GET_CODE (orig_mem) == ZERO_EXTEND
38205 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38207 extend = GET_CODE (orig_mem);
38208 orig_mem = XEXP (orig_mem, 0);
38209 target_mode = GET_MODE (orig_mem);
38212 gcc_assert (MEM_P (orig_mem));
38214 orig_addr = XEXP (orig_mem, 0);
38215 plus_or_lo_sum = GET_CODE (orig_addr);
38216 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38218 offset = XEXP (orig_addr, 1);
38219 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38220 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38222 if (extend != UNKNOWN)
38223 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38225 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38226 UNSPEC_FUSION_GPR);
38227 emit_insn (gen_rtx_SET (target, new_mem));
38229 if (extend == SIGN_EXTEND)
38231 int sub_off = ((BYTES_BIG_ENDIAN)
38232 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38233 : 0);
38234 rtx sign_reg
38235 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38237 emit_insn (gen_rtx_SET (target,
38238 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38241 return;
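/* Sketch of the rewrite (illustrative): given
       addis 10,2,sym@toc@ha
       lwz   9,sym@toc@l(10)
   with r10 dead afterwards, the addis is re-targeted at r9 so that both
   instructions use the loaded register, yielding the fusible form above.  */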
38244 /* Emit the addis instruction that will be part of a fused instruction
38245 sequence. */
38247 void
38248 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38249 const char *mode_name)
38251 rtx fuse_ops[10];
38252 char insn_template[80];
38253 const char *addis_str = NULL;
38254 const char *comment_str = ASM_COMMENT_START;
38256 if (*comment_str == ' ')
38257 comment_str++;
38259 /* Emit the addis instruction. */
38260 fuse_ops[0] = target;
38261 if (satisfies_constraint_L (addis_value))
38263 fuse_ops[1] = addis_value;
38264 addis_str = "lis %0,%v1";
38267 else if (GET_CODE (addis_value) == PLUS)
38269 rtx op0 = XEXP (addis_value, 0);
38270 rtx op1 = XEXP (addis_value, 1);
38272 if (REG_P (op0) && CONST_INT_P (op1)
38273 && satisfies_constraint_L (op1))
38275 fuse_ops[1] = op0;
38276 fuse_ops[2] = op1;
38277 addis_str = "addis %0,%1,%v2";
38281 else if (GET_CODE (addis_value) == HIGH)
38283 rtx value = XEXP (addis_value, 0);
38284 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38286 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38287 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38288 if (TARGET_ELF)
38289 addis_str = "addis %0,%2,%1@toc@ha";
38291 else if (TARGET_XCOFF)
38292 addis_str = "addis %0,%1@u(%2)";
38294 else
38295 gcc_unreachable ();
38298 else if (GET_CODE (value) == PLUS)
38300 rtx op0 = XEXP (value, 0);
38301 rtx op1 = XEXP (value, 1);
38303 if (GET_CODE (op0) == UNSPEC
38304 && XINT (op0, 1) == UNSPEC_TOCREL
38305 && CONST_INT_P (op1))
38307 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38308 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38309 fuse_ops[3] = op1;
38310 if (TARGET_ELF)
38311 addis_str = "addis %0,%2,%1+%3@toc@ha";
38313 else if (TARGET_XCOFF)
38314 addis_str = "addis %0,%1+%3@u(%2)";
38316 else
38317 gcc_unreachable ();
38321 else if (satisfies_constraint_L (value))
38323 fuse_ops[1] = value;
38324 addis_str = "lis %0,%v1";
38327 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38329 fuse_ops[1] = value;
38330 addis_str = "lis %0,%1@ha";
38334 if (!addis_str)
38335 fatal_insn ("Could not generate addis value for fusion", addis_value);
38337 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38338 comment, mode_name);
38339 output_asm_insn (insn_template, fuse_ops);
38342 /* Emit a D-form load or store instruction that is the second instruction
38343 of a fusion sequence. */
38345 void
38346 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38347 const char *insn_str)
38349 rtx fuse_ops[10];
38350 char insn_template[80];
38352 fuse_ops[0] = load_store_reg;
38353 fuse_ops[1] = addis_reg;
38355 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38357 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38358 fuse_ops[2] = offset;
38359 output_asm_insn (insn_template, fuse_ops);
38362 else if (GET_CODE (offset) == UNSPEC
38363 && XINT (offset, 1) == UNSPEC_TOCREL)
38365 if (TARGET_ELF)
38366 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38368 else if (TARGET_XCOFF)
38369 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38371 else
38372 gcc_unreachable ();
38374 fuse_ops[2] = XVECEXP (offset, 0, 0);
38375 output_asm_insn (insn_template, fuse_ops);
38378 else if (GET_CODE (offset) == PLUS
38379 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38380 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38381 && CONST_INT_P (XEXP (offset, 1)))
38383 rtx tocrel_unspec = XEXP (offset, 0);
38384 if (TARGET_ELF)
38385 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38387 else if (TARGET_XCOFF)
38388 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38390 else
38391 gcc_unreachable ();
38393 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38394 fuse_ops[3] = XEXP (offset, 1);
38395 output_asm_insn (insn_template, fuse_ops);
38398 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38400 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38402 fuse_ops[2] = offset;
38403 output_asm_insn (insn_template, fuse_ops);
38406 else
38407 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38409 return;
38412 /* Wrap a TOC address that can be fused to indicate that special fusion
38413 processing is needed. */
38415 rtx
38416 fusion_wrap_memory_address (rtx old_mem)
38418 rtx old_addr = XEXP (old_mem, 0);
38419 rtvec v = gen_rtvec (1, old_addr);
38420 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38421 return replace_equiv_address_nv (old_mem, new_addr, false);
38424 /* Given an address, convert it into the addis and load offset parts. Addresses
38425 created during the peephole2 process look like:
38426 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38427 (unspec [(...)] UNSPEC_TOCREL))
38429 Addresses created via toc fusion look like:
38430 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38432 static void
38433 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38435 rtx hi, lo;
38437 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38439 lo = XVECEXP (addr, 0, 0);
38440 hi = gen_rtx_HIGH (Pmode, lo);
38442 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38444 hi = XEXP (addr, 0);
38445 lo = XEXP (addr, 1);
38447 else
38448 gcc_unreachable ();
38450 *p_hi = hi;
38451 *p_lo = lo;
38454 /* Return a string to fuse an addis instruction with a gpr load into the same
38455 register that the addis instruction set up. The address that is used
38456 is the logical address that was formed during peephole2:
38457 (lo_sum (high) (low-part))
38459 Or the address is the TOC address that is wrapped before register allocation:
38460 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38462 The code is complicated, so we call output_asm_insn directly, and just
38463 return "". */
38465 const char *
38466 emit_fusion_gpr_load (rtx target, rtx mem)
38468 rtx addis_value;
38469 rtx addr;
38470 rtx load_offset;
38471 const char *load_str = NULL;
38472 const char *mode_name = NULL;
38473 machine_mode mode;
38475 if (GET_CODE (mem) == ZERO_EXTEND)
38476 mem = XEXP (mem, 0);
38478 gcc_assert (REG_P (target) && MEM_P (mem));
38480 addr = XEXP (mem, 0);
38481 fusion_split_address (addr, &addis_value, &load_offset);
38483 /* Now emit the load instruction to the same register. */
38484 mode = GET_MODE (mem);
38485 switch (mode)
38487 case E_QImode:
38488 mode_name = "char";
38489 load_str = "lbz";
38490 break;
38492 case E_HImode:
38493 mode_name = "short";
38494 load_str = "lhz";
38495 break;
38497 case E_SImode:
38498 case E_SFmode:
38499 mode_name = (mode == SFmode) ? "float" : "int";
38500 load_str = "lwz";
38501 break;
38503 case E_DImode:
38504 case E_DFmode:
38505 gcc_assert (TARGET_POWERPC64);
38506 mode_name = (mode == DFmode) ? "double" : "long";
38507 load_str = "ld";
38508 break;
38510 default:
38511 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38514 /* Emit the addis instruction. */
38515 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38517 /* Emit the D-form load instruction. */
38518 emit_fusion_load_store (target, target, load_offset, load_str);
38520 return "";
38524 /* Return true if the peephole2 can combine a load/store involving a
38525 combination of an addis instruction and the memory operation. This kind of
38526 fusion was added to the ISA 3.0 (power9) hardware.
38528 bool
38529 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38530 rtx addis_value, /* addis value. */
38531 rtx dest, /* destination (memory or register). */
38532 rtx src) /* source (register or memory). */
38534 rtx addr, mem, offset;
38535 machine_mode mode = GET_MODE (src);
38537 /* Validate arguments. */
38538 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38539 return false;
38541 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38542 return false;
38544 /* Ignore extend operations that are part of the load. */
38545 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38546 src = XEXP (src, 0);
38548 /* Test for memory<-register or register<-memory. */
38549 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38551 if (!MEM_P (dest))
38552 return false;
38554 mem = dest;
38557 else if (MEM_P (src))
38559 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38560 return false;
38562 mem = src;
38565 else
38566 return false;
38568 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38569 if (GET_CODE (addr) == PLUS)
38571 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38572 return false;
38574 return satisfies_constraint_I (XEXP (addr, 1));
38577 else if (GET_CODE (addr) == LO_SUM)
38579 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38580 return false;
38582 offset = XEXP (addr, 1);
38583 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38584 return small_toc_ref (offset, GET_MODE (offset));
38586 else if (TARGET_ELF && !TARGET_POWERPC64)
38587 return CONSTANT_P (offset);
38590 return false;
38593 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38594 load sequence.
38596 The operands are:
38597 operands[0] register set with addis
38598 operands[1] value set via addis
38599 operands[2] target register being loaded
38600 operands[3] D-form memory reference using operands[0].
38602 This is similar to the fusion introduced with power8, except it extends to
38603 both loads and stores and does not require the result register to be the
38604 same as the base register. At the moment, we only do this if the register
38605 set with addis is dead.
38607 void
38608 expand_fusion_p9_load (rtx *operands)
38610 rtx tmp_reg = operands[0];
38611 rtx addis_value = operands[1];
38612 rtx target = operands[2];
38613 rtx orig_mem = operands[3];
38614 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38615 enum rtx_code plus_or_lo_sum;
38616 machine_mode target_mode = GET_MODE (target);
38617 machine_mode extend_mode = target_mode;
38618 machine_mode ptr_mode = Pmode;
38619 enum rtx_code extend = UNKNOWN;
38621 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38623 extend = GET_CODE (orig_mem);
38624 orig_mem = XEXP (orig_mem, 0);
38625 target_mode = GET_MODE (orig_mem);
38628 gcc_assert (MEM_P (orig_mem));
38630 orig_addr = XEXP (orig_mem, 0);
38631 plus_or_lo_sum = GET_CODE (orig_addr);
38632 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38634 offset = XEXP (orig_addr, 1);
38635 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38636 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38638 if (extend != UNKNOWN)
38639 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38641 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38642 UNSPEC_FUSION_P9);
38644 set = gen_rtx_SET (target, new_mem);
38645 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38646 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38647 emit_insn (insn);
38649 return;
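
/* Sketch of the transformation (register numbers are made up): a peephole2
   candidate such as

     (set (reg:DI 10) (high:DI (symbol_ref "x")))		; addis
     (set (reg:SI 9) (mem:SI (lo_sum:DI (reg:DI 10)
					(symbol_ref "x"))))	; D-form load

   becomes a single PARALLEL that keeps the load together with its addis,
   marks the memory access with UNSPEC_FUSION_P9, and clobbers the now-dead
   base register:

     (parallel [(set (reg:SI 9)
		     (unspec:SI [(mem:SI (lo_sum:DI ...))] UNSPEC_FUSION_P9))
		(clobber (reg:DI 10))])  */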
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion store sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads and stores and does not require the result register to be the
   same as the base register.  At the moment, we only do this if the register
   set with addis is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
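
/* The store counterpart of the sketch above: here it is the stored value
   that is wrapped in the unspec (illustrative operands):

     (parallel [(set (mem:SI ...)
		     (unspec:SI [(reg:SI 9)] UNSPEC_FUSION_P9))
		(clobber (reg:DI 10))])  */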
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
	load_string = "lfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
	load_string = "lxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  load_string = "lbz";
	  break;
	case E_HImode:
	  load_string = "lhz";
	  break;
	case E_SImode:
	case E_SFmode:
	  load_string = "lwz";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  load_string = "ld";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
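
/* For example (illustrative operands, not verbatim compiler output), a GPR
   SImode load would come out as the adjacent pair

     addis 11,2,var@toc@ha	# "power9 load fusion": high part in tmp_reg
     lwz 9,var@toc@l(11)	# D-form load through tmp_reg

   which ISA 3.0 (power9) hardware can fuse into a single internal
   operation.  */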
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	store_string = "stfs";
      else if (mode == DFmode)
	store_string = "stfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
	store_string = "stxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  store_string = "stb";
	  break;
	case E_HImode:
	  store_string = "sth";
	  break;
	case E_SImode:
	case E_SFmode:
	  store_string = "stw";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  store_string = "std";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
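
/* Illustrative store counterpart (made-up operands):

     addis 11,2,var@toc@ha	# "power9 store fusion": high part in tmp_reg
     stw 9,var@toc@l(11)	# D-form store through tmp_reg  */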
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

	*fenv_var = __builtin_mffs ();
	double fenv_hold;
	*(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
	__builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

	double fenv_clear = __builtin_mffs ();
	*(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
	__builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that keeps only the upper word of the mffs result, clearing every
     FPSCR field in the lower word.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

	double old_fenv = __builtin_mffs ();
	double fenv_update;
	*(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				   (*(uint64_t*)fenv_var & 0x1ff80fff);
	__builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
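
/* A minimal, self-contained sketch of the FPSCR mask arithmetic built above
   (hypothetical helper names; the hook emits GENERIC trees around
   __builtin_mffs/__builtin_mtfsf rather than calling anything like this):

     #include <stdint.h>

     // hold: keep only the rounding modes and the non-IEEE bit.
     static uint64_t fenv_hold_bits (uint64_t fpscr)
     { return fpscr & 0xffffffff00000007ULL; }

     // clear: zero the entire FPSCR low word.
     static uint64_t fenv_clear_bits (uint64_t fpscr)
     { return fpscr & 0xffffffff00000000ULL; }

     // update: merge the saved environment back into the current FPSCR.
     static uint64_t fenv_update_bits (uint64_t cur, uint64_t saved)
     { return (cur & 0xffffffff1fffff00ULL) | (saved & 0x1ff80fffULL); }  */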

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
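
/* Worked element flow for the big-endian path above (a sketch; the
   little-endian path swaps the permute selectors and the vmrgew operands to
   compensate):

     src1 = { a0, a1 }, src2 = { b0, b1 }		(V2DI doublewords)
     rtx_tmp0 = xxpermdi (src1, src2, 0) = { a0, b0 }
     rtx_tmp1 = xxpermdi (src1, src2, 3) = { a1, b1 }
     rtx_tmp2 = xvcv[su]xdsp (rtx_tmp0)			(floats in even words)
     rtx_tmp3 = xvcv[su]xdsp (rtx_tmp1)			(floats in even words)
     dst = vmrgew (rtx_tmp2, rtx_tmp3)
	 = { (float) a0, (float) a1, (float) b0, (float) b1 }  */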

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
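
/* Same dataflow as rs6000_generate_float2_code, in the opposite direction
   (a sketch): xxpermdi pairs the inputs as { a0, b0 } and { a1, b1 },
   xvcvdp[su]xws converts each V2DF pair to 32-bit integers in the even
   V4SI words, and vmrgew interleaves them so that
   dst = { (int) a0, (int) a1, (int) b0, (int) b1 }.  */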

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"